xref: /netbsd-src/sys/net/lagg/if_lagg.c (revision 7d62b00eb9ad855ffcd7da46b41e23feb5476fac)
1 /*	$NetBSD: if_lagg.c,v 1.48 2022/06/26 17:55:24 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5  * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6  * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
7  * Copyright (c) 2021, Internet Initiative Japan Inc.
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: if_lagg.c,v 1.48 2022/06/26 17:55:24 riastradh Exp $");
24 
25 #ifdef _KERNEL_OPT
26 #include "opt_inet.h"
27 #include "opt_lagg.h"
28 #endif
29 
30 #include <sys/param.h>
31 #include <sys/types.h>
32 
33 #include <sys/cprng.h>
34 #include <sys/cpu.h>
35 #include <sys/device.h>
36 #include <sys/evcnt.h>
37 #include <sys/hash.h>
38 #include <sys/kmem.h>
39 #include <sys/module.h>
40 #include <sys/pserialize.h>
41 #include <sys/pslist.h>
42 #include <sys/psref.h>
43 #include <sys/sysctl.h>
44 #include <sys/syslog.h>
45 #include <sys/workqueue.h>
46 
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_dl.h>
50 #include <net/if_ether.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_vlanvar.h>
54 #include <netinet/ip.h>
55 #include <netinet/ip6.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 
59 #if defined(INET) || defined(INET6)
60 #include <netinet/in.h>
61 #endif
62 
63 #ifdef INET6
64 #include <netinet6/in6_ifattach.h>
65 #include <netinet6/in6_var.h>
66 #endif
67 
68 #include <net/lagg/if_lagg.h>
69 #include <net/lagg/if_laggproto.h>
70 
71 #include "ioconf.h"
72 
/* Operations applied to a member port via the protocol wrappers. */
enum lagg_portctrl {
	LAGG_PORTCTRL_ALLOC,
	LAGG_PORTCTRL_FREE,
	LAGG_PORTCTRL_START,
	LAGG_PORTCTRL_STOP
};
79 
/* Interface personalities a lagg can assume (only ethernet today). */
enum lagg_iftypes {
	LAGG_IF_TYPE_ETHERNET,
};
83 
/*
 * Dispatch table of aggregation protocols, indexed by lagg_proto
 * number.  Handlers left NULL are skipped by the lagg_proto_*()
 * wrapper functions.
 */
static const struct lagg_proto lagg_protos[] = {
	[LAGG_PROTO_NONE] = {
		/* placeholder: no aggregation behavior */
		.pr_num = LAGG_PROTO_NONE,
		.pr_attach = lagg_none_attach,
	},
	[LAGG_PROTO_LACP] = {
		/* IEEE 802.1AX Link Aggregation Control Protocol */
		.pr_num = LAGG_PROTO_LACP,
		.pr_attach = lacp_attach,
		.pr_detach = lacp_detach,
		.pr_up = lacp_up,
		.pr_down = lacp_down,
		.pr_transmit = lacp_transmit,
		.pr_input = lacp_input,
		.pr_allocport = lacp_allocport,
		.pr_freeport = lacp_freeport,
		.pr_startport = lacp_startport,
		.pr_stopport = lacp_stopport,
		.pr_protostat = lacp_protostat,
		.pr_portstat = lacp_portstat,
		.pr_linkstate = lacp_linkstate_ifnet_locked,
		.pr_ioctl = lacp_ioctl,
	},
	[LAGG_PROTO_FAILOVER] = {
		/* active/backup failover between member ports */
		.pr_num = LAGG_PROTO_FAILOVER,
		.pr_attach = lagg_fail_attach,
		.pr_detach = lagg_common_detach,
		.pr_transmit = lagg_fail_transmit,
		.pr_input = lagg_fail_input,
		.pr_allocport = lagg_common_allocport,
		.pr_freeport = lagg_common_freeport,
		.pr_startport = lagg_common_startport,
		.pr_stopport = lagg_common_stopport,
		.pr_portstat = lagg_fail_portstat,
		.pr_linkstate = lagg_common_linkstate,
		.pr_ioctl = lagg_fail_ioctl,
	},
	[LAGG_PROTO_LOADBALANCE] = {
		/* static hash-based load balancing across ports */
		.pr_num = LAGG_PROTO_LOADBALANCE,
		.pr_attach = lagg_lb_attach,
		.pr_detach = lagg_common_detach,
		.pr_transmit = lagg_lb_transmit,
		.pr_input = lagg_lb_input,
		.pr_allocport = lagg_common_allocport,
		.pr_freeport = lagg_common_freeport,
		.pr_startport = lagg_lb_startport,
		.pr_stopport = lagg_lb_stopport,
		.pr_portstat = lagg_lb_portstat,
		.pr_linkstate = lagg_common_linkstate,
	},
};
134 
135 static int	lagg_chg_sadl(struct ifnet *, const uint8_t *, size_t);
136 static void	lagg_input_ethernet(struct ifnet *, struct mbuf *);
137 static int	lagg_clone_create(struct if_clone *, int);
138 static int	lagg_clone_destroy(struct ifnet *);
139 static int	lagg_init(struct ifnet *);
140 static int	lagg_init_locked(struct lagg_softc *);
141 static void	lagg_stop(struct ifnet *, int);
142 static void	lagg_stop_locked(struct lagg_softc *);
143 static int	lagg_ioctl(struct ifnet *, u_long, void *);
144 static int	lagg_transmit(struct ifnet *, struct mbuf *);
145 static void	lagg_start(struct ifnet *);
146 static int	lagg_media_change(struct ifnet *);
147 static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
148 static int	lagg_vlan_cb(struct ethercom *, uint16_t, bool);
149 static void	lagg_linkstate_changed(void *);
150 static void	lagg_ifdetach(void *);
151 static struct lagg_softc *
152 		lagg_softc_alloc(enum lagg_iftypes);
153 static void	lagg_softc_free(struct lagg_softc *);
154 static int	lagg_setup_sysctls(struct lagg_softc *);
155 static void	lagg_teardown_sysctls(struct lagg_softc *);
156 static int	lagg_proto_attach(struct lagg_softc *, lagg_proto,
157 		    struct lagg_proto_softc **);
158 static void	lagg_proto_detach(struct lagg_variant *);
159 static int	lagg_proto_up(struct lagg_softc *);
160 static void	lagg_proto_down(struct lagg_softc *);
161 static int	lagg_proto_allocport(struct lagg_softc *, struct lagg_port *);
162 static void	lagg_proto_freeport(struct lagg_softc *, struct lagg_port *);
163 static void	lagg_proto_startport(struct lagg_softc *,
164 		    struct lagg_port *);
165 static void	lagg_proto_stopport(struct lagg_softc *,
166 		    struct lagg_port *);
167 static struct mbuf *
168 		lagg_proto_input(struct lagg_softc *, struct lagg_port *,
169 		    struct mbuf *);
170 static void	lagg_proto_linkstate(struct lagg_softc *, struct lagg_port *);
171 static int	lagg_proto_ioctl(struct lagg_softc *, struct lagg_req *);
172 static int	lagg_get_stats(struct lagg_softc *, struct lagg_req *, size_t);
173 static int	lagg_pr_attach(struct lagg_softc *, lagg_proto);
174 static void	lagg_pr_detach(struct lagg_softc *);
175 static int	lagg_addport(struct lagg_softc *, struct ifnet *);
176 static int	lagg_delport(struct lagg_softc *, struct ifnet *);
177 static int	lagg_delport_all(struct lagg_softc *);
178 static int	lagg_port_ioctl(struct ifnet *, u_long, void *);
179 static int	lagg_port_output(struct ifnet *, struct mbuf *,
180 		    const struct sockaddr *, const struct rtentry *);
181 static void	lagg_config_promisc(struct lagg_softc *, struct lagg_port *);
182 static void	lagg_unconfig_promisc(struct lagg_softc *, struct lagg_port *);
183 static struct lagg_variant *
184 		lagg_variant_getref(struct lagg_softc *, struct psref *);
185 static void	lagg_variant_putref(struct lagg_variant *, struct psref *);
186 static int	lagg_ether_addmulti(struct lagg_softc *, struct ifreq *);
187 static int	lagg_ether_delmulti(struct lagg_softc *, struct ifreq *);
188 static void	lagg_port_syncmulti(struct lagg_softc *, struct lagg_port *);
189 static void	lagg_port_purgemulti(struct lagg_softc *, struct lagg_port *);
190 static int	lagg_port_setup(struct lagg_softc *, struct lagg_port *,
191 		    struct ifnet *);
192 static void	lagg_port_teardown(struct lagg_softc *, struct lagg_port *,
193 		    bool);
194 static void	lagg_port_syncvlan(struct lagg_softc *, struct lagg_port *);
195 static void	lagg_port_purgevlan(struct lagg_softc *, struct lagg_port *);
196 static void	lagg_capabilities_update(struct lagg_softc *);
197 static void	lagg_sync_ifcaps(struct lagg_softc *);
198 static void	lagg_sync_ethcaps(struct lagg_softc *);
199 static void	lagg_sync_sadl(struct lagg_softc *);
200 
201 static struct if_clone	 lagg_cloner =
202     IF_CLONE_INITIALIZER("lagg", lagg_clone_create, lagg_clone_destroy);
203 static unsigned int	 lagg_count;
204 static struct psref_class
205 		*lagg_psref_class __read_mostly;
206 static struct psref_class
207 		*lagg_port_psref_class __read_mostly;
208 
209 static enum lagg_iftypes
210 		 lagg_iftype = LAGG_IF_TYPE_ETHERNET;
211 
212 #ifdef LAGG_DEBUG
213 #define __LAGGDEBUGUSED
214 #define LAGG_DPRINTF(_sc, _fmt, _args...)	do {	\
215 	printf("%s: " _fmt, (_sc) != NULL ?		\
216 	(_sc)->sc_if.if_xname : "lagg", ##_args);		\
217 } while (0)
218 #else
219 #define __LAGGDEBUGUSED				__unused
220 #define LAGG_DPRINTF(_sc, _fmt, _args...)	__nothing
221 #endif
222 
223 #ifndef LAGG_SETCAPS_RETRY
224 #define LAGG_SETCAPS_RETRY	(LAGG_MAX_PORTS * 2)
225 #endif
226 
227 static size_t
228 lagg_sizeof_softc(enum lagg_iftypes ift)
229 {
230 	struct lagg_softc *_dummy = NULL;
231 	size_t s;
232 
233 	s = sizeof(*_dummy) - sizeof(_dummy->sc_if);
234 
235 	switch (ift) {
236 	case LAGG_IF_TYPE_ETHERNET:
237 		s += sizeof(struct ethercom);
238 		break;
239 	default:
240 		s += sizeof(struct ifnet);
241 		break;
242 	}
243 
244 	return s;
245 }
246 
247 static void
248 lagg_evcnt_attach(struct lagg_softc *sc,
249     struct evcnt *ev, const char *name)
250 {
251 
252 	evcnt_attach_dynamic(ev, EVCNT_TYPE_MISC, NULL,
253 	    sc->sc_evgroup, name);
254 }
255 
/*
 * Attach IPv6 to the interface if the IPv6 stack is present and the
 * interface is administratively up.  No-op in INET6-less kernels.
 */
static void
lagg_in6_ifattach(struct ifnet *ifp)
{

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	if (in6_present && ISSET(ifp->if_flags, IFF_UP))
		in6_ifattach(ifp, NULL);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif
}
269 
/*
 * Detach IPv6 from the interface if the IPv6 stack is present.
 * No-op in INET6-less kernels.
 */
static void
lagg_in6_ifdetach(struct ifnet *ifp)
{

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif
}
281 
282 static int
283 lagg_lp_ioctl(struct lagg_port *lp, u_long cmd, void *data)
284 {
285 	struct ifnet *ifp_port;
286 	int error;
287 
288 	if (lp->lp_ioctl == NULL)
289 		return EINVAL;
290 
291 	ifp_port = lp->lp_ifp;
292 	IFNET_LOCK(ifp_port);
293 	error = lp->lp_ioctl(ifp_port, cmd, data);
294 	IFNET_UNLOCK(ifp_port);
295 
296 	return error;
297 }
298 
299 static bool
300 lagg_lladdr_equal(const uint8_t *a, const uint8_t *b)
301 {
302 
303 	if (memcmp(a, b, ETHER_ADDR_LEN) == 0)
304 		return true;
305 
306 	return false;
307 }
308 
309 static void
310 lagg_lladdr_cpy(uint8_t *dst, const uint8_t *src)
311 {
312 
313 	memcpy(dst, src, ETHER_ADDR_LEN);
314 }
315 
void
laggattach(int n)
{

	/*
	 * Nothing to do here: all initialization is performed by the
	 * module initialization code in lagginit() below.
	 */
}
325 
326 static void
327 lagginit(void)
328 {
329 	size_t i;
330 
331 	lagg_psref_class = psref_class_create("laggvariant", IPL_SOFTNET);
332 	lagg_port_psref_class = psref_class_create("laggport", IPL_SOFTNET);
333 
334 	for (i = 0; i < LAGG_PROTO_MAX; i++) {
335 		if (lagg_protos[i].pr_init != NULL)
336 			lagg_protos[i].pr_init();
337 	}
338 
339 	if_clone_attach(&lagg_cloner);
340 }
341 
342 static int
343 laggdetach(void)
344 {
345 	size_t i;
346 
347 	if (lagg_count > 0)
348 		return EBUSY;
349 
350 	if_clone_detach(&lagg_cloner);
351 
352 	for (i = 0; i < LAGG_PROTO_MAX; i++) {
353 		if (lagg_protos[i].pr_fini != NULL)
354 			lagg_protos[i].pr_fini();
355 	}
356 
357 	psref_class_destroy(lagg_port_psref_class);
358 	psref_class_destroy(lagg_psref_class);
359 
360 	return 0;
361 }
362 
363 static int
364 lagg_clone_create(struct if_clone *ifc, int unit)
365 {
366 	struct lagg_softc *sc;
367 	struct ifnet *ifp;
368 	int error;
369 
370 	sc = lagg_softc_alloc(lagg_iftype);
371 	ifp = &sc->sc_if;
372 
373 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
374 	sc->sc_psz = pserialize_create();
375 	SIMPLEQ_INIT(&sc->sc_ports);
376 	LIST_INIT(&sc->sc_mclist);
377 	TAILQ_INIT(&sc->sc_vtags);
378 	sc->sc_hash_mac = true;
379 	sc->sc_hash_ipaddr = true;
380 	sc->sc_hash_ip6addr = true;
381 	sc->sc_hash_tcp = true;
382 	sc->sc_hash_udp = true;
383 
384 	if_initname(ifp, ifc->ifc_name, unit);
385 	ifp->if_softc = sc;
386 	ifp->if_init = lagg_init;
387 	ifp->if_stop = lagg_stop;
388 	ifp->if_ioctl = lagg_ioctl;
389 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
390 	ifp->if_extflags = IFEF_MPSAFE;
391 	ifp->if_transmit = lagg_transmit;
392 	ifp->if_start = lagg_start;
393 	IFQ_SET_READY(&ifp->if_snd);
394 
395 	error = lagg_setup_sysctls(sc);
396 	if (error != 0)
397 		goto destroy_psz;
398 
399 	/*XXX dependent on ethernet */
400 	ifmedia_init_with_lock(&sc->sc_media, 0, lagg_media_change,
401 	    lagg_media_status, &sc->sc_lock);
402 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
403 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
404 
405 	if_initialize(ifp);
406 
407 	switch (lagg_iftype) {
408 	case LAGG_IF_TYPE_ETHERNET:
409 		cprng_fast(sc->sc_lladdr_rand, sizeof(sc->sc_lladdr_rand));
410 		sc->sc_lladdr_rand[0] &= 0xFE; /* clear I/G bit */
411 		sc->sc_lladdr_rand[0] |= 0x02; /* set G/L bit */
412 		lagg_lladdr_cpy(sc->sc_lladdr, sc->sc_lladdr_rand);
413 		ether_set_vlan_cb((struct ethercom *)ifp, lagg_vlan_cb);
414 		ether_ifattach(ifp, sc->sc_lladdr_rand);
415 		break;
416 	default:
417 		panic("unknown if type");
418 	}
419 
420 	snprintf(sc->sc_evgroup, sizeof(sc->sc_evgroup),
421 	    "%s", ifp->if_xname);
422 	lagg_evcnt_attach(sc, &sc->sc_novar, "no lagg variant");
423 	if_link_state_change(&sc->sc_if, LINK_STATE_DOWN);
424 	lagg_setup_sysctls(sc);
425 	(void)lagg_pr_attach(sc, LAGG_PROTO_NONE);
426 	if_register(ifp);
427 	lagg_count++;
428 
429 	return 0;
430 
431 destroy_psz:
432 	pserialize_destroy(sc->sc_psz);
433 	mutex_destroy(&sc->sc_lock);
434 	lagg_softc_free(sc);
435 
436 	return error;
437 }
438 
/*
 * if_clone destructor: tear down a lagg(4) instance.
 *
 * Teardown order matters: stop the interface, release every member
 * port under the softc lock, detach the ethernet layer, then detach
 * the ifnet before freeing media, protocol, counters, sysctls and
 * finally the locks and softc itself.
 */
static int
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	lagg_stop(ifp, 1);

	/* detach all member ports */
	LAGG_LOCK(sc);
	while ((lp = LAGG_PORTS_FIRST(sc)) != NULL) {
		lagg_port_teardown(sc, lp, false);
	}
	LAGG_UNLOCK(sc);

	switch (ifp->if_type) {
	case IFT_ETHER:
		ether_ifdetach(ifp);
		/* vlan(4) must have released all tags by now */
		KASSERT(TAILQ_EMPTY(&sc->sc_vtags));
		break;
	}

	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	lagg_pr_detach(sc);
	evcnt_detach(&sc->sc_novar);
	lagg_teardown_sysctls(sc);

	pserialize_destroy(sc->sc_psz);
	mutex_destroy(&sc->sc_lock);
	lagg_softc_free(sc);

	if (lagg_count > 0)
		lagg_count--;

	return 0;
}
475 
476 static int
477 lagg_init(struct ifnet *ifp)
478 {
479 	struct lagg_softc *sc;
480 	int rv;
481 
482 	sc = ifp->if_softc;
483 	LAGG_LOCK(sc);
484 	rv = lagg_init_locked(sc);
485 	LAGG_UNLOCK(sc);
486 
487 	return rv;
488 }
489 
490 static int
491 lagg_init_locked(struct lagg_softc *sc)
492 {
493 	struct ifnet *ifp = &sc->sc_if;
494 	int rv;
495 
496 	KASSERT(LAGG_LOCKED(sc));
497 
498 	if (ISSET(ifp->if_flags, IFF_RUNNING))
499 		lagg_stop_locked(sc);
500 
501 	lagg_sync_sadl(sc);
502 
503 	SET(ifp->if_flags, IFF_RUNNING);
504 
505 	rv = lagg_proto_up(sc);
506 	if (rv != 0)
507 		lagg_stop_locked(sc);
508 
509 	return rv;
510 }
511 
512 static void
513 lagg_stop(struct ifnet *ifp, int disable __unused)
514 {
515 	struct lagg_softc *sc;
516 
517 	sc = ifp->if_softc;
518 	LAGG_LOCK(sc);
519 	lagg_stop_locked(sc);
520 	LAGG_UNLOCK(sc);
521 }
522 
523 static void
524 lagg_stop_locked(struct lagg_softc *sc)
525 {
526 	struct ifnet *ifp = &sc->sc_if;
527 
528 	KASSERT(LAGG_LOCKED(sc));
529 
530 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
531 		return;
532 
533 	CLR(ifp->if_flags, IFF_RUNNING);
534 	lagg_proto_down(sc);
535 
536 }
537 
/*
 * Apply one SIOCSLAGG sub-command to the given lagg.
 *
 * The calling LWP is bound to its CPU (curlwp_bind) for the whole
 * operation so that the psref obtained from if_get() stays valid.
 * Returns 0 or an errno.
 */
static int
lagg_config(struct lagg_softc *sc, struct lagg_req *lrq)
{
	struct ifnet *ifp_port;
	struct laggreqport *rp;
	struct lagg_port *lp;
	struct psref psref;
	size_t i;
	int error, bound;

	error = 0;
	bound = curlwp_bind();

	switch (lrq->lrq_ioctl) {
	case LAGGIOC_SETPROTO:
		if (lrq->lrq_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}

		/* switching protocols detaches all ports first */
		error = lagg_delport_all(sc);
		if (error != 0)
			break;
		error = lagg_pr_attach(sc, lrq->lrq_proto);
		if (error != 0)
			break;

		/* re-add any ports supplied with the request */
		for (i = 0; i < lrq->lrq_nports; i++) {
			rp = &lrq->lrq_reqports[i];
			ifp_port = if_get(rp->rp_portname, &psref);
			if (ifp_port == NULL) {
				error = ENOENT;
				break;	/* break for */
			}

			error = lagg_addport(sc, ifp_port);
			if_put(ifp_port, &psref);

			if (error != 0)
				break;	/* break for */
		}
		break;	/* break switch */
	case LAGGIOC_ADDPORT:
		rp = &lrq->lrq_reqports[0];
		ifp_port = if_get(rp->rp_portname, &psref);
		if (ifp_port == NULL) {
			error = ENOENT;
			break;
		}

		error = lagg_addport(sc, ifp_port);
		if_put(ifp_port, &psref);
		break;
	case LAGGIOC_DELPORT:
		rp = &lrq->lrq_reqports[0];
		ifp_port = if_get(rp->rp_portname, &psref);
		if (ifp_port == NULL) {
			error = ENOENT;
			break;
		}

		error = lagg_delport(sc, ifp_port);
		if_put(ifp_port, &psref);
		break;
	case LAGGIOC_SETPORTPRI:
		rp = &lrq->lrq_reqports[0];
		ifp_port = if_get(rp->rp_portname, &psref);
		if (ifp_port == NULL) {
			error = ENOENT;
			break;
		}

		/* the interface must currently be a port of this lagg */
		lp = ifp_port->if_lagg;
		if (lp == NULL || lp->lp_softc != sc) {
			if_put(ifp_port, &psref);
			error = ENOENT;
			break;
		}

		lp->lp_prio = rp->rp_prio;

		/* restart protocol */
		LAGG_LOCK(sc);
		lagg_proto_stopport(sc, lp);
		lagg_proto_startport(sc, lp);
		LAGG_UNLOCK(sc);
		if_put(ifp_port, &psref);
		break;
	case LAGGIOC_SETPROTOOPT:
		error = lagg_proto_ioctl(sc, lrq);
		break;
	default:
		error = ENOTTY;
	}

	curlwp_bindx(bound);
	return error;
}
636 
/*
 * ioctl handler for the lagg interface itself.  Handles the
 * lagg-specific get/set requests, propagates flag, MTU and
 * capability changes to the member ports, and falls back to
 * ether_ioctl() for everything else.
 */
static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lagg_softc *sc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_req laggreq, *laggresp;
	struct lagg_port *lp;
	size_t allocsiz, outlen, nports;
	char *outbuf;
	void *buf;
	int error = 0, rv;

	sc = ifp->if_softc;

	switch (cmd) {
	case SIOCGLAGG:
		/*
		 * Export protocol and port status.  The response is
		 * sized by the smaller of our port count and the
		 * space the caller offered.
		 */
		error = copyin(ifr->ifr_data, &laggreq, sizeof(laggreq));
		if (error != 0)
			break;

		nports = sc->sc_nports;
		nports = MIN(nports, laggreq.lrq_nports);

		allocsiz = sizeof(*laggresp)
		    + sizeof(laggresp->lrq_reqports[0]) * nports;
		laggresp = kmem_zalloc(allocsiz, KM_SLEEP);

		rv = lagg_get_stats(sc, laggresp, nports);

		outbuf = (char *)laggresp;

		/* copy out only the entries that were filled in */
		nports = MIN(laggresp->lrq_nports, nports);
		outlen = sizeof(*laggresp)
		    + sizeof(laggresp->lrq_reqports[0]) * nports;

		error = copyout(outbuf, ifr->ifr_data, outlen);
		kmem_free(outbuf, allocsiz);

		/* surface a stats failure only if copyout succeeded */
		if (error == 0 && rv != 0)
			error = rv;

		break;
	case SIOCSLAGG:
		error = copyin(ifr->ifr_data, &laggreq, sizeof(laggreq));
		if (error != 0)
			break;

		/*
		 * Requests naming more than one port carry a trailing
		 * array; re-copy the whole request into a buffer big
		 * enough to hold it.  Otherwise the stack copy is
		 * sufficient.
		 */
		nports = laggreq.lrq_nports;
		if (nports > 1) {
			allocsiz = sizeof(struct lagg_req)
			    + sizeof(struct laggreqport) * nports;
			buf = kmem_alloc(allocsiz, KM_SLEEP);

			error = copyin(ifr->ifr_data, buf, allocsiz);
			if (error != 0) {
				kmem_free(buf, allocsiz);
				break;
			}
		} else {
			buf = (void *)&laggreq;
			allocsiz = 0;
		}

		error = lagg_config(sc, buf);
		if (allocsiz > 0)
			kmem_free(buf, allocsiz);
		break;
	case SIOCSIFFLAGS:
		error = ifioctl_common(ifp, cmd, data);
		if (error != 0)
			break;

		/* react to UP/DOWN transitions */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/* marked down while running: stop */
			if_stop(ifp, 1);
			break;
		case IFF_UP:
		case IFF_UP | IFF_RUNNING:
			/* marked up: (re)initialize */
			error = if_init(ifp);
			break;
		}

		if (error != 0)
			break;

		/* Set flags on ports too */
		LAGG_LOCK(sc);
		LAGG_PORTS_FOREACH(sc, lp) {
			(void)lagg_config_promisc(sc, lp);
		}
		LAGG_UNLOCK(sc);
		break;
	case SIOCSIFMTU:
		LAGG_LOCK(sc);
		/* set the MTU to each port */
		LAGG_PORTS_FOREACH(sc, lp) {
			error = lagg_lp_ioctl(lp, cmd, (void *)ifr);

			if (error != 0) {
				LAGG_LOG(sc, LOG_ERR,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original "
				    "MTU(%" PRIu64 ")\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname,
				    ifp->if_mtu);
				break;
			}
		}

		/* set the MTU to the lagg interface */
		if (error == 0)
			error = ether_ioctl(ifp, cmd, data);

		if (error != 0) {
			/* undo the changed MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			LAGG_PORTS_FOREACH(sc, lp) {
				if (lp->lp_ioctl != NULL)
					lagg_lp_ioctl(lp, cmd, (void *)ifr);
			}
		}
		LAGG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		/* multicast membership is mirrored onto the ports */
		if (sc->sc_if.if_type == IFT_ETHER) {
			error = lagg_ether_addmulti(sc, ifr);
		} else {
			error = EPROTONOSUPPORT;
		}
		break;
	case SIOCDELMULTI:
		if (sc->sc_if.if_type == IFT_ETHER) {
			error = lagg_ether_delmulti(sc, ifr);
		} else {
			error = EPROTONOSUPPORT;
		}
		break;
	case SIOCSIFCAP:
		/* capability changes are pushed to all ports */
		error = ether_ioctl(ifp, cmd, data);
		if (error == 0)
			lagg_sync_ifcaps(sc);
		break;
	case SIOCSETHERCAP:
		error = ether_ioctl(ifp, cmd, data);
		if (error == 0)
			lagg_sync_ethcaps(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	return error;
}
789 
790 static int
791 lagg_setup_sysctls(struct lagg_softc *sc)
792 {
793 	struct sysctllog **slog;
794 	const struct sysctlnode **rnode, *hashnode;
795 	const char *ifname;
796 	int error;
797 
798 	slog = &sc->sc_sysctllog;
799 	rnode = &sc->sc_sysctlnode;
800 	ifname = sc->sc_if.if_xname;
801 
802 	error = sysctl_createv(slog, 0, NULL, rnode,
803 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, ifname,
804 	    SYSCTL_DESCR("lagg information and settings"),
805 	    NULL, 0, NULL, 0, CTL_NET, CTL_CREATE, CTL_EOL);
806 	if (error != 0)
807 		goto done;
808 
809 	error = sysctl_createv(slog, 0, rnode, &hashnode,
810 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hash",
811 	    SYSCTL_DESCR("hash calculation settings"),
812 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
813 	if (error != 0)
814 		goto done;
815 
816 	error = sysctl_createv(slog, 0, &hashnode, NULL,
817 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "macaddr",
818 	    SYSCTL_DESCR("use src/dst mac addresses"),
819 	    NULL, 0, &sc->sc_hash_mac, 0, CTL_CREATE, CTL_EOL);
820 	if (error != 0)
821 		goto done;
822 
823 	error = sysctl_createv(slog, 0, &hashnode, NULL,
824 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "ipaddr",
825 	    SYSCTL_DESCR("use src/dst IPv4 addresses"),
826 	    NULL, 0, &sc->sc_hash_ipaddr, 0, CTL_CREATE, CTL_EOL);
827 	if (error != 0)
828 		goto done;
829 
830 	error = sysctl_createv(slog, 0, &hashnode, NULL,
831 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "ip6addr",
832 	    SYSCTL_DESCR("use src/dst IPv6 addresses"),
833 	    NULL, 0, &sc->sc_hash_ip6addr, 0, CTL_CREATE, CTL_EOL);
834 	if (error != 0)
835 		goto done;
836 
837 	error = sysctl_createv(slog, 0, &hashnode, NULL,
838 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "tcp",
839 	    SYSCTL_DESCR("use TCP src/dst port"),
840 	    NULL, 0, &sc->sc_hash_tcp, 0, CTL_CREATE, CTL_EOL);
841 	if (error != 0)
842 		goto done;
843 
844 	error = sysctl_createv(slog, 0, &hashnode, NULL,
845 	   CTLFLAG_READWRITE, CTLTYPE_BOOL, "udp",
846 	   SYSCTL_DESCR("use UDP src/dst port"),
847 	   NULL, 0, &sc->sc_hash_udp, 0, CTL_CREATE, CTL_EOL);
848 done:
849 	if (error != 0) {
850 		LAGG_LOG(sc, LOG_ERR, "unable to create sysctl node\n");
851 		sysctl_teardown(slog);
852 	}
853 
854 	return error;
855 }
856 
857 static void
858 lagg_teardown_sysctls(struct lagg_softc *sc)
859 {
860 
861 	sc->sc_sysctlnode = NULL;
862 	sysctl_teardown(&sc->sc_sysctllog);
863 }
864 
/*
 * Compute a flow hash over a frame for load-balanced port selection.
 *
 * Each field class is folded in only when its sc_hash_* sysctl
 * toggle is enabled.  Source and destination contributions are kept
 * in separate accumulators and XORed together at the end, so the
 * same hash results for both directions of a flow.  If a header
 * cannot be extracted the hash computed so far is returned.
 */
uint32_t
lagg_hashmbuf(struct lagg_softc *sc, struct mbuf *m)
{
	union {
		struct ether_header _eh;
		struct ether_vlan_header _evl;
		struct ip _ip;
		struct ip6_hdr _ip6;
		struct tcphdr _th;
		struct udphdr _uh;
	} buf;
	const struct ether_header *eh;
	const struct ether_vlan_header *evl;
	const struct ip *ip;
	const struct ip6_hdr *ip6;
	const struct tcphdr *th;
	const struct udphdr *uh;
	uint32_t hash, hash_src, hash_dst;
	uint32_t flowlabel;
	uint16_t etype, vlantag;
	uint8_t proto;
	size_t off;

	KASSERT(ISSET(m->m_flags, M_PKTHDR));

	hash = HASH32_BUF_INIT;
	hash_src = HASH32_BUF_INIT;
	hash_dst = HASH32_BUF_INIT;

/* fold the raw bytes of "v" into the accumulator pointed to by hp */
#define LAGG_HASH_ADD(hp, v) do {		\
	*(hp) = hash32_buf(&(v), sizeof(v), *(hp));	\
} while(0)

	eh = lagg_m_extract(m, 0, sizeof(*eh), __alignof(*eh), &buf);
	if (eh == NULL)
		goto out;

	off = ETHER_HDR_LEN;
	etype = ntohs(eh->ether_type);

	if (etype == ETHERTYPE_VLAN) {
		/*
		 * In-frame VLAN tag: re-extract as a vlan header to
		 * reach the encapsulated ethertype.  Note _evl shares
		 * its leading dhost/shost layout with _eh, so eh stays
		 * usable for the MAC hash below even if buf is reused.
		 */
		evl = lagg_m_extract(m, 0, sizeof(*evl), __alignof(*evl),
		    &buf);
		if (evl == NULL)
			goto out;

		vlantag = ntohs(evl->evl_tag);
		etype = ntohs(evl->evl_proto);
		off += ETHER_VLAN_ENCAP_LEN;
	} else if (vlan_has_tag(m)) {
		/* out-of-band tag carried in the mbuf packet header */
		vlantag = vlan_get_tag(m);
	} else {
		vlantag = 0;
	}

	if (sc->sc_hash_mac) {
		LAGG_HASH_ADD(&hash_dst, eh->ether_dhost);
		LAGG_HASH_ADD(&hash_src, eh->ether_shost);
		LAGG_HASH_ADD(&hash, vlantag);
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = lagg_m_extract(m, off, sizeof(*ip), __alignof(*ip), &buf);
		if (ip == NULL)
			goto out;

		if (sc->sc_hash_ipaddr) {
			LAGG_HASH_ADD(&hash_src, ip->ip_src);
			LAGG_HASH_ADD(&hash_dst, ip->ip_dst);
			LAGG_HASH_ADD(&hash, ip->ip_p);
		}
		/* skip IP options, if any */
		off += ip->ip_hl << 2;
		proto = ip->ip_p;
		break;
	case ETHERTYPE_IPV6:
		ip6 = lagg_m_extract(m, off, sizeof(*ip6), __alignof(*ip6),
		    &buf);
		if (ip6 == NULL)
			goto out;

		if (sc->sc_hash_ip6addr) {
			LAGG_HASH_ADD(&hash_src, ip6->ip6_src);
			LAGG_HASH_ADD(&hash_dst, ip6->ip6_dst);
			flowlabel = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
			LAGG_HASH_ADD(&hash, flowlabel);
		}
		/* NOTE: extension headers are not walked; ip6_nxt may
		 * name an extension rather than TCP/UDP */
		proto = ip6->ip6_nxt;
		off += sizeof(*ip6);
		break;

	default:
		/* non-IP: MAC/vlan contribution only, if enabled */
		return hash;
	}

	switch (proto) {
	case IPPROTO_TCP:
		th = lagg_m_extract(m, off, sizeof(*th), __alignof(*th), &buf);
		if (th == NULL)
			goto out;

		if (sc->sc_hash_tcp) {
			LAGG_HASH_ADD(&hash_src, th->th_sport);
			LAGG_HASH_ADD(&hash_dst, th->th_dport);
		}
		break;
	case IPPROTO_UDP:
		uh = lagg_m_extract(m, off, sizeof(*uh), __alignof(*uh), &buf);
		if (uh == NULL)
			goto out;

		if (sc->sc_hash_udp) {
			LAGG_HASH_ADD(&hash_src, uh->uh_sport);
			LAGG_HASH_ADD(&hash_dst, uh->uh_dport);
		}
		break;
	}

out:
	/* symmetric combine: src/dst order does not change the hash */
	hash_src ^= hash_dst;
	LAGG_HASH_ADD(&hash, hash_src);
#undef LAGG_HASH_ADD

	return hash;
}
990 
991 static int
992 lagg_tx_common(struct ifnet *ifp, struct mbuf *m)
993 {
994 	struct lagg_variant *var;
995 	lagg_proto pr;
996 	struct psref psref;
997 	int error;
998 
999 	var = lagg_variant_getref(ifp->if_softc, &psref);
1000 
1001 	if (__predict_false(var == NULL)) {
1002 		m_freem(m);
1003 		if_statinc(ifp, if_oerrors);
1004 		return ENOENT;
1005 	}
1006 
1007 	pr = var->lv_proto;
1008 	if (__predict_true(lagg_protos[pr].pr_transmit != NULL)) {
1009 		error = lagg_protos[pr].pr_transmit(var->lv_psc, m);
1010 		/* mbuf is already freed */
1011 	} else {
1012 		m_freem(m);
1013 		if_statinc(ifp, if_oerrors);
1014 		error = ENOBUFS;
1015 	}
1016 
1017 	lagg_variant_putref(var, &psref);
1018 
1019 	return error;
1020 }
1021 
/* if_transmit handler: defer to the shared transmit path. */
static int
lagg_transmit(struct ifnet *ifp, struct mbuf *m)
{

	return lagg_tx_common(ifp, m);
}
1028 
1029 static void
1030 lagg_start(struct ifnet *ifp)
1031 {
1032 	struct mbuf *m;
1033 
1034 	for (;;) {
1035 		IFQ_DEQUEUE(&ifp->if_snd, m);
1036 		if (m == NULL)
1037 			break;
1038 
1039 		(void)lagg_tx_common(ifp, m);
1040 	}
1041 }
1042 
1043 void
1044 lagg_output(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1045 {
1046 	struct ifnet *ifp;
1047 	int len, error;
1048 	short mflags;
1049 
1050 	ifp = &sc->sc_if;
1051 	len = m->m_pkthdr.len;
1052 	mflags = m->m_flags;
1053 
1054 	error = lagg_port_xmit(lp, m);
1055 	if (error) {
1056 		/* mbuf is already freed */
1057 		if_statinc(ifp, if_oerrors);
1058 	}
1059 
1060 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1061 	if_statinc_ref(nsr, if_opackets);
1062 	if_statadd_ref(nsr, if_obytes, len);
1063 	if (mflags & M_MCAST)
1064 		if_statinc_ref(nsr, if_omcasts);
1065 	IF_STAT_PUTREF(ifp);
1066 }
1067 
1068 static struct mbuf *
1069 lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1070 {
1071 	struct psref psref;
1072 	struct lagg_variant *var;
1073 	lagg_proto pr;
1074 
1075 	var = lagg_variant_getref(sc, &psref);
1076 
1077 	if (var == NULL) {
1078 		sc->sc_novar.ev_count++;
1079 		m_freem(m);
1080 		return NULL;
1081 	}
1082 
1083 	pr = var->lv_proto;
1084 
1085 	if (lagg_protos[pr].pr_input != NULL) {
1086 		m = lagg_protos[pr].pr_input(var->lv_psc, lp, m);
1087 	} else {
1088 		m_freem(m);
1089 		m = NULL;
1090 	}
1091 
1092 	lagg_variant_putref(var, &psref);
1093 
1094 	return m;
1095 }
1096 
/*
 * Input hook invoked from each member port's ethernet input path.
 *
 * Verifies the port still belongs to a lagg (under pserialize, then
 * pinned with a psref), filters frames the aggregate should not see,
 * runs the port's pfil hooks, offers the frame to the protocol
 * (which may consume it, e.g. LACP control frames), and finally
 * injects the remainder into the lagg interface.
 */
static void
lagg_input_ethernet(struct ifnet *ifp_port, struct mbuf *m)
{
	struct ifnet *ifp;
	struct psref psref;
	struct lagg_port *lp;
	struct ether_header *eh;
	int s;

	/* sanity check */
	s = pserialize_read_enter();
	lp = atomic_load_consume(&ifp_port->if_lagg);
	if (lp == NULL) {
		/* This interface is not a member of lagg */
		pserialize_read_exit(s);
		m_freem(m);
		if_statinc(ifp_port, if_ierrors);
		return;
	}
	/* pin the port before leaving the pserialize section */
	lagg_port_getref(lp, &psref);
	pserialize_read_exit(s);

	ifp = &lp->lp_softc->sc_if;

	/*
	 * Drop promiscuously received packets
	 * if we are not in promiscuous mode.
	 */

	/* make the ethernet header contiguous for inspection */
	if (__predict_false(m->m_len < (int)sizeof(*eh))) {
		if ((m = m_pullup(m, sizeof(*eh))) == NULL) {
			if_statinc(ifp, if_ierrors);
			goto out;
		}
	}

	eh = mtod(m, struct ether_header *);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		/*
		 * If this is not a simplex interface, drop the packet
		 * if it came from us.
		 */
		if ((ifp->if_flags & IFF_SIMPLEX) == 0 &&
		    memcmp(CLLADDR(ifp->if_sadl), eh->ether_shost,
		    ETHER_ADDR_LEN) == 0) {
			goto drop;
		}

		if_statinc(ifp_port, if_imcasts);
	} else {
		/*
		 * Unicast not addressed to the aggregate: drop it if
		 * only the port (not the lagg) is promiscuous.
		 */
		if ((ifp->if_flags & IFF_PROMISC) == 0 &&
		    (ifp_port->if_flags & IFF_PROMISC) != 0 &&
		    memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
		    ETHER_ADDR_LEN) != 0)
			goto drop;
	}

	if_statadd(ifp_port, if_ibytes, m->m_pkthdr.len);

	if (pfil_run_hooks(ifp_port->if_pfil, &m,
	    ifp_port, PFIL_IN) != 0)
		goto out;

	m = lagg_proto_input(lp->lp_softc, lp, m);
	if (m != NULL) {
		/* present the frame as arriving on the lagg itself */
		m_set_rcvif(m, ifp);
		m->m_flags &= ~M_PROMISC;
		if_input(ifp, m);
	}

out:
	lagg_port_putref(lp, &psref);
	return;

drop:
	lagg_port_putref(lp, &psref);
	m_freem(m);
	if_statinc(ifp_port, if_iqdrops);
	return;
}
1178 
1179 static int
1180 lagg_media_change(struct ifnet *ifp)
1181 {
1182 
1183 	if (ISSET(ifp->if_flags, IFF_DEBUG))
1184 		printf("%s: ignore media change\n", ifp->if_xname);
1185 
1186 	return 0;
1187 }
1188 
1189 static void
1190 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1191 {
1192 	struct lagg_softc *sc;
1193 	struct lagg_port *lp;
1194 
1195 	sc = ifp->if_softc;
1196 
1197 	imr->ifm_status = IFM_AVALID;
1198 	imr->ifm_active = IFM_ETHER | IFM_AUTO;
1199 
1200 	LAGG_LOCK(sc);
1201 	LAGG_PORTS_FOREACH(sc, lp) {
1202 		if (lagg_portactive(lp))
1203 			imr->ifm_status |= IFM_ACTIVE;
1204 	}
1205 	LAGG_UNLOCK(sc);
1206 }
1207 
1208 static int
1209 lagg_port_vlan_cb(struct lagg_port *lp,
1210     struct lagg_vlantag *lvt, bool set)
1211 {
1212 	struct ifnet *ifp_port;
1213 	int error;
1214 
1215 	if (lp->lp_iftype != IFT_ETHER)
1216 		return 0;
1217 
1218 	error = 0;
1219 	ifp_port = lp->lp_ifp;
1220 
1221 	if (set) {
1222 		error = ether_add_vlantag(ifp_port,
1223 		    lvt->lvt_vtag, NULL);
1224 	} else {
1225 		error = ether_del_vlantag(ifp_port,
1226 		    lvt->lvt_vtag);
1227 	}
1228 
1229 	return error;
1230 }
1231 
1232 static int
1233 lagg_vlan_cb(struct ethercom *ec, uint16_t vtag, bool set)
1234 {
1235 	struct ifnet *ifp;
1236 	struct lagg_softc *sc;
1237 	struct lagg_vlantag *lvt, *lvt0;
1238 	struct lagg_port *lp;
1239 	int error;
1240 
1241 	ifp = (struct ifnet *)ec;
1242 	sc = ifp->if_softc;
1243 
1244 	if (set) {
1245 		lvt = kmem_zalloc(sizeof(*lvt), KM_SLEEP);
1246 		lvt->lvt_vtag = vtag;
1247 		TAILQ_INSERT_TAIL(&sc->sc_vtags, lvt, lvt_entry);
1248 	} else {
1249 		TAILQ_FOREACH_SAFE(lvt, &sc->sc_vtags, lvt_entry, lvt0) {
1250 			if (lvt->lvt_vtag == vtag) {
1251 				TAILQ_REMOVE(&sc->sc_vtags, lvt, lvt_entry);
1252 				break;
1253 			}
1254 		}
1255 
1256 		if (lvt == NULL)
1257 			return ENOENT;
1258 	}
1259 
1260 	KASSERT(lvt != NULL);
1261 	LAGG_PORTS_FOREACH(sc, lp) {
1262 		error = lagg_port_vlan_cb(lp, lvt, set);
1263 		if (error != 0) {
1264 			LAGG_LOG(sc, LOG_WARNING,
1265 			    "%s failed to configure vlan on %d\n",
1266 			    lp->lp_ifp->if_xname, error);
1267 		}
1268 	}
1269 
1270 	return 0;
1271 }
1272 
1273 static struct lagg_softc *
1274 lagg_softc_alloc(enum lagg_iftypes ift)
1275 {
1276 	struct lagg_softc *sc;
1277 	size_t s;
1278 
1279 	s = lagg_sizeof_softc(ift);
1280 	KASSERT(s > 0);
1281 
1282 	sc = kmem_zalloc(s, KM_SLEEP);
1283 	KASSERT(sc != NULL);
1284 
1285 	return sc;
1286 }
1287 
1288 static void
1289 lagg_softc_free(struct lagg_softc *sc)
1290 {
1291 
1292 	kmem_free(sc,
1293 	    lagg_sizeof_softc(sc->sc_iftype));
1294 }
1295 
/*
 * Publish newvar as the current protocol variant and wait until no
 * reader can still hold a reference to the previous one.  The caller
 * retains ownership of the old variant's memory and must free it.
 */
static void
lagg_variant_update(struct lagg_softc *sc, struct lagg_variant *newvar)
{
	struct lagg_variant *oldvar;

	KASSERT(LAGG_LOCKED(sc));

	psref_target_init(&newvar->lv_psref, lagg_psref_class);

	oldvar = sc->sc_var;
	/* Pairs with atomic_load_consume() in lagg_variant_getref(). */
	atomic_store_release(&sc->sc_var, newvar);
	/* Wait for pserialize readers still looking at oldvar. */
	pserialize_perform(sc->sc_psz);

	/* Then wait for psref holders of oldvar to drain. */
	if (__predict_true(oldvar != NULL))
		psref_target_destroy(&oldvar->lv_psref, lagg_psref_class);
}
1312 
/*
 * Acquire a psref-protected reference to the current protocol variant,
 * or NULL when no protocol is configured.  The caller must be bound to
 * a CPU (curlwp_bind) and release with lagg_variant_putref().
 */
static struct lagg_variant *
lagg_variant_getref(struct lagg_softc *sc, struct psref *psref)
{
	struct lagg_variant *var;
	int s;

	s = pserialize_read_enter();
	/* Pairs with atomic_store_release() in lagg_variant_update(). */
	var = atomic_load_consume(&sc->sc_var);
	if (var == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}

	/* Acquire inside the read section so var cannot be destroyed. */
	psref_acquire(psref, &var->lv_psref, lagg_psref_class);
	pserialize_read_exit(s);

	return var;
}
1331 
1332 static void
1333 lagg_variant_putref(struct lagg_variant *var, struct psref *psref)
1334 {
1335 
1336 	if (__predict_false(var == NULL))
1337 		return;
1338 	psref_release(psref, &var->lv_psref, lagg_psref_class);
1339 }
1340 
1341 static int
1342 lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr,
1343     struct lagg_proto_softc **psc)
1344 {
1345 
1346 	KASSERT(lagg_protos[pr].pr_attach != NULL);
1347 	return lagg_protos[pr].pr_attach(sc, psc);
1348 }
1349 
1350 static void
1351 lagg_proto_detach(struct lagg_variant *oldvar)
1352 {
1353 	lagg_proto pr;
1354 
1355 	pr = oldvar->lv_proto;
1356 
1357 	if (lagg_protos[pr].pr_detach == NULL)
1358 		return;
1359 
1360 	lagg_protos[pr].pr_detach(oldvar->lv_psc);
1361 }
1362 
1363 static int
1364 lagg_proto_updown(struct lagg_softc *sc, bool is_up)
1365 {
1366 	struct lagg_variant *var;
1367 	struct psref psref;
1368 	lagg_proto pr;
1369 	int error, bound;
1370 
1371 	error = 0;
1372 	bound = curlwp_bind();
1373 
1374 	var = lagg_variant_getref(sc, &psref);
1375 	if (var == NULL) {
1376 		curlwp_bindx(bound);
1377 		return ENXIO;
1378 	}
1379 
1380 	pr = var->lv_proto;
1381 
1382 	if (is_up && lagg_protos[pr].pr_up != NULL) {
1383 		error = lagg_protos[pr].pr_up(var->lv_psc);
1384 	} else if (!is_up && lagg_protos[pr].pr_down != NULL) {
1385 		lagg_protos[pr].pr_down(var->lv_psc);
1386 	}
1387 
1388 	lagg_variant_putref(var, &psref);
1389 	curlwp_bindx(bound);
1390 
1391 	return error;
1392 }
1393 
1394 static int
1395 lagg_proto_up(struct lagg_softc *sc)
1396 {
1397 
1398 	return lagg_proto_updown(sc, true);
1399 }
1400 
1401 static void
1402 lagg_proto_down(struct lagg_softc *sc)
1403 {
1404 
1405 	(void)lagg_proto_updown(sc, false);
1406 }
1407 
1408 static int
1409 lagg_proto_portctrl(struct lagg_softc *sc, struct lagg_port *lp,
1410     enum lagg_portctrl ctrl)
1411 {
1412 	struct lagg_variant *var;
1413 	struct psref psref;
1414 	lagg_proto pr;
1415 	int error, bound;
1416 
1417 	error = 0;
1418 	bound = curlwp_bind();
1419 
1420 	var = lagg_variant_getref(sc, &psref);
1421 	if (var == NULL) {
1422 		curlwp_bindx(bound);
1423 		return ENXIO;
1424 	}
1425 
1426 	pr = var->lv_proto;
1427 
1428 	switch (ctrl) {
1429 	case LAGG_PORTCTRL_ALLOC:
1430 		if (lagg_protos[pr].pr_allocport == NULL) {
1431 			goto nosupport;
1432 		}
1433 		error = lagg_protos[pr].pr_allocport(var->lv_psc, lp);
1434 		break;
1435 	case LAGG_PORTCTRL_FREE:
1436 		if (lagg_protos[pr].pr_freeport == NULL) {
1437 			goto nosupport;
1438 		}
1439 		lagg_protos[pr].pr_freeport(var->lv_psc, lp);
1440 		break;
1441 	case LAGG_PORTCTRL_START:
1442 		if (lagg_protos[pr].pr_startport == NULL) {
1443 			goto nosupport;
1444 		}
1445 		lagg_protos[pr].pr_startport(var->lv_psc, lp);
1446 		break;
1447 	case LAGG_PORTCTRL_STOP:
1448 		if (lagg_protos[pr].pr_stopport == NULL) {
1449 			goto nosupport;
1450 		}
1451 		lagg_protos[pr].pr_stopport(var->lv_psc, lp);
1452 		break;
1453 	default:
1454 		goto nosupport;
1455 	}
1456 
1457 	lagg_variant_putref(var, &psref);
1458 	curlwp_bindx(bound);
1459 	return error;
1460 
1461 nosupport:
1462 	lagg_variant_putref(var, &psref);
1463 	curlwp_bindx(bound);
1464 	return EPROTONOSUPPORT;
1465 }
1466 
1467 static int
1468 lagg_proto_allocport(struct lagg_softc *sc, struct lagg_port *lp)
1469 {
1470 
1471 	return lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_ALLOC);
1472 }
1473 
1474 static void
1475 lagg_proto_freeport(struct lagg_softc *sc, struct lagg_port *lp)
1476 {
1477 
1478 	lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_FREE);
1479 }
1480 
1481 static void
1482 lagg_proto_startport(struct lagg_softc *sc, struct lagg_port *lp)
1483 {
1484 
1485 	lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_START);
1486 }
1487 
1488 static void
1489 lagg_proto_stopport(struct lagg_softc *sc, struct lagg_port *lp)
1490 {
1491 
1492 	lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_STOP);
1493 }
1494 
1495 static void
1496 lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
1497 {
1498 	struct lagg_variant *var;
1499 	struct psref psref;
1500 	lagg_proto pr;
1501 	int bound;
1502 
1503 	KASSERT(IFNET_LOCKED(lp->lp_ifp));
1504 
1505 	bound = curlwp_bind();
1506 	var = lagg_variant_getref(sc, &psref);
1507 
1508 	if (var == NULL) {
1509 		curlwp_bindx(bound);
1510 		return;
1511 	}
1512 
1513 	pr = var->lv_proto;
1514 
1515 	if (lagg_protos[pr].pr_linkstate)
1516 		lagg_protos[pr].pr_linkstate(var->lv_psc, lp);
1517 
1518 	lagg_variant_putref(var, &psref);
1519 	curlwp_bindx(bound);
1520 }
1521 
1522 static void
1523 lagg_proto_stat(struct lagg_variant *var, struct laggreqproto *resp)
1524 {
1525 	lagg_proto pr;
1526 
1527 	pr = var->lv_proto;
1528 
1529 	if (lagg_protos[pr].pr_protostat != NULL)
1530 		lagg_protos[pr].pr_protostat(var->lv_psc, resp);
1531 }
1532 
1533 static void
1534 lagg_proto_portstat(struct lagg_variant *var, struct lagg_port *lp,
1535     struct laggreqport *resp)
1536 {
1537 	lagg_proto pr;
1538 
1539 	pr = var->lv_proto;
1540 
1541 	if (lagg_protos[pr].pr_portstat != NULL)
1542 		lagg_protos[pr].pr_portstat(var->lv_psc, lp, resp);
1543 }
1544 
1545 static int
1546 lagg_proto_ioctl(struct lagg_softc *sc, struct lagg_req *lreq)
1547 {
1548 	struct lagg_variant *var;
1549 	struct psref psref;
1550 	lagg_proto pr;
1551 	int bound, error;
1552 
1553 	error = ENOTTY;
1554 	bound = curlwp_bind();
1555 	var = lagg_variant_getref(sc, &psref);
1556 
1557 	if (var == NULL) {
1558 		error = ENXIO;
1559 		goto done;
1560 	}
1561 
1562 	pr = var->lv_proto;
1563 	if (pr != lreq->lrq_proto) {
1564 		error = EBUSY;
1565 		goto done;
1566 	}
1567 
1568 	if (lagg_protos[pr].pr_ioctl != NULL) {
1569 		error = lagg_protos[pr].pr_ioctl(var->lv_psc,
1570 		    &lreq->lrq_reqproto);
1571 	}
1572 
1573 done:
1574 	if (var != NULL)
1575 		lagg_variant_putref(var, &psref);
1576 	curlwp_bindx(bound);
1577 	return error;
1578 }
1579 
/*
 * Switch the lagg to protocol pr.  A new variant is allocated up front
 * (outside the lock, since KM_SLEEP may block), published with
 * lagg_variant_update(), and the old protocol is detached and freed.
 * Idempotent when pr is already the active protocol.
 */
static int
lagg_pr_attach(struct lagg_softc *sc, lagg_proto pr)
{
	struct lagg_variant *newvar, *oldvar;
	struct lagg_proto_softc *psc;
	bool cleanup_oldvar;
	int error;

	error = 0;
	cleanup_oldvar = false;
	newvar = kmem_alloc(sizeof(*newvar), KM_SLEEP);

	LAGG_LOCK(sc);
	oldvar = sc->sc_var;

	/* Already running the requested protocol: nothing to do. */
	if (oldvar != NULL && oldvar->lv_proto == pr) {
		error = 0;
		goto done;
	}

	error = lagg_proto_attach(sc, pr, &psc);
	if (error != 0)
		goto done;

	newvar->lv_proto = pr;
	newvar->lv_psc = psc;

	/* Publish; after this readers can no longer see oldvar. */
	lagg_variant_update(sc, newvar);
	newvar = NULL;	/* ownership transferred to sc */

	if (oldvar != NULL) {
		lagg_proto_detach(oldvar);
		cleanup_oldvar = true;
	}
done:
	LAGG_UNLOCK(sc);

	/* newvar is still set only on the error/no-op paths. */
	if (newvar != NULL)
		kmem_free(newvar, sizeof(*newvar));
	if (cleanup_oldvar)
		kmem_free(oldvar, sizeof(*oldvar));

	return error;
}
1624 
/*
 * Tear down the active protocol: unpublish the variant, wait for
 * pserialize readers, detach the protocol, and free the variant.
 */
static void
lagg_pr_detach(struct lagg_softc *sc)
{
	struct lagg_variant *var;

	LAGG_LOCK(sc);

	var = sc->sc_var;
	/* Readers now observe "no protocol configured". */
	atomic_store_release(&sc->sc_var, NULL);
	pserialize_perform(sc->sc_psz);

	if (var != NULL)
		lagg_proto_detach(var);

	LAGG_UNLOCK(sc);

	if (var != NULL)
		kmem_free(var, sizeof(*var));
}
1644 
/*
 * SIOCADDMULTI handler for the lagg interface: register the multicast
 * address on the lagg's ethercom, remember it in sc_mclist, and
 * propagate the ioctl to every member port so their hardware filters
 * match.  Only meaningful for an ethernet-type lagg.
 */
static int
lagg_ether_addmulti(struct lagg_softc *sc, struct ifreq *ifr)
{
	struct lagg_port *lp;
	struct lagg_mc_entry *mc;
	struct ethercom *ec;
	const struct sockaddr *sa;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	if (sc->sc_if.if_type != IFT_ETHER)
		return EPROTONOSUPPORT;

	ec = (struct ethercom *)&sc->sc_if;
	sa = ifreq_getaddr(SIOCADDMULTI, ifr);

	/* ENETRESET means a new address range was installed. */
	error = ether_addmulti(sa, ec);
	if (error != ENETRESET)
		return error;

	/* sa was already validated by ether_addmulti(). */
	error = ether_multiaddr(sa, addrlo, addrhi);
	KASSERT(error == 0);

	mc = kmem_zalloc(sizeof(*mc), KM_SLEEP);

	ETHER_LOCK(ec);
	mc->mc_enm = ether_lookup_multi(addrlo, addrhi, ec);
	ETHER_UNLOCK(ec);

	/* Must exist: ether_addmulti() just inserted it. */
	KASSERT(mc->mc_enm != NULL);

	/* Mirror the addition onto each member port, best effort. */
	LAGG_LOCK(sc);
	LAGG_PORTS_FOREACH(sc, lp) {
		(void)lagg_lp_ioctl(lp, SIOCADDMULTI, (void *)ifr);
	}
	LAGG_UNLOCK(sc);

	/* Keep a copy so future ports can be synced (lagg_port_syncmulti). */
	KASSERT(sa->sa_len <= sizeof(mc->mc_addr));
	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&sc->sc_mclist, mc, mc_entry);

	return 0;
}
1688 
/*
 * SIOCDELMULTI handler for the lagg interface: the inverse of
 * lagg_ether_addmulti().  Looks up the tracked entry, removes the
 * address from the lagg's ethercom, propagates the deletion to all
 * member ports, and frees the bookkeeping entry.
 */
static int
lagg_ether_delmulti(struct lagg_softc *sc, struct ifreq *ifr)
{
	struct lagg_port *lp;
	struct lagg_mc_entry *mc;
	const struct sockaddr *sa;
	struct ethercom *ec;
	struct ether_multi *enm;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	ec = (struct ethercom *)&sc->sc_if;
	sa = ifreq_getaddr(SIOCDELMULTI, ifr);
	error = ether_multiaddr(sa, addrlo, addrhi);
	if (error != 0)
		return error;

	ETHER_LOCK(ec);
	enm = ether_lookup_multi(addrlo, addrhi, ec);
	ETHER_UNLOCK(ec);

	if (enm == NULL)
		return ENOENT;

	/* Find the matching bookkeeping entry in sc_mclist. */
	LIST_FOREACH(mc, &sc->sc_mclist, mc_entry) {
		if (mc->mc_enm == enm)
			break;
	}

	if (mc == NULL)
		return ENOENT;

	/* Anything other than ENETRESET means nothing was removed. */
	error = ether_delmulti(sa, ec);
	if (error != ENETRESET)
		return error;

	/* Mirror the deletion onto each member port, best effort. */
	LAGG_LOCK(sc);
	LAGG_PORTS_FOREACH(sc, lp) {
		(void)lagg_lp_ioctl(lp, SIOCDELMULTI, (void *)ifr);
	}
	LAGG_UNLOCK(sc);

	LIST_REMOVE(mc, mc_entry);
	kmem_free(mc, sizeof(*mc));

	return 0;
}
1736 
1737 static void
1738 lagg_port_multi(struct lagg_softc *sc, struct lagg_port *lp,
1739     u_long cmd)
1740 {
1741 	struct lagg_mc_entry *mc;
1742 	struct ifreq ifr;
1743 	struct ifnet *ifp_port;
1744 	const struct sockaddr *sa;
1745 
1746 	ifp_port = lp->lp_ifp;
1747 
1748 	memset(&ifr, 0, sizeof(ifr));
1749 	strlcpy(ifr.ifr_name, ifp_port->if_xname, sizeof(ifr.ifr_name));
1750 
1751 	LIST_FOREACH(mc, &sc->sc_mclist, mc_entry) {
1752 		sa = (struct sockaddr *)&mc->mc_addr;
1753 		KASSERT(sizeof(ifr.ifr_space) >= sa->sa_len);
1754 		memcpy(&ifr.ifr_addr, sa, sa->sa_len);
1755 		(void)lagg_lp_ioctl(lp, cmd, (void *)&ifr);
1756 	}
1757 
1758 }
1759 
1760 static void
1761 lagg_port_syncmulti(struct lagg_softc *sc, struct lagg_port *lp)
1762 {
1763 
1764 	lagg_port_multi(sc, lp, SIOCADDMULTI);
1765 }
1766 
1767 static void
1768 lagg_port_purgemulti(struct lagg_softc *sc, struct lagg_port *lp)
1769 {
1770 
1771 	lagg_port_multi(sc, lp, SIOCDELMULTI);
1772 }
1773 
1774 static void
1775 lagg_port_vlan(struct lagg_softc *sc, struct lagg_port *lp,
1776     bool set)
1777 {
1778 	struct lagg_vlantag *lvt;
1779 	int error;
1780 
1781 	TAILQ_FOREACH(lvt, &sc->sc_vtags, lvt_entry) {
1782 		error = lagg_port_vlan_cb(lp, lvt, set);
1783 		if (error != 0) {
1784 			LAGG_LOG(sc, LOG_WARNING,
1785 			    "%s failed to configure vlan on %d\n",
1786 			    lp->lp_ifp->if_xname, error);
1787 		}
1788 	}
1789 }
1790 
1791 static void
1792 lagg_port_syncvlan(struct lagg_softc *sc, struct lagg_port *lp)
1793 
1794 {
1795 	lagg_port_vlan(sc, lp, true);
1796 }
1797 
1798 static void
1799 lagg_port_purgevlan(struct lagg_softc *sc, struct lagg_port *lp)
1800 {
1801 
1802 	lagg_port_vlan(sc, lp, false);
1803 }
1804 
1805 static int
1806 lagg_setifcaps(struct lagg_port *lp, uint64_t cap)
1807 {
1808 	struct ifcapreq ifcr;
1809 	int error;
1810 
1811 	if (lp->lp_ifp->if_capenable == cap)
1812 		return 0;
1813 
1814 	memset(&ifcr, 0, sizeof(ifcr));
1815 	ifcr.ifcr_capenable = cap;
1816 
1817 	IFNET_LOCK(lp->lp_ifp);
1818 	error = LAGG_PORT_IOCTL(lp, SIOCSIFCAP, &ifcr);
1819 	IFNET_UNLOCK(lp->lp_ifp);
1820 
1821 	return error;
1822 }
1823 
1824 static void
1825 lagg_sync_ifcaps(struct lagg_softc *sc)
1826 {
1827 	struct lagg_port *lp;
1828 	struct ifnet *ifp;
1829 	int error = 0;
1830 
1831 	ifp = (struct ifnet *)&sc->sc_if;
1832 
1833 	LAGG_LOCK(sc);
1834 	LAGG_PORTS_FOREACH(sc, lp) {
1835 		error = lagg_setifcaps(lp, ifp->if_capenable);
1836 
1837 		if (error != 0) {
1838 			LAGG_LOG(sc, LOG_WARNING,
1839 			    "failed to update capabilities "
1840 			    "of %s, error=%d\n",
1841 			    lp->lp_ifp->if_xname, error);
1842 		}
1843 	}
1844 	LAGG_UNLOCK(sc);
1845 }
1846 
1847 static int
1848 lagg_setethcaps(struct lagg_port *lp, int cap)
1849 {
1850 	struct ethercom *ec;
1851 	struct eccapreq eccr;
1852 	int error;
1853 
1854 	KASSERT(lp->lp_iftype == IFT_ETHER);
1855 	ec = (struct ethercom *)lp->lp_ifp;
1856 
1857 	if (ec->ec_capenable == cap)
1858 		return 0;
1859 
1860 	memset(&eccr, 0, sizeof(eccr));
1861 	eccr.eccr_capenable = cap;
1862 
1863 	IFNET_LOCK(lp->lp_ifp);
1864 	error = LAGG_PORT_IOCTL(lp, SIOCSETHERCAP, &eccr);
1865 	IFNET_UNLOCK(lp->lp_ifp);
1866 
1867 	return error;
1868 }
1869 
1870 static void
1871 lagg_sync_ethcaps(struct lagg_softc *sc)
1872 {
1873 	struct ethercom *ec;
1874 	struct lagg_port *lp;
1875 	int error;
1876 
1877 	ec = (struct ethercom *)&sc->sc_if;
1878 
1879 	LAGG_LOCK(sc);
1880 	LAGG_PORTS_FOREACH(sc, lp) {
1881 		if (lp->lp_iftype != IFT_ETHER)
1882 			continue;
1883 
1884 		error = lagg_setethcaps(lp, ec->ec_capenable);
1885 		if (error != 0) {
1886 			LAGG_LOG(sc, LOG_WARNING,
1887 			    "failed to update ether "
1888 			    "capabilities"" of %s, error=%d\n",
1889 			    lp->lp_ifp->if_xname, error);
1890 		}
1891 
1892 	}
1893 	LAGG_UNLOCK(sc);
1894 }
1895 
/*
 * Recompute the lagg interface's capability masks as the intersection
 * of all member ports', then apply the common enabled set back to each
 * port.  Applying may need several passes because enabling one
 * capability on a port can implicitly toggle others.
 */
static void
lagg_ifcap_update(struct lagg_softc *sc)
{
	struct ifnet *ifp;
	struct lagg_port *lp;
	uint64_t cap, ena, pena;
	size_t i;

	KASSERT(LAGG_LOCKED(sc));

	/* Get common capabilities for the lagg ports */
	ena = ~(uint64_t)0;
	cap = ~(uint64_t)0;
	LAGG_PORTS_FOREACH(sc, lp) {
		ena &= lp->lp_ifp->if_capenable;
		cap &= lp->lp_ifp->if_capabilities;
	}

	/* ~0 here means the port list was empty: no capabilities. */
	if (ena == ~(uint64_t)0)
		ena = 0;
	if (cap == ~(uint64_t)0)
		cap = 0;

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	for (i = 0; i < LAGG_SETCAPS_RETRY; i++) {
		pena = ena;
		LAGG_PORTS_FOREACH(sc, lp) {
			lagg_setifcaps(lp, ena);
			ena &= lp->lp_ifp->if_capenable;
		}

		/* Fixed point reached: all ports agree. */
		if (pena == ena)
			break;
	}

	if (pena != ena) {
		LAGG_LOG(sc, LOG_DEBUG, "couldn't set "
		    "capabilities 0x%08"PRIx64"\n", pena);
	}

	ifp = &sc->sc_if;

	/* Publish the result on the lagg interface itself. */
	if (ifp->if_capabilities != cap ||
	    ifp->if_capenable != ena) {
		ifp->if_capabilities = cap;
		ifp->if_capenable = ena;

		LAGG_LOG(sc, LOG_DEBUG,"capabilities "
		    "0x%08"PRIx64" enabled 0x%08"PRIx64"\n",
		    cap, ena);
	}
}
1951 
/*
 * Ethernet analogue of lagg_ifcap_update(): intersect the ethernet
 * capability masks of all member ports and apply the common enabled
 * set back to them, iterating to a fixed point.  Only meaningful when
 * the lagg itself is an ethernet interface.
 */
static void
lagg_ethercap_update(struct lagg_softc *sc)
{
	struct ethercom *ec;
	struct lagg_port *lp;
	int cap, ena, pena;
	size_t i;

	KASSERT(LAGG_LOCKED(sc));

	if (sc->sc_if.if_type != IFT_ETHER)
		return;

	/* Get common enabled capabilities for the lagg ports */
	ena = ~0;
	cap = ~0;
	LAGG_PORTS_FOREACH(sc, lp) {
		switch (lp->lp_iftype) {
		case IFT_ETHER:
			ec = (struct ethercom *)lp->lp_ifp;
			ena &= ec->ec_capenable;
			cap &= ec->ec_capabilities;
			break;
		case IFT_L2TP:
			/* L2TP has no ethercom; assume only MTU caps. */
			ena &= (ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU);
			cap &= (ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU);
			break;
		default:
			/* Unknown port type: no common capabilities. */
			ena = 0;
			cap = 0;
		}
	}

	/* ~0 here means the port list was empty: no capabilities. */
	if (ena == ~0)
		ena = 0;
	if (cap == ~0)
		cap = 0;

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	for (i = 0; i < LAGG_SETCAPS_RETRY; i++) {
		pena = ena;
		LAGG_PORTS_FOREACH(sc, lp) {
			if (lp->lp_iftype != IFT_ETHER)
				continue;

			ec = (struct ethercom *)lp->lp_ifp;
			lagg_setethcaps(lp, ena);
			ena &= ec->ec_capenable;
		}

		/* Fixed point reached: all ports agree. */
		if (pena == ena)
			break;
	}

	if (pena != ena) {
		LAGG_LOG(sc, LOG_DEBUG, "couldn't set "
		    "ether capabilities 0x%08x\n", pena);
	}

	ec = (struct ethercom *)&sc->sc_if;

	/* Publish the result on the lagg interface itself. */
	if (ec->ec_capabilities != cap ||
	    ec->ec_capenable != ena) {
		ec->ec_capabilities = cap;
		ec->ec_capenable = ena;

		LAGG_LOG(sc, LOG_DEBUG,
		    "ether capabilities 0x%08x"
		    " enabled 0x%08x\n", cap, ena);
	}
}
2026 
/* Recompute both interface and ethernet capabilities of the lagg. */
static void
lagg_capabilities_update(struct lagg_softc *sc)
{
	lagg_ifcap_update(sc);
	lagg_ethercap_update(sc);
}
2034 
2035 static int
2036 lagg_setmtu(struct ifnet *ifp, uint64_t mtu)
2037 {
2038 	struct lagg_softc *sc __LAGGDEBUGUSED;
2039 	struct lagg_port *lp;
2040 	struct ifreq ifr;
2041 	int error;
2042 
2043 	KASSERT(IFNET_LOCKED(ifp));
2044 
2045 	memset(&ifr, 0, sizeof(ifr));
2046 	ifr.ifr_mtu = mtu;
2047 	lp = ifp->if_lagg;
2048 
2049 	if (lp != NULL) {
2050 		/* ioctl for port interface */
2051 		error = lp->lp_ioctl(ifp, SIOCSIFMTU, &ifr);
2052 		sc = lp->lp_softc;
2053 	} else {
2054 		/* ioctl for lagg interface */
2055 		error = ether_ioctl(ifp, SIOCSIFMTU, &ifr);
2056 		sc = ifp->if_softc;
2057 	}
2058 
2059 	if (error != 0) {
2060 		LAGG_DPRINTF(sc,
2061 		    "couldn't change MTU for %s\n",
2062 		    ifp->if_xname);
2063 	}
2064 
2065 	return error;
2066 }
2067 
/*
 * Set the link-level address of a member port.  For ethernet ports,
 * lladdr == NULL restores the port's original address (lp_lladdr);
 * otherwise the given address is applied (skipped when already set).
 * The port is re-initialized if it was running so the hardware picks
 * up the new address.  Non-ethernet ports just get a fresh sadl.
 */
static void
lagg_port_setsadl(struct lagg_port *lp, const uint8_t *lladdr)
{
	struct ifnet *ifp_port;
	int error;

	ifp_port = lp->lp_ifp;

	KASSERT(LAGG_LOCKED(lp->lp_softc));
	KASSERT(IFNET_LOCKED(ifp_port));

	switch (lp->lp_iftype) {
	case IFT_ETHER:
		if (lladdr == NULL) {
			/* restore the original hardware address */
			lladdr = lp->lp_lladdr;
		} else {
			/* nothing to do if the address already matches */
			if (lagg_lladdr_equal(lladdr,
			    CLLADDR(ifp_port->if_sadl)))
				break;
		}

		lagg_chg_sadl(ifp_port,
		    lladdr, ETHER_ADDR_LEN);

		/* reprogram the hardware filter via re-init if running */
		if (ifp_port->if_init != NULL) {
			error = 0;
			if (ISSET(ifp_port->if_flags, IFF_RUNNING))
				error = if_init(ifp_port);

			if (error != 0) {
				LAGG_LOG(lp->lp_softc, LOG_WARNING,
				    "%s failed to if_init() on %d\n",
				    ifp_port->if_xname, error);
			}
		}
		break;
	default:
		/* non-ethernet: rebuild the sadl for the current if_type */
		if_alloc_sadl(ifp_port);
		break;
	}
}
2109 
/*
 * Set the link-level address of the lagg interface itself and
 * re-attach its IPv6 link-local address (which is derived from the
 * lladdr), then propagate the new address to all member ports.
 * NOTE(review): LAGG_LOCK is dropped around the in6 detach/attach
 * calls — presumably because they may sleep; confirm callers tolerate
 * the port list changing in that window.
 */
static void
lagg_if_setsadl(struct lagg_softc *sc, uint8_t *lladdr)
{
	struct ifnet *ifp;

	KASSERT(LAGG_LOCKED(sc));

	ifp = &sc->sc_if;

	/* nothing to do if the address already matches */
	if (lagg_lladdr_equal(CLLADDR(ifp->if_sadl), lladdr))
		return;

	lagg_chg_sadl(ifp, lladdr, ETHER_ADDR_LEN);

	LAGG_UNLOCK(sc);
	/* regenerate the IPv6 link-local address from the new lladdr */
	lagg_in6_ifdetach(ifp);
	lagg_in6_ifattach(ifp);
	LAGG_LOCK(sc);

	lagg_sync_sadl(sc);
}
2131 
/*
 * Propagate the lagg interface's current link-level address to every
 * member port.  sc_lladdr caches the last value pushed so repeated
 * calls with an unchanged address are cheap.
 */
static void
lagg_sync_sadl(struct lagg_softc *sc)
{
	struct ifnet *ifp;
	struct lagg_port *lp;
	const uint8_t *lla;

	ifp = &sc->sc_if;
	KASSERT(IFNET_LOCKED(ifp));

	lla = CLLADDR(ifp->if_sadl);
	/* already in sync with the cached copy: nothing to push */
	if (lagg_lladdr_equal(lla, sc->sc_lladdr))
		return;

	lagg_lladdr_cpy(sc->sc_lladdr, lla);

	LAGG_PORTS_FOREACH(sc, lp) {
		/* lagg_port_setsadl() requires the port's IFNET_LOCK */
		IFNET_LOCK(lp->lp_ifp);
		lagg_port_setsadl(lp, lla);
		IFNET_UNLOCK(lp->lp_ifp);
	}
}
2154 
/*
 * Attach ifp_port to the lagg as a new member port lp (caller-
 * allocated).  Saves the port's original callbacks and state, installs
 * the lagg's hooks, harmonizes MTU/lladdr between lagg and port, and
 * registers the port with the aggregation protocol.  On failure, the
 * error-unwind ladder restores the port to its pre-attach state.
 * Returns 0 on success.
 */
static int
lagg_port_setup(struct lagg_softc *sc,
    struct lagg_port *lp, struct ifnet *ifp_port)
{
	struct ifnet *ifp;
	u_char if_type;
	int error;
	bool stopped, is_1st_port;

	KASSERT(LAGG_LOCKED(sc));
	IFNET_ASSERT_UNLOCKED(ifp_port);

	ifp = &sc->sc_if;
	is_1st_port = SIMPLEQ_EMPTY(&sc->sc_ports);

	/* a lagg cannot be its own member */
	if (&sc->sc_if == ifp_port) {
		LAGG_DPRINTF(sc, "cannot add a lagg to itself as a port\n");
		return EINVAL;
	}

	if (sc->sc_nports > LAGG_MAX_PORTS)
		return ENOSPC;

	/* already a member of some lagg (this one or another) */
	if (ifp_port->if_lagg != NULL) {
		lp = (struct lagg_port *)ifp_port->if_lagg;
		if (lp->lp_softc == sc)
			return EEXIST;
		return EBUSY;
	}

	/* only ethernet-like interfaces can be aggregated */
	switch (ifp_port->if_type) {
	case IFT_ETHER:
	case IFT_L2TP:
		if_type = IFT_IEEE8023ADLAG;
		break;
	default:
		return ENOTSUP;
	}

	error = 0;
	stopped = false;
	lp->lp_softc = sc;
	lp->lp_prio = LAGG_PORT_PRIO;
	lp->lp_linkstate_hook = if_linkstate_change_establish(ifp_port,
	    lagg_linkstate_changed, ifp_port);
	lp->lp_ifdetach_hook = ether_ifdetachhook_establish(ifp_port,
	    lagg_ifdetach, ifp_port);
	psref_target_init(&lp->lp_psref, lagg_port_psref_class);

	IFNET_LOCK(ifp_port);
	/* stop packet processing */
	if (ISSET(ifp_port->if_flags, IFF_RUNNING) &&
	    ifp_port->if_init != NULL) {
		if_stop(ifp_port, 0);
		stopped = true;
	}

	/* to delete ipv6 link local address */
	lagg_in6_ifdetach(ifp_port);

	/* backup members */
	lp->lp_iftype = ifp_port->if_type;
	lp->lp_ioctl = ifp_port->if_ioctl;
	lp->lp_input = ifp_port->_if_input;
	lp->lp_output = ifp_port->if_output;
	lp->lp_ifcapenable = ifp_port->if_capenable;
	lp->lp_mtu = ifp_port->if_mtu;
	if (lp->lp_iftype == IFT_ETHER) {
		struct ethercom *ec;
		ec = (struct ethercom *)ifp_port;

		lagg_lladdr_cpy(lp->lp_lladdr, CLLADDR(ifp_port->if_sadl));
		lp->lp_eccapenable = ec->ec_capenable;
	}

	/* change callbacks and others */
	atomic_store_release(&ifp_port->if_lagg, (void *)lp);
	ifp_port->if_type = if_type;
	ifp_port->if_ioctl = lagg_port_ioctl;
	ifp_port->_if_input = lagg_input_ethernet;
	ifp_port->if_output = lagg_port_output;
	if (is_1st_port) {
		/* if_type changed above: rebuild the port's sadl */
		if (lp->lp_iftype != ifp_port->if_type)
			lagg_port_setsadl(lp, NULL);
	} else {
		/* follow the lagg's lladdr and MTU */
		lagg_port_setsadl(lp, CLLADDR(ifp->if_sadl));
		error = lagg_setmtu(ifp_port, ifp->if_mtu);
		if (error != 0)
			goto restore_sadl;
	}

	error = lagg_proto_allocport(sc, lp);
	if (error != 0)
		goto restore_mtu;

	/* restart packet processing */
	if (stopped) {
		error = if_init(ifp_port);
		if (error != 0)
			goto free_port;
	}

	/* setup of ifp_port is complete */
	IFNET_UNLOCK(ifp_port);

	if (is_1st_port) {
		/* the first port dictates the lagg's MTU and lladdr */
		error = lagg_setmtu(ifp, lp->lp_mtu);
		if (error != 0)
			goto restore_ifp_port;
		if (lp->lp_iftype == IFT_ETHER &&
		    lagg_lladdr_equal(sc->sc_lladdr_rand,
		    CLLADDR(ifp->if_sadl))) {
			lagg_if_setsadl(sc, lp->lp_lladdr);
		}
	}

	SIMPLEQ_INSERT_TAIL(&sc->sc_ports, lp, lp_entry);
	sc->sc_nports++;

	/* bring the new port in line with the lagg's configuration */
	lagg_capabilities_update(sc);
	lagg_port_syncmulti(sc, lp);
	lagg_port_syncvlan(sc, lp);
	lagg_config_promisc(sc, lp);

	lagg_proto_startport(sc, lp);

	return 0;

	/*
	 * Error unwind: each label undoes the steps completed before
	 * the corresponding failure point, in reverse order.
	 */
restore_ifp_port:
	IFNET_LOCK(ifp_port);
	if (stopped) {
		if_stop(ifp_port, 0);
	}
free_port:
	KASSERT(IFNET_LOCKED(ifp_port));
	lagg_proto_freeport(sc, lp);
restore_mtu:
	KASSERT(IFNET_LOCKED(ifp_port));
	if (ifp_port->if_mtu != lp->lp_mtu)
		lagg_setmtu(ifp_port, lp->lp_mtu);
restore_sadl:
	KASSERT(IFNET_LOCKED(ifp_port));

	/* restore if_type before changing sadl */
	if_type = ifp_port->if_type;
	ifp_port->if_type = lp->lp_iftype;

	if (!SIMPLEQ_EMPTY(&sc->sc_ports)) {
		lagg_port_setsadl(lp, lp->lp_lladdr);
	} else {
		/* NOTE(review): body of the if below despite indentation */
		if (ifp_port->if_type != if_type)
		lagg_port_setsadl(lp, NULL);
	}

	lagg_in6_ifattach(ifp_port);
	if (stopped) {
		if (if_init(ifp_port) != 0) {
			LAGG_LOG(sc, LOG_WARNING,
			    "couldn't re-start port %s\n",
			    ifp_port->if_xname);
		}
	}

	/* restore the port's original callbacks */
	ifp_port->if_ioctl = lp->lp_ioctl;
	ifp_port->_if_input = lp->lp_input;
	ifp_port->if_output = lp->lp_output;
	atomic_store_release(&ifp_port->if_lagg, NULL);
	IFNET_UNLOCK(ifp_port);

	psref_target_destroy(&lp->lp_psref, lagg_port_psref_class);
	if_linkstate_change_disestablish(ifp_port,
	    lp->lp_linkstate_hook, NULL);
	ether_ifdetachhook_disestablish(ifp_port,
	    lp->lp_ifdetach_hook, &sc->sc_lock);

	return error;
}
2332 
/*
 * Detach member port lp from the lagg: stop protocol processing,
 * remove replicated multicast/VLAN state, restore the port's original
 * callbacks, address and MTU, and free lp.  is_ifdetach is true when
 * called because the port interface itself is being detached, in which
 * case restoring its configuration is skipped.
 */
static void
lagg_port_teardown(struct lagg_softc *sc, struct lagg_port *lp,
    bool is_ifdetach)
{
	struct ifnet *ifp, *ifp_port;
	bool stopped, is_1st_port, iftype_changed;

	KASSERT(LAGG_LOCKED(sc));

	ifp = &sc->sc_if;
	ifp_port = lp->lp_ifp;
	stopped = false;
	is_1st_port =
	    SIMPLEQ_FIRST(&sc->sc_ports) == lp ? true : false;

	ether_ifdetachhook_disestablish(ifp_port,
	    lp->lp_ifdetach_hook, &sc->sc_lock);

	if (ifp_port->if_lagg == NULL) {
		/* already done in lagg_ifdetach() */
		return;
	}

	if_linkstate_change_disestablish(ifp_port,
	    lp->lp_linkstate_hook, NULL);

	lagg_proto_stopport(sc, lp);

	/* remove the multicast/VLAN state replicated onto the port */
	lagg_port_purgemulti(sc, lp);
	lagg_port_purgevlan(sc, lp);
	if (is_ifdetach == false) {
		/* restore the port's own promisc and capability settings */
		lagg_unconfig_promisc(sc, lp);
		lagg_setifcaps(lp, lp->lp_ifcapenable);
		if (lp->lp_iftype == IFT_ETHER)
			lagg_setethcaps(lp, lp->lp_eccapenable);
	}

	SIMPLEQ_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entry);
	sc->sc_nports--;

	if (is_1st_port) {
		/*
		 * The departing first port donated the lagg's lladdr:
		 * switch to the next ethernet port's address, or back
		 * to the random one.
		 */
		if (lp->lp_iftype == IFT_ETHER &&
		    lagg_lladdr_equal(lp->lp_lladdr,
		    CLLADDR(ifp->if_sadl))) {
			struct lagg_port *lp0;
			uint8_t *lla;

			lp0 = SIMPLEQ_FIRST(&sc->sc_ports);
			if (lp0 != NULL &&
			    lp0->lp_iftype == IFT_ETHER) {
				lla = lp0->lp_lladdr;
			} else {
				lla = sc->sc_lladdr_rand;
			}

			lagg_if_setsadl(sc, lla);
		}
	}

	IFNET_LOCK(ifp_port);
	/* stop packet processing */
	if (ISSET(ifp_port->if_flags, IFF_RUNNING) &&
	    ifp_port->if_init != NULL) {
		if_stop(ifp_port, 0);
		stopped = true;
	}

	lagg_proto_freeport(sc, lp);

	/* change if_type before set sadl */
	iftype_changed = ifp_port->if_type != lp->lp_iftype ?
	    true : false;
	ifp_port->if_type = lp->lp_iftype;

	if (is_ifdetach == false) {
		if (iftype_changed &&
		    lagg_lladdr_equal(CLLADDR(ifp_port->if_sadl),
		    lp->lp_lladdr)) {
			lagg_port_setsadl(lp, NULL);
		}
		lagg_port_setsadl(lp, lp->lp_lladdr);
		/*
		 * NOTE(review): lagg_in6_ifattach() is called again
		 * after the pserialize below — looks redundant; confirm
		 * whether one of the two calls can be dropped.
		 */
		lagg_in6_ifattach(ifp_port);
		(void)lagg_setmtu(ifp_port, lp->lp_mtu);
	}

	/* restore the port's original callbacks */
	ifp_port->_if_input = lp->lp_input;
	ifp_port->if_output = lp->lp_output;
	if (ifp_port->if_ioctl == lagg_port_ioctl)
		ifp_port->if_ioctl = lp->lp_ioctl;
	atomic_store_release(&ifp_port->if_lagg, NULL);
	/* wait for readers of if_lagg (e.g. lagg_input_ethernet) */
	pserialize_perform(sc->sc_psz);

	/* to assign ipv6 link local address */
	if (is_ifdetach == false) {
		lagg_in6_ifattach(ifp_port);
	}

	/* restart packet processing */
	if (stopped) {
		int error;
		error = if_init(ifp_port);
		if (error != 0) {
			LAGG_LOG(sc, LOG_WARNING,
			    "%s failed to if_init() on %d\n",
			    ifp_port->if_xname, error);
		}
	}
	IFNET_UNLOCK(ifp_port);

	psref_target_destroy(&lp->lp_psref, lagg_port_psref_class);
	kmem_free(lp, sizeof(*lp));
}
2445 
2446 static int
2447 lagg_addport(struct lagg_softc *sc, struct ifnet *ifp_port)
2448 {
2449 	struct lagg_port *lp;
2450 	int error;
2451 
2452 	lp = kmem_zalloc(sizeof(*lp), KM_SLEEP);
2453 	lp->lp_ifp = ifp_port;
2454 
2455 	LAGG_LOCK(sc);
2456 	error = lagg_port_setup(sc, lp, ifp_port);
2457 	LAGG_UNLOCK(sc);
2458 
2459 	if (error != 0)
2460 		kmem_free(lp, sizeof(*lp));
2461 
2462 	return error;
2463 }
2464 
2465 static int
2466 lagg_delport(struct lagg_softc *sc, struct ifnet *ifp_port)
2467 {
2468 	struct lagg_port *lp;
2469 	int error;
2470 
2471 	KASSERT(IFNET_LOCKED(&sc->sc_if));
2472 
2473 	error = 0;
2474 	LAGG_LOCK(sc);
2475 	lp = ifp_port->if_lagg;
2476 	if (lp == NULL || lp->lp_softc != sc) {
2477 		error = ENOENT;
2478 		goto out;
2479 	}
2480 
2481 	if (lp->lp_ifdetaching) {
2482 		error = EBUSY;
2483 		goto out;
2484 	}
2485 
2486 	lagg_port_teardown(sc, lp, false);
2487 
2488 out:
2489 	LAGG_UNLOCK(sc);
2490 
2491 	return error;
2492 }
2493 
2494 static int
2495 lagg_delport_all(struct lagg_softc *sc)
2496 {
2497 	struct lagg_port *lp;
2498 	int error;
2499 
2500 	KASSERT(IFNET_LOCKED(&sc->sc_if));
2501 
2502 	error = 0;
2503 
2504 	LAGG_LOCK(sc);
2505 	while ((lp = LAGG_PORTS_FIRST(sc)) != NULL) {
2506 		if (lp->lp_ifdetaching) {
2507 			error = EBUSY;
2508 			continue;
2509 		}
2510 
2511 		lagg_port_teardown(sc, lp, false);
2512 	}
2513 
2514 	LAGG_UNLOCK(sc);
2515 
2516 	return error;
2517 }
2518 
/*
 * Copy the current protocol and per-port statistics of sc into
 * resp.  nports is the number of laggreqport slots the caller
 * provided in resp->lrq_reqports; resp->lrq_nports is always set
 * to the real port count so the caller can retry with a larger
 * buffer on ENOBUFS.
 *
 * Returns 0 on success, ENOENT when no protocol variant is
 * active, or ENOBUFS when there are more ports than nports slots.
 */
static int
lagg_get_stats(struct lagg_softc *sc, struct lagg_req *resp,
    size_t nports)
{
	struct lagg_variant *var;
	struct lagg_port *lp;
	struct laggreqport *port;
	struct psref psref;
	struct ifnet *ifp;
	int bound;
	size_t n;

	/* stay bound to this CPU while we hold the psref */
	bound = curlwp_bind();
	var = lagg_variant_getref(sc, &psref);
	if (var == NULL) {
		curlwp_bindx(bound);
		return ENOENT;
	}

	resp->lrq_proto = var->lv_proto;

	lagg_proto_stat(var, &resp->lrq_reqproto);

	/* count every port but copy out only those that fit */
	n = 0;
	LAGG_LOCK(sc);
	LAGG_PORTS_FOREACH(sc, lp) {
		if (n < nports) {
			port = &resp->lrq_reqports[n];

			ifp = lp->lp_ifp;
			strlcpy(port->rp_portname, ifp->if_xname,
			    sizeof(port->rp_portname));

			port->rp_prio = lp->lp_prio;
			port->rp_flags = lp->lp_flags;
			lagg_proto_portstat(var, lp, port);
		}
		n++;
	}
	LAGG_UNLOCK(sc);

	resp->lrq_nports = n;

	lagg_variant_putref(var, &psref);
	curlwp_bindx(bound);

	if (resp->lrq_nports > nports) {
		return ENOBUFS;
	}
	return 0;
}
2570 
2571 static void
2572 lagg_config_promisc(struct lagg_softc *sc, struct lagg_port *lp)
2573 {
2574 	struct ifnet *ifp, *ifp_port;
2575 	int error;
2576 	bool promisc;
2577 
2578 	KASSERT(LAGG_LOCKED(sc));
2579 
2580 	ifp = &sc->sc_if;
2581 	ifp_port = lp->lp_ifp;
2582 
2583 	if (lp->lp_iftype == IFT_ETHER) {
2584 		promisc = ISSET(ifp->if_flags, IFF_PROMISC) ?
2585 		    true : false;
2586 	} else {
2587 		promisc = true;
2588 	}
2589 
2590 	if (lp->lp_promisc == promisc)
2591 		return;
2592 
2593 	error = ifpromisc(ifp_port, promisc ? 1 : 0);
2594 	if (error == ENETRESET) {
2595 		error = ifp_port->if_init(ifp_port);
2596 	}
2597 
2598 	if (error == 0) {
2599 		lp->lp_promisc = promisc;
2600 	} else {
2601 		LAGG_LOG(sc, LOG_WARNING,
2602 		    "couldn't %s promisc on %s\n",
2603 		    promisc ? "set" : "unset",
2604 		    ifp_port->if_xname);
2605 	}
2606 }
2607 
2608 static void
2609 lagg_unconfig_promisc(struct lagg_softc *sc, struct lagg_port *lp)
2610 {
2611 	struct ifnet *ifp_port;
2612 	int error;
2613 
2614 	KASSERT(LAGG_LOCKED(sc));
2615 
2616 	ifp_port = lp->lp_ifp;
2617 
2618 	if (lp->lp_promisc == false)
2619 		return;
2620 
2621 	error = ifpromisc(ifp_port, 0);
2622 	if (error == ENETRESET) {
2623 		error = ifp_port->if_init(ifp_port);
2624 	}
2625 
2626 	if (error != 0) {
2627 		LAGG_LOG(sc, LOG_WARNING,
2628 		    "couldn't unset promisc on %s\n",
2629 		    ifp_port->if_xname);
2630 	}
2631 }
2632 
/*
 * ioctl handler installed on member interfaces while they belong
 * to a lagg.  Requests that must not be applied to a member
 * directly are rejected; everything else falls back to the port's
 * original if_ioctl (saved behind LAGG_PORT_IOCTL).
 */
static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	int error = 0;
	u_int ifflags;

	if ((lp = ifp->if_lagg) == NULL ||
	    (sc = lp->lp_softc) == NULL) {
		goto fallback;
	}

	KASSERT(IFNET_LOCKED(lp->lp_ifp));

	switch (cmd) {
	case SIOCSIFCAP:
	case SIOCSIFMTU:
	case SIOCSETHERCAP:
		/* Do not allow the setting to be changed once joined */
		error = EINVAL;
		break;
	case SIOCSIFFLAGS:
		ifflags = ifp->if_flags;
		error = LAGG_PORT_IOCTL(lp, cmd, data);
		/* compute which flags the ioctl toggled */
		ifflags ^= ifp->if_flags;

		/* notify the protocol when UP/RUNNING changed */
		if ((ifflags & (IFF_UP | IFF_RUNNING)) != 0)
			lagg_proto_linkstate(sc, lp);
		break;
	default:
		goto fallback;
	}

	return error;
fallback:
	if (lp != NULL) {
		error = LAGG_PORT_IOCTL(lp, cmd, data);
	} else {
		error = ENOTTY;
	}

	return error;
}
2677 
2678 static int
2679 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
2680     const struct sockaddr *dst, const struct rtentry *rt)
2681 {
2682 	struct lagg_port *lp = ifp->if_lagg;
2683 	int error = 0;
2684 
2685 	switch (dst->sa_family) {
2686 	case pseudo_AF_HDRCMPLT:
2687 	case AF_UNSPEC:
2688 		if (lp != NULL)
2689 			error = lp->lp_output(ifp, m, dst, rt);
2690 		else
2691 			error = ENETDOWN;
2692 		break;
2693 	default:
2694 		m_freem(m);
2695 		error = ENETDOWN;
2696 	}
2697 
2698 	return error;
2699 }
2700 
/*
 * Detach hook for member interfaces: called when a member is
 * being detached from the system while still joined to a lagg.
 * Marks the port as detaching first (without IFNET_LOCK held),
 * then re-acquires IFNET_LOCK(sc_if) -> LAGG_LOCK in the proper
 * order to run the teardown.
 */
void
lagg_ifdetach(void *xifp_port)
{
	struct ifnet *ifp_port = xifp_port;
	struct lagg_port *lp;
	struct lagg_softc *sc;
	int s;

	IFNET_ASSERT_UNLOCKED(ifp_port);

	/* peek at the port and its softc under pserialize */
	s = pserialize_read_enter();
	lp = atomic_load_consume(&ifp_port->if_lagg);
	if (lp == NULL) {
		pserialize_read_exit(s);
		return;
	}

	sc = lp->lp_softc;
	if (sc == NULL) {
		pserialize_read_exit(s);
		return;
	}
	pserialize_read_exit(s);

	LAGG_LOCK(sc);
	/* re-check under the lock; we may have raced a teardown */
	lp = ifp_port->if_lagg;
	if (lp == NULL) {
		LAGG_UNLOCK(sc);
		return;
	}

	/*
	 * mark as a detaching to prevent other
	 * lagg_port_teardown() processings with IFNET_LOCK() held
	 */
	lp->lp_ifdetaching = true;

	LAGG_UNLOCK(sc);

	/* take the locks in the required order and tear down */
	IFNET_LOCK(&sc->sc_if);
	LAGG_LOCK(sc);
	lp = ifp_port->if_lagg;
	if (lp != NULL) {
		lagg_port_teardown(sc, lp, true);
	}
	LAGG_UNLOCK(sc);
	IFNET_UNLOCK(&sc->sc_if);
}
2749 
/*
 * Link-state change hook for member interfaces: forwards the new
 * link state of a member to the lagg protocol.  A psref keeps the
 * port alive across the sleepable IFNET_LOCK section.
 */
void
lagg_linkstate_changed(void *xifp)
{
	struct ifnet *ifp = xifp;
	struct lagg_port *lp;
	struct psref psref;
	int s, bound;

	s = pserialize_read_enter();
	lp = atomic_load_consume(&ifp->if_lagg);
	if (lp != NULL) {
		/* stay bound to this CPU while holding the psref */
		bound = curlwp_bind();
		lagg_port_getref(lp, &psref);
	} else {
		pserialize_read_exit(s);
		return;
	}
	pserialize_read_exit(s);

	IFNET_LOCK(lp->lp_ifp);
	lagg_proto_linkstate(lp->lp_softc, lp);
	IFNET_UNLOCK(lp->lp_ifp);

	lagg_port_putref(lp, &psref);
	curlwp_bindx(bound);
}
2776 
/* Acquire a passive reference that keeps lp from being freed. */
void
lagg_port_getref(struct lagg_port *lp, struct psref *psref)
{

	psref_acquire(psref, &lp->lp_psref, lagg_port_psref_class);
}
2783 
/* Release a reference taken with lagg_port_getref(). */
void
lagg_port_putref(struct lagg_port *lp, struct psref *psref)
{

	psref_release(psref, &lp->lp_psref, lagg_port_psref_class);
}
2790 
/*
 * Workqueue trampoline: recover the lagg_work from its embedded
 * cookie, mark it dequeued, and invoke the stored callback.
 */
static void
lagg_workq_work(struct work *wk, void *context)
{
	struct lagg_work *lw;

	lw = container_of(wk, struct lagg_work, lw_cookie);

	/*
	 * Flip ENQUEUED back to IDLE so lagg_workq_add() may
	 * re-enqueue the work; the CAS leaves a STOPPING state set
	 * by lagg_workq_wait() untouched.
	 */
	atomic_cas_uint(&lw->lw_state, LAGG_WORK_ENQUEUED, LAGG_WORK_IDLE);
	lw->lw_func(lw, lw->lw_arg);
}
2801 
2802 struct workqueue *
2803 lagg_workq_create(const char *name, pri_t prio, int ipl, int flags)
2804 {
2805 	struct workqueue *wq;
2806 	int error;
2807 
2808 	error = workqueue_create(&wq, name, lagg_workq_work,
2809 	    NULL, prio, ipl, flags);
2810 
2811 	if (error)
2812 		return NULL;
2813 
2814 	return wq;
2815 }
2816 
/* Counterpart of lagg_workq_create(): destroy the workqueue. */
void
lagg_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}
2823 
/*
 * Enqueue lw on wq unless it is already enqueued or being
 * stopped.  The IDLE -> ENQUEUED CAS makes the operation
 * idempotent: concurrent callers schedule the work only once.
 */
void
lagg_workq_add(struct workqueue *wq, struct lagg_work *lw)
{

	if (atomic_cas_uint(&lw->lw_state, LAGG_WORK_IDLE,
	    LAGG_WORK_ENQUEUED) != LAGG_WORK_IDLE)
		return;

	KASSERT(lw->lw_func != NULL);
	/* no preemption between the state change and the enqueue */
	kpreempt_disable();
	workqueue_enqueue(wq, &lw->lw_cookie, NULL);
	kpreempt_enable();
}
2837 
/*
 * Prevent further enqueueing of lw by forcing its state to
 * STOPPING, then wait until any instance already queued on wq has
 * finished running.
 */
void
lagg_workq_wait(struct workqueue *wq, struct lagg_work *lw)
{

	atomic_swap_uint(&lw->lw_state, LAGG_WORK_STOPPING);
	workqueue_wait(wq, &lw->lw_cookie);
}
2845 
/*
 * Rebuild the AF_LINK addresses of ifp after its if_type changed
 * and make lla (lla_len == ifp->if_addrlen bytes) the active
 * link-layer address.  Every sockaddr_dl whose sdl_type no longer
 * matches ifp->if_type is replaced by a fresh entry carrying the
 * same address bytes.
 *
 * Returns 0 on success or ENOMEM when a new ifaddr could not be
 * created.
 */
static int
lagg_chg_sadl(struct ifnet *ifp, const uint8_t *lla, size_t lla_len)
{
	struct psref psref_cur, psref_next;
	struct ifaddr *ifa_cur, *ifa_next, *ifa_lla;
	const struct sockaddr_dl *sdl, *nsdl;
	int s, error;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(ifp->if_addrlen == lla_len);

	error = 0;
	ifa_lla = NULL;

	/*
	 * Pass 1: migrate each AF_LINK ifaddr with a stale sdl_type
	 * to a new entry of the current type, updating if_dl and
	 * if_hwdl when they pointed at the replaced entry.
	 */
	while (1) {
		/* find the next link-level address of the old type */
		s = pserialize_read_enter();
		IFADDR_READER_FOREACH(ifa_cur, ifp) {
			sdl = satocsdl(ifa_cur->ifa_addr);
			if (sdl->sdl_family != AF_LINK)
				continue;

			if (sdl->sdl_type != ifp->if_type) {
				ifa_acquire(ifa_cur, &psref_cur);
				break;
			}
		}
		pserialize_read_exit(s);

		if (ifa_cur == NULL)
			break;

		/* build the replacement carrying the same address */
		ifa_next = if_dl_create(ifp, &nsdl);
		if (ifa_next == NULL) {
			error = ENOMEM;
			ifa_release(ifa_cur, &psref_cur);
			goto done;
		}
		ifa_acquire(ifa_next, &psref_next);
		(void)sockaddr_dl_setaddr(__UNCONST(nsdl), nsdl->sdl_len,
		    CLLADDR(sdl), ifp->if_addrlen);
		ifa_insert(ifp, ifa_next);

		/* remember the entry that already carries lla */
		if (ifa_lla == NULL &&
		    memcmp(CLLADDR(sdl), lla, lla_len) == 0) {
			ifa_lla = ifa_next;
			ifaref(ifa_lla);
		}

		if (ifa_cur == ifp->if_dl)
			if_activate_sadl(ifp, ifa_next, nsdl);

		if (ifa_cur == ifp->if_hwdl) {
			ifp->if_hwdl = ifa_next;
			ifaref(ifa_next);
			ifafree(ifa_cur);
		}

		/* drop the old entry; hold an extra ref across remove */
		ifaref(ifa_cur);
		ifa_release(ifa_cur, &psref_cur);
		ifa_remove(ifp, ifa_cur);
		KASSERTMSG(ifa_cur->ifa_refcnt == 1,
		    "ifa_refcnt=%d", ifa_cur->ifa_refcnt);
		ifafree(ifa_cur);
		ifa_release(ifa_next, &psref_next);
	}

	/*
	 * Pass 2: obtain an ifaddr holding lla — either the one
	 * remembered during migration or a newly created entry.
	 */
	if (ifa_lla != NULL) {
		ifa_next = ifa_lla;

		ifa_acquire(ifa_next, &psref_next);
		/* drop the reference taken in pass 1 */
		ifafree(ifa_lla);

		nsdl = satocsdl(ifa_next->ifa_addr);
	} else {
		ifa_next = if_dl_create(ifp, &nsdl);
		if (ifa_next == NULL) {
			error = ENOMEM;
			goto done;
		}
		ifa_acquire(ifa_next, &psref_next);
		(void)sockaddr_dl_setaddr(__UNCONST(nsdl),
		    nsdl->sdl_len, lla, ifp->if_addrlen);
		ifa_insert(ifp, ifa_next);
	}

	/* activate lla and retire the previously active sadl */
	if (ifa_next != ifp->if_dl) {
		ifa_cur = ifp->if_dl;
		if (ifa_cur != NULL)
			ifa_acquire(ifa_cur, &psref_cur);

		if_activate_sadl(ifp, ifa_next, nsdl);

		if (ifa_cur != NULL) {
			if (ifa_cur != ifp->if_hwdl) {
				ifaref(ifa_cur);
				ifa_release(ifa_cur, &psref_cur);
				ifa_remove(ifp, ifa_cur);
				KASSERTMSG(ifa_cur->ifa_refcnt == 1,
				    "ifa_refcnt=%d",
				    ifa_cur->ifa_refcnt);
				ifafree(ifa_cur);
			} else {
				/* keep the hardware-address entry */
				ifa_release(ifa_cur, &psref_cur);
			}
		}
	}

	ifa_release(ifa_next, &psref_next);

done:
	return error;
}
2959 
2960 /*
2961  * Module infrastructure
2962  */
2963 #include <net/if_module.h>
2964 
2965 IF_MODULE(MODULE_CLASS_DRIVER, lagg, NULL)
2966