xref: /netbsd-src/sys/net/lagg/if_laggproto.c (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1 /*	$NetBSD: if_laggproto.c,v 1.15 2024/04/05 06:51:41 yamaguchi Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c)2021 Internet Initiative Japan, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: if_laggproto.c,v 1.15 2024/04/05 06:51:41 yamaguchi Exp $");
33 
34 #include <sys/param.h>
35 #include <sys/types.h>
36 
37 #include <sys/evcnt.h>
38 #include <sys/kmem.h>
39 #include <sys/mbuf.h>
40 #include <sys/mutex.h>
41 #include <sys/pslist.h>
42 #include <sys/syslog.h>
43 #include <sys/workqueue.h>
44 
45 #include <net/if.h>
46 #include <net/if_ether.h>
47 #include <net/if_media.h>
48 
49 #include <net/lagg/if_lagg.h>
50 #include <net/lagg/if_laggproto.h>
51 
/*
 * Per-protocol instance state shared by the failover and
 * loadbalance implementations in this file.
 */
struct lagg_proto_softc {
	struct lagg_softc	*psc_softc;	/* back pointer to the lagg interface */
	struct pslist_head	 psc_ports;	/* list of lagg_proto_port, pserialize-safe */
	kmutex_t		 psc_lock;	/* protects members of this struct */
	pserialize_t		 psc_psz;	/* read-side protection for psc_ports */
	size_t			 psc_ctxsiz;	/* size of psc_ctx, 0 if none */
	void			*psc_ctx;	/* protocol private data (lagg_failover / lagg_lb) */
	size_t			 psc_nactports;	/* number of ports with an active link */
	struct workqueue	*psc_workq;	/* runs deferred linkspeed recalculation */
	struct lagg_work	 psc_work_linkspeed;	/* linkspeed work item */
};
63 
/*
 * Locking notes:
 * - Members of struct lagg_proto_softc are protected by
 *   psc_lock (an adaptive mutex)
 * - psc_ports is protected by pserialize (psc_psz) and
 *   is updated exclusively under LAGG_PROTO_LOCK.
 * - Other locking notes are described in if_laggproto.h
 */
72 
/* Failover protocol private context (stored in psc_ctx). */
struct lagg_failover {
	bool		 fo_rx_all;	/* accept input on every port, not only the active one */
};
76 
/* Snapshot of the ports used to distribute outgoing traffic. */
struct lagg_portmap {
	struct lagg_port	*pm_ports[LAGG_MAX_PORTS];	/* dense array of ports */
	size_t			 pm_nports;			/* valid entries in pm_ports */
};
81 
/*
 * Double-buffered port maps: readers use the map selected by
 * maps_activepmap while writers rebuild the other copy and then
 * switch the index.
 */
struct lagg_portmaps {
	struct lagg_portmap	 maps_pmap[2];
	size_t			 maps_activepmap;	/* index (0 or 1) of the active map */
};
86 
/* Loadbalance protocol private context (stored in psc_ctx). */
struct lagg_lb {
	struct lagg_portmaps	 lb_pmaps;	/* double-buffered output port maps */
};
90 
/* Per-port state common to the protocols implemented here. */
struct lagg_proto_port {
	struct pslist_entry	 lpp_entry;	/* entry on lagg_proto_softc::psc_ports */
	struct lagg_port	*lpp_laggport;	/* back pointer to the lagg port */
	uint64_t		 lpp_linkspeed;	/* media baudrate; protected by psc_lock */
	bool			 lpp_active;	/* the port's link is up */
	bool			 lpp_running;	/* started via lagg_common_startport() */
};
98 
/* Serialize updates of a lagg_proto_softc (see locking notes above). */
#define LAGG_PROTO_LOCK(_psc)	mutex_enter(&(_psc)->psc_lock)
#define LAGG_PROTO_UNLOCK(_psc)	mutex_exit(&(_psc)->psc_lock)
#define LAGG_PROTO_LOCKED(_psc)	mutex_owned(&(_psc)->psc_lock)
102 
103 static struct lagg_proto_softc *
104 		lagg_proto_alloc(lagg_proto, struct lagg_softc *);
105 static void	lagg_proto_free(struct lagg_proto_softc *);
106 static void	lagg_proto_insert_port(struct lagg_proto_softc *,
107 		    struct lagg_proto_port *);
108 static void	lagg_proto_remove_port(struct lagg_proto_softc *,
109 		    struct lagg_proto_port *);
110 static struct lagg_port *
111 		lagg_link_active(struct lagg_proto_softc *psc,
112 		    struct lagg_proto_port *, struct psref *);
113 static void	lagg_fail_linkspeed_work(struct lagg_work *, void *);
114 static void	lagg_lb_linkspeed_work(struct lagg_work*,
115 		    void *);
116 static void	lagg_common_linkstate(struct lagg_proto_softc *,
117 		    struct lagg_port *);
118 
119 static inline struct lagg_portmap *
120 lagg_portmap_active(struct lagg_portmaps *maps)
121 {
122 	size_t i;
123 
124 	i = atomic_load_consume(&maps->maps_activepmap);
125 
126 	return &maps->maps_pmap[i];
127 }
128 
129 static inline struct lagg_portmap *
130 lagg_portmap_next(struct lagg_portmaps *maps)
131 {
132 	size_t i;
133 
134 	i = atomic_load_consume(&maps->maps_activepmap);
135 	i ^= 0x1;
136 
137 	return &maps->maps_pmap[i];
138 }
139 
140 static inline void
141 lagg_portmap_switch(struct lagg_portmaps *maps)
142 {
143 	size_t i;
144 
145 	i = atomic_load_consume(&maps->maps_activepmap);
146 	i &= 0x1;
147 	i ^= 0x1;
148 
149 	atomic_store_release(&maps->maps_activepmap, i);
150 }
151 
152 static struct lagg_proto_softc *
153 lagg_proto_alloc(lagg_proto pr, struct lagg_softc *sc)
154 {
155 	struct lagg_proto_softc *psc;
156 	char xnamebuf[MAXCOMLEN];
157 	size_t ctxsiz;
158 
159 	switch (pr) {
160 	case LAGG_PROTO_FAILOVER:
161 		ctxsiz = sizeof(struct lagg_failover);
162 		break;
163 	case LAGG_PROTO_LOADBALANCE:
164 		ctxsiz = sizeof(struct lagg_lb);
165 		break;
166 	default:
167 		ctxsiz = 0;
168 	}
169 
170 	psc = kmem_zalloc(sizeof(*psc), KM_NOSLEEP);
171 	if (psc == NULL)
172 		return NULL;
173 
174 	psc->psc_workq = lagg_workq_create(xnamebuf,
175 		    PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
176 	if (psc->psc_workq == NULL) {
177 		LAGG_LOG(sc, LOG_ERR, "workqueue create failed\n");
178 		kmem_free(psc, sizeof(*psc));
179 		return NULL;
180 	}
181 
182 	if (ctxsiz > 0) {
183 		psc->psc_ctx = kmem_zalloc(ctxsiz, KM_NOSLEEP);
184 		if (psc->psc_ctx == NULL) {
185 			lagg_workq_destroy(psc->psc_workq);
186 			kmem_free(psc, sizeof(*psc));
187 			return NULL;
188 		}
189 
190 		psc->psc_ctxsiz = ctxsiz;
191 	}
192 
193 	PSLIST_INIT(&psc->psc_ports);
194 	psc->psc_psz = pserialize_create();
195 	mutex_init(&psc->psc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
196 	psc->psc_softc = sc;
197 
198 	return psc;
199 }
200 
/*
 * Release a protocol instance allocated by lagg_proto_alloc().
 * The pending linkspeed work is drained first so it cannot run
 * after the mutex and workqueue it uses are destroyed.
 */
static void
lagg_proto_free(struct lagg_proto_softc *psc)
{

	/* wait for any queued linkspeed update to finish */
	lagg_workq_wait(psc->psc_workq, &psc->psc_work_linkspeed);
	pserialize_destroy(psc->psc_psz);
	mutex_destroy(&psc->psc_lock);
	lagg_workq_destroy(psc->psc_workq);
	PSLIST_DESTROY(&psc->psc_ports);

	/* the protocol private context is optional */
	if (psc->psc_ctxsiz > 0)
		kmem_free(psc->psc_ctx, psc->psc_ctxsiz);

	kmem_free(psc, sizeof(*psc));
}
216 
/*
 * Look up a port whose link is active and acquire a psref on it.
 * The scan starts at "pport" (which may be NULL) and runs to the
 * tail of the list; if that finds nothing, it restarts from the
 * head of psc_ports.  The walk is done inside a pserialize read
 * section, so concurrent list updates are safe.
 *
 * Returns NULL when no active port exists; otherwise the caller
 * must drop the reference with lagg_port_putref().
 */
static struct lagg_port *
lagg_link_active(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport, struct psref *psref)
{
	struct lagg_port *lp;
	int s;

	lp = NULL;
	s = pserialize_read_enter();

	/* first pass: from the given starting point to the tail */
	for (;pport != NULL;
	    pport = PSLIST_READER_NEXT(pport,
	    struct lagg_proto_port, lpp_entry)) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			goto done;
		}
	}

	/* second pass: wrap around and scan from the head */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			break;
		}
	}
done:
	if (lp != NULL)
		lagg_port_getref(lp, psref);
	pserialize_read_exit(s);

	return lp;
}
250 
251 int
252 lagg_common_allocport(struct lagg_proto_softc *psc, struct lagg_port *lp)
253 {
254 	struct lagg_proto_port *pport;
255 
256 	KASSERT(LAGG_LOCKED(psc->psc_softc));
257 
258 	pport = kmem_zalloc(sizeof(*pport), KM_NOSLEEP);
259 	if (pport == NULL)
260 		return ENOMEM;
261 
262 	PSLIST_ENTRY_INIT(pport, lpp_entry);
263 	pport->lpp_laggport = lp;
264 	lp->lp_proto_ctx = (void *)pport;
265 	return 0;
266 }
267 
268 void
269 lagg_common_freeport(struct lagg_proto_softc *psc, struct lagg_port *lp)
270 {
271 	struct lagg_proto_port *pport;
272 
273 	pport = lp->lp_proto_ctx;
274 	KASSERT(!pport->lpp_running);
275 	lp->lp_proto_ctx = NULL;
276 
277 	kmem_free(pport, sizeof(*pport));
278 }
279 
/*
 * Insert pport into psc_ports, keeping the list sorted by
 * ascending lp_prio.  Uses PSLIST writer operations under
 * LAGG_PROTO_LOCK so concurrent pserialize readers stay safe.
 */
static void
lagg_proto_insert_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{
	struct lagg_proto_port *pport0;
	struct lagg_port *lp, *lp0;
	bool insert_after;

	insert_after = false;
	lp = pport->lpp_laggport;

	LAGG_PROTO_LOCK(psc);
	/* find the first entry with a higher priority, or the tail */
	PSLIST_WRITER_FOREACH(pport0, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		lp0 = pport0->lpp_laggport;
		if (lp0->lp_prio > lp->lp_prio)
			break;

		/* reached the tail without a higher priority: append */
		if (PSLIST_WRITER_NEXT(pport0,
		    struct lagg_proto_port, lpp_entry) == NULL) {
			insert_after = true;
			break;
		}
	}

	if (pport0 == NULL) {
		/* the list is empty */
		PSLIST_WRITER_INSERT_HEAD(&psc->psc_ports, pport,
		    lpp_entry);
	} else if (insert_after) {
		PSLIST_WRITER_INSERT_AFTER(pport0, pport, lpp_entry);
	} else {
		PSLIST_WRITER_INSERT_BEFORE(pport0, pport, lpp_entry);
	}
	LAGG_PROTO_UNLOCK(psc);
}
315 
/*
 * Unlink pport from psc_ports and wait (pserialize_perform) until
 * no reader can still see it, then reinitialize the list entry so
 * the port can be inserted again later.
 */
static void
lagg_proto_remove_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_REMOVE(pport, lpp_entry);
	LAGG_PROTO_UNLOCK(psc);
	/* drain concurrent readers before reusing the entry */
	pserialize_perform(psc->psc_psz);

	/* re-initialize for reuse */
	PSLIST_ENTRY_DESTROY(pport, lpp_entry);
	PSLIST_ENTRY_INIT(pport, lpp_entry);
}
330 
331 void
332 lagg_common_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
333 {
334 	struct lagg_proto_port *pport;
335 
336 	pport = lp->lp_proto_ctx;
337 	lagg_proto_insert_port(psc, pport);
338 
339 	LAGG_PROTO_LOCK(psc);
340 	pport->lpp_running = true;
341 	LAGG_PROTO_UNLOCK(psc);
342 
343 	lagg_common_linkstate(psc, lp);
344 }
345 
/*
 * Stop protocol processing on a port: mark it not running, unlink
 * it from the port list and, if it was the last active port, take
 * the lagg interface link down.  Finally schedule a linkspeed
 * recalculation.
 */
void
lagg_common_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp;

	pport = lp->lp_proto_ctx;

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = false;
	LAGG_PROTO_UNLOCK(psc);

	/* waits for readers to drain, so pport is private from here on */
	lagg_proto_remove_port(psc, pport);

	/*
	 * NOTE(review): lpp_active and psc_nactports are updated here
	 * without LAGG_PROTO_LOCK — presumably serialized by the
	 * caller's LAGG_LOCK; confirm.
	 */
	if (pport->lpp_active) {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		if (psc->psc_nactports == 0) {
			ifp = &psc->psc_softc->sc_if;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}

		pport->lpp_active = false;
	}

	/* recompute the aggregate link speed without this port */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
374 static void
375 lagg_common_linkstate(struct lagg_proto_softc *psc, struct lagg_port *lp)
376 {
377 
378 	IFNET_ASSERT_UNLOCKED(lp->lp_ifp);
379 
380 	IFNET_LOCK(lp->lp_ifp);
381 	lagg_common_linkstate_ifnet_locked(psc, lp);
382 	IFNET_UNLOCK(lp->lp_ifp);
383 }
384 
/*
 * Synchronize lp's active state with its current link status.
 * Must be called with the port's ifnet lock held (it is required
 * for the SIOCGIFMEDIA ioctl issued below).
 *
 * On a state change this adjusts psc_nactports, raises the lagg
 * interface link state on the first active port and lowers it on
 * the last, records the port's media baudrate, and schedules the
 * protocol's linkspeed work to recompute the aggregate speed.
 */
void
lagg_common_linkstate_ifnet_locked(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp, *ifp_port;
	struct ifmediareq ifmr;
	uint64_t linkspeed;
	bool is_active;
	int error;

	pport = lp->lp_proto_ctx;
	is_active = lagg_portactive(lp);
	ifp_port = lp->lp_ifp;

	KASSERT(IFNET_LOCKED(ifp_port));

	LAGG_PROTO_LOCK(psc);
	/* nothing to do if stopped or if the state did not change */
	if (!pport->lpp_running ||
	    pport->lpp_active == is_active) {
		LAGG_PROTO_UNLOCK(psc);
		return;
	}

	ifp = &psc->psc_softc->sc_if;
	pport->lpp_active = is_active;

	if (is_active) {
		psc->psc_nactports++;
		/* the first active port brings the aggregate link up */
		if (psc->psc_nactports == 1)
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* the last active port takes the aggregate link down */
		if (psc->psc_nactports == 0)
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
	LAGG_PROTO_UNLOCK(psc);

	/* query the port's media for its current baudrate */
	memset(&ifmr, 0, sizeof(ifmr));
	error = if_ioctl(ifp_port, SIOCGIFMEDIA, (void *)&ifmr);
	if (error == 0) {
		linkspeed = ifmedia_baudrate(ifmr.ifm_active);
	} else {
		/* e.g. the driver does not support media ioctls */
		linkspeed = 0;
	}

	LAGG_PROTO_LOCK(psc);
	pport->lpp_linkspeed = linkspeed;
	LAGG_PROTO_UNLOCK(psc);
	/* defer the aggregate linkspeed recomputation to the workqueue */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
437 
/*
 * Common detach for protocols created with lagg_proto_alloc():
 * just release the protocol instance.
 */
void
lagg_common_detach(struct lagg_proto_softc *psc)
{

	lagg_proto_free(psc);
}
444 
445 int
446 lagg_none_attach(struct lagg_softc *sc, struct lagg_proto_softc **pscp)
447 {
448 
449 	*pscp = NULL;
450 	return 0;
451 }
452 
453 int
454 lagg_fail_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
455 {
456 	struct lagg_proto_softc *psc;
457 	struct lagg_failover *fovr;
458 
459 	psc = lagg_proto_alloc(LAGG_PROTO_FAILOVER, sc);
460 	if (psc == NULL)
461 		return ENOMEM;
462 
463 	fovr = psc->psc_ctx;
464 	fovr->fo_rx_all = true;
465 	lagg_work_set(&psc->psc_work_linkspeed,
466 	    lagg_fail_linkspeed_work, psc);
467 
468 	*xpsc = psc;
469 	return 0;
470 }
471 
472 int
473 lagg_fail_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
474 {
475 	struct ifnet *ifp;
476 	struct lagg_port *lp;
477 	struct psref psref;
478 
479 	lp = lagg_link_active(psc, NULL, &psref);
480 	if (lp == NULL) {
481 		ifp = &psc->psc_softc->sc_if;
482 		if_statinc(ifp, if_oerrors);
483 		m_freem(m);
484 		return ENOENT;
485 	}
486 
487 	lagg_output(psc->psc_softc, lp, m);
488 	lagg_port_putref(lp, &psref);
489 	return 0;
490 }
491 
492 struct mbuf *
493 lagg_fail_input(struct lagg_proto_softc *psc, struct lagg_port *lp,
494     struct mbuf *m)
495 {
496 	struct lagg_failover *fovr;
497 	struct lagg_port *lp0;
498 	struct ifnet *ifp;
499 	struct psref psref;
500 
501 	fovr = psc->psc_ctx;
502 	if (atomic_load_relaxed(&fovr->fo_rx_all))
503 		return m;
504 
505 	lp0 = lagg_link_active(psc, NULL, &psref);
506 	if (lp0 == NULL) {
507 		goto drop;
508 	}
509 
510 	if (lp0 != lp) {
511 		lagg_port_putref(lp0, &psref);
512 		goto drop;
513 	}
514 
515 	lagg_port_putref(lp0, &psref);
516 
517 	return m;
518 drop:
519 	ifp = &psc->psc_softc->sc_if;
520 	if_statinc(ifp, if_ierrors);
521 	m_freem(m);
522 	return NULL;
523 }
524 
525 void
526 lagg_fail_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
527     struct laggreqport *resp)
528 {
529 	struct lagg_failover *fovr;
530 	struct lagg_proto_port *pport;
531 	struct lagg_port *lp0;
532 	struct psref psref;
533 
534 	fovr = psc->psc_ctx;
535 	pport = lp->lp_proto_ctx;
536 
537 	if (pport->lpp_active) {
538 		lp0 = lagg_link_active(psc, NULL, &psref);
539 		if (lp0 == lp) {
540 			SET(resp->rp_flags,
541 			    (LAGG_PORT_ACTIVE |
542 			    LAGG_PORT_COLLECTING |
543 			    LAGG_PORT_DISTRIBUTING));
544 		} else {
545 			if (fovr->fo_rx_all) {
546 				SET(resp->rp_flags,
547 				    LAGG_PORT_COLLECTING);
548 			}
549 		}
550 
551 		if (lp0 != NULL)
552 			lagg_port_putref(lp0, &psref);
553 	}
554 }
555 
556 int
557 lagg_fail_ioctl(struct lagg_proto_softc *psc, struct laggreqproto *lreq)
558 {
559 	struct lagg_failover *fovr;
560 	struct laggreq_fail *rpfail;
561 	int error;
562 	bool set;
563 
564 	error = 0;
565 	fovr = psc->psc_ctx;
566 	rpfail = &lreq->rp_fail;
567 
568 	switch (rpfail->command) {
569 	case LAGGIOC_FAILSETFLAGS:
570 	case LAGGIOC_FAILCLRFLAGS:
571 		set = (rpfail->command == LAGGIOC_FAILSETFLAGS) ?
572 			true : false;
573 
574 		if (ISSET(rpfail->flags, LAGGREQFAIL_RXALL))
575 			fovr->fo_rx_all = set;
576 		break;
577 	default:
578 		error = ENOTTY;
579 		break;
580 	}
581 
582 	return error;
583 }
584 
585 void
586 lagg_fail_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
587 {
588 	struct lagg_proto_softc *psc = xpsc;
589 	struct lagg_proto_port *pport;
590 	struct lagg_port *lp;
591 	struct psref psref;
592 	uint64_t linkspeed;
593 
594 	kpreempt_disable();
595 	lp = lagg_link_active(psc, NULL, &psref);
596 	if (lp != NULL) {
597 		pport = lp->lp_proto_ctx;
598 		LAGG_PROTO_LOCK(psc);
599 		linkspeed = pport->lpp_linkspeed;
600 		LAGG_PROTO_UNLOCK(psc);
601 		lagg_port_putref(lp, &psref);
602 	} else {
603 		linkspeed = 0;
604 	}
605 	kpreempt_enable();
606 
607 	LAGG_LOCK(psc->psc_softc);
608 	lagg_set_linkspeed(psc->psc_softc, linkspeed);
609 	LAGG_UNLOCK(psc->psc_softc);
610 }
611 
612 int
613 lagg_lb_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
614 {
615 	struct lagg_proto_softc *psc;
616 	struct lagg_lb *lb;
617 
618 	psc = lagg_proto_alloc(LAGG_PROTO_LOADBALANCE, sc);
619 	if (psc == NULL)
620 		return ENOMEM;
621 
622 	lb = psc->psc_ctx;
623 	lb->lb_pmaps.maps_activepmap = 0;
624 	lagg_work_set(&psc->psc_work_linkspeed,
625 	    lagg_lb_linkspeed_work, psc);
626 
627 	*xpsc = psc;
628 	return 0;
629 }
630 
/*
 * Loadbalance: start the port and publish it in the port map.  The
 * standby map is rebuilt as a copy of the active map with lp
 * appended, the maps are switched, and pserialize_perform() waits
 * until no reader can still observe the old map.
 */
void
lagg_lb_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t n;

	lb = psc->psc_ctx;
	lagg_common_startport(psc, lp);

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);

	/* copy the current map, then append the new port */
	*pm_next = *pm_act;

	/*
	 * NOTE(review): no bounds check against LAGG_MAX_PORTS here —
	 * presumably the caller limits the number of ports; confirm.
	 */
	n = pm_next->pm_nports;
	pm_next->pm_ports[n] = lp;

	n++;
	pm_next->pm_nports = n;

	lagg_portmap_switch(&lb->lb_pmaps);
	LAGG_PROTO_UNLOCK(psc);
	/* drain readers still using the previous map */
	pserialize_perform(psc->psc_psz);
}
657 
/*
 * Loadbalance: remove lp from the port map and stop the port.  The
 * standby map is rebuilt from the active one with lp filtered out,
 * then published; pserialize_perform() drains readers of the old
 * map before the port itself is stopped.
 */
void
lagg_lb_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t i, n;

	lb = psc->psc_ctx;

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);
	n = 0;

	/* copy every port except the one being removed */
	for (i = 0; i < pm_act->pm_nports; i++) {
		if (pm_act->pm_ports[i] == lp)
			continue;

		pm_next->pm_ports[n] = pm_act->pm_ports[i];
		n++;
	}

	pm_next->pm_nports = n;

	lagg_portmap_switch(&lb->lb_pmaps);
	LAGG_PROTO_UNLOCK(psc);
	/* drain readers still using the previous map */
	pserialize_perform(psc->psc_psz);

	lagg_common_stopport(psc, lp);
}
688 
/*
 * Loadbalance output: choose an output port by hashing the packet
 * and indexing the active port map.  lagg_link_active() starts its
 * search at the chosen port, so a port whose link is down is
 * transparently skipped in favor of the next active one.  Consumes
 * (frees) the mbuf and counts an output error when no port is
 * usable.
 */
int
lagg_lb_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm;
	struct lagg_port *lp, *lp0;
	struct ifnet *ifp;
	struct psref psref;
	uint32_t hash;
	int s;

	lb = psc->psc_ctx;
	hash = lagg_hashmbuf(psc->psc_softc, m);

	/* read section keeps the port map stable while we pick a port */
	s = pserialize_read_enter();

	pm = lagg_portmap_active(&lb->lb_pmaps);
	if (__predict_true(pm->pm_nports != 0)) {
		hash %= pm->pm_nports;
		lp0 = pm->pm_ports[hash];
		/* acquires a psref, valid beyond the read section */
		lp = lagg_link_active(psc, lp0->lp_proto_ctx, &psref);
	} else {
		lp = NULL;
	}

	pserialize_read_exit(s);

	if (__predict_false(lp == NULL)) {
		ifp = &psc->psc_softc->sc_if;
		if_statinc(ifp, if_oerrors);
		m_freem(m);
		return ENOENT;
	}

	lagg_output(psc->psc_softc, lp, m);
	lagg_port_putref(lp, &psref);

	return 0;
}
728 
729 struct mbuf *
730 lagg_lb_input(struct lagg_proto_softc *psc __unused,
731     struct lagg_port *lp __unused, struct mbuf *m)
732 {
733 
734 	return m;
735 }
736 
737 void
738 lagg_lb_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
739     struct laggreqport *resp)
740 {
741 	struct lagg_proto_port *pport;
742 
743 	pport = lp->lp_proto_ctx;
744 
745 	if (pport->lpp_active) {
746 		SET(resp->rp_flags, LAGG_PORT_ACTIVE |
747 		    LAGG_PORT_COLLECTING | LAGG_PORT_DISTRIBUTING);
748 	}
749 }
750 
751 static void
752 lagg_lb_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
753 {
754 	struct lagg_proto_softc *psc = xpsc;
755 	struct lagg_proto_port *pport;
756 	uint64_t linkspeed, l;
757 
758 	linkspeed = 0;
759 
760 	LAGG_PROTO_LOCK(psc); /* acquired to refer lpp_linkspeed */
761 	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
762 	    struct lagg_proto_port, lpp_entry) {
763 		if (pport->lpp_active) {
764 			l = pport->lpp_linkspeed;
765 			linkspeed = MAX(linkspeed, l);
766 		}
767 	}
768 	LAGG_PROTO_UNLOCK(psc);
769 
770 	LAGG_LOCK(psc->psc_softc);
771 	lagg_set_linkspeed(psc->psc_softc, linkspeed);
772 	LAGG_UNLOCK(psc->psc_softc);
773 }
774