Home | History | Annotate | Line # | Download | only in lagg
if_laggproto.c revision 1.11
      1 /*	$NetBSD: if_laggproto.c,v 1.11 2024/04/04 07:35:01 yamaguchi Exp $	*/
      2 
      3 /*-
      4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
      5  *
      6  * Copyright (c)2021 Internet Initiative Japan, Inc.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28  * SUCH DAMAGE.
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: if_laggproto.c,v 1.11 2024/04/04 07:35:01 yamaguchi Exp $");
     33 
#include <sys/param.h>
#include <sys/types.h>

#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pslist.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/lagg/if_lagg.h>
#include <net/lagg/if_laggproto.h>
     51 
/*
 * Common per-protocol state shared by the failover and
 * loadbalance implementations.
 */
struct lagg_proto_softc {
	struct lagg_softc	*psc_softc;	/* backpointer to the lagg interface */
	struct pslist_head	 psc_ports;	/* ports, sorted by lp_prio */
	kmutex_t		 psc_lock;	/* protects the fields below */
	pserialize_t		 psc_psz;	/* read-side sync for psc_ports */
	size_t			 psc_ctxsiz;	/* size of psc_ctx, 0 if none */
	void			*psc_ctx;	/* protocol-private context */
	size_t			 psc_nactports;	/* number of link-up ports */
	struct workqueue	*psc_workq;	/* deferred work queue */
	struct lagg_work	 psc_work_linkspeed; /* recompute aggregate speed */
};
     63 
/*
 * Locking notes:
 * - The members of struct lagg_proto_softc are protected by
 *   psc_lock (an adaptive mutex)
 * - psc_ports is protected by pserialize (psc_psz)
 *   - Updates to psc_ports are serialized by sc_lock in
 *     struct lagg_softc
 * - Other locking notes are described in if_laggproto.h
 */
     73 
/* Protocol-private context (psc_ctx) for the failover protocol. */
struct lagg_failover {
	bool		 fo_rx_all;	/* accept rx on every port, not only the active one */
};
     77 
/* One snapshot of the ports available for loadbalance transmit. */
struct lagg_portmap {
	struct lagg_port	*pm_ports[LAGG_MAX_PORTS];
	size_t			 pm_nports;	/* valid entries in pm_ports */
};
     82 
/*
 * Double-buffered port maps: readers use the active map while an
 * updater builds the other one and then switches the index.
 */
struct lagg_portmaps {
	struct lagg_portmap	 maps_pmap[2];
	size_t			 maps_activepmap;	/* index (0 or 1) of the active map */
};
     87 
/* Protocol-private context (psc_ctx) for the loadbalance protocol. */
struct lagg_lb {
	struct lagg_portmaps	 lb_pmaps;
};
     91 
/* Per-port state tracked by the common protocol code. */
struct lagg_proto_port {
	struct pslist_entry	 lpp_entry;	/* link on psc_ports */
	struct lagg_port	*lpp_laggport;	/* backpointer to the port */
	uint64_t		 lpp_linkspeed;	/* cached media baudrate */
	bool			 lpp_active;	/* link is up */
	bool			 lpp_running;	/* port has been started */
};
     99 
/* Convenience wrappers for the protocol softc mutex (psc_lock). */
#define LAGG_PROTO_LOCK(_psc)	mutex_enter(&(_psc)->psc_lock)
#define LAGG_PROTO_UNLOCK(_psc)	mutex_exit(&(_psc)->psc_lock)
#define LAGG_PROTO_LOCKED(_psc)	mutex_owned(&(_psc)->psc_lock)
    103 
    104 static struct lagg_proto_softc *
    105 		lagg_proto_alloc(lagg_proto, struct lagg_softc *);
    106 static void	lagg_proto_free(struct lagg_proto_softc *);
    107 static void	lagg_proto_insert_port(struct lagg_proto_softc *,
    108 		    struct lagg_proto_port *);
    109 static void	lagg_proto_remove_port(struct lagg_proto_softc *,
    110 		    struct lagg_proto_port *);
    111 static struct lagg_port *
    112 		lagg_link_active(struct lagg_proto_softc *psc,
    113 		    struct lagg_proto_port *, struct psref *);
    114 static void	lagg_fail_linkspeed_work(struct lagg_work *, void *);
    115 static void	lagg_lb_linkspeed_work(struct lagg_work*,
    116 		    void *);
    117 static void	lagg_common_linkstate(struct lagg_proto_softc *,
    118 		    struct lagg_port *);
    119 
    120 static inline struct lagg_portmap *
    121 lagg_portmap_active(struct lagg_portmaps *maps)
    122 {
    123 	size_t i;
    124 
    125 	i = atomic_load_consume(&maps->maps_activepmap);
    126 
    127 	return &maps->maps_pmap[i];
    128 }
    129 
    130 static inline struct lagg_portmap *
    131 lagg_portmap_next(struct lagg_portmaps *maps)
    132 {
    133 	size_t i;
    134 
    135 	i = atomic_load_consume(&maps->maps_activepmap);
    136 	i &= 0x1;
    137 	i ^= 0x1;
    138 
    139 	return &maps->maps_pmap[i];
    140 }
    141 
    142 static inline void
    143 lagg_portmap_switch(struct lagg_portmaps *maps)
    144 {
    145 	size_t i;
    146 
    147 	i = atomic_load_consume(&maps->maps_activepmap);
    148 	i &= 0x1;
    149 	i ^= 0x1;
    150 
    151 	atomic_store_release(&maps->maps_activepmap, i);
    152 }
    153 
    154 static struct lagg_proto_softc *
    155 lagg_proto_alloc(lagg_proto pr, struct lagg_softc *sc)
    156 {
    157 	struct lagg_proto_softc *psc;
    158 	char xnamebuf[MAXCOMLEN];
    159 	size_t ctxsiz;
    160 
    161 	switch (pr) {
    162 	case LAGG_PROTO_FAILOVER:
    163 		ctxsiz = sizeof(struct lagg_failover);
    164 		break;
    165 	case LAGG_PROTO_LOADBALANCE:
    166 		ctxsiz = sizeof(struct lagg_lb);
    167 		break;
    168 	default:
    169 		ctxsiz = 0;
    170 	}
    171 
    172 	psc = kmem_zalloc(sizeof(*psc), KM_NOSLEEP);
    173 	if (psc == NULL)
    174 		return NULL;
    175 
    176 	psc->psc_workq = lagg_workq_create(xnamebuf,
    177 		    PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
    178 	if (psc->psc_workq == NULL) {
    179 		LAGG_LOG(sc, LOG_ERR, "workqueue create failed\n");
    180 		kmem_free(psc, sizeof(*psc));
    181 		return NULL;
    182 	}
    183 
    184 	if (ctxsiz > 0) {
    185 		psc->psc_ctx = kmem_zalloc(ctxsiz, KM_NOSLEEP);
    186 		if (psc->psc_ctx == NULL) {
    187 			lagg_workq_destroy(psc->psc_workq);
    188 			kmem_free(psc, sizeof(*psc));
    189 			return NULL;
    190 		}
    191 
    192 		psc->psc_ctxsiz = ctxsiz;
    193 	}
    194 
    195 	PSLIST_INIT(&psc->psc_ports);
    196 	psc->psc_psz = pserialize_create();
    197 	mutex_init(&psc->psc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
    198 	psc->psc_softc = sc;
    199 
    200 	return psc;
    201 }
    202 
/*
 * Release a protocol softc allocated by lagg_proto_alloc().
 */
static void
lagg_proto_free(struct lagg_proto_softc *psc)
{

	/* Drain deferred work before destroying the workqueue. */
	lagg_workq_wait(psc->psc_workq, &psc->psc_work_linkspeed);
	pserialize_destroy(psc->psc_psz);
	mutex_destroy(&psc->psc_lock);
	lagg_workq_destroy(psc->psc_workq);

	if (psc->psc_ctxsiz > 0)
		kmem_free(psc->psc_ctx, psc->psc_ctxsiz);

	kmem_free(psc, sizeof(*psc));
}
    217 
/*
 * Find an active port, scanning from "pport" to the end of the
 * list first and wrapping around to the head if nothing is found
 * there.  Pass pport == NULL to search from the head only.
 * On success a psref reference is acquired for the returned port;
 * the caller must release it with lagg_port_putref().
 * Returns NULL when no port is active.
 */
static struct lagg_port *
lagg_link_active(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport, struct psref *psref)
{
	struct lagg_port *lp;
	int s;

	lp = NULL;
	s = pserialize_read_enter();

	/* First pass: from pport (if given) to the tail. */
	for (;pport != NULL;
	    pport = PSLIST_READER_NEXT(pport,
	    struct lagg_proto_port, lpp_entry)) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			goto done;
		}
	}

	/* Second pass: wrap around and scan from the head. */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			break;
		}
	}
done:
	/* Acquire the reference before leaving the read section. */
	if (lp != NULL)
		lagg_port_getref(lp, psref);
	pserialize_read_exit(s);

	return lp;
}
    251 
    252 int
    253 lagg_common_allocport(struct lagg_proto_softc *psc, struct lagg_port *lp)
    254 {
    255 	struct lagg_proto_port *pport;
    256 
    257 	KASSERT(LAGG_LOCKED(psc->psc_softc));
    258 
    259 	pport = kmem_zalloc(sizeof(*pport), KM_NOSLEEP);
    260 	if (pport == NULL)
    261 		return ENOMEM;
    262 
    263 	PSLIST_ENTRY_INIT(pport, lpp_entry);
    264 	pport->lpp_laggport = lp;
    265 	lp->lp_proto_ctx = (void *)pport;
    266 	return 0;
    267 }
    268 
    269 void
    270 lagg_common_freeport(struct lagg_proto_softc *psc, struct lagg_port *lp)
    271 {
    272 	struct lagg_proto_port *pport;
    273 
    274 	pport = lp->lp_proto_ctx;
    275 	KASSERT(!pport->lpp_running);
    276 	lp->lp_proto_ctx = NULL;
    277 
    278 	kmem_free(pport, sizeof(*pport));
    279 }
    280 
/*
 * Insert pport into psc_ports, keeping the list sorted by
 * ascending port priority (lp_prio).  Ports with equal priority
 * keep their insertion order.
 */
static void
lagg_proto_insert_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{
	struct lagg_proto_port *pport0;
	struct lagg_port *lp, *lp0;
	bool insert_after;

	insert_after = false;
	lp = pport->lpp_laggport;

	LAGG_PROTO_LOCK(psc);
	/* Find the first entry with a strictly higher priority. */
	PSLIST_WRITER_FOREACH(pport0, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		lp0 = pport0->lpp_laggport;
		if (lp0->lp_prio > lp->lp_prio)
			break;

		/* Reached the tail without finding one: append. */
		if (PSLIST_WRITER_NEXT(pport0,
		    struct lagg_proto_port, lpp_entry) == NULL) {
			insert_after = true;
			break;
		}
	}

	if (pport0 == NULL) {
		/* The list was empty. */
		PSLIST_WRITER_INSERT_HEAD(&psc->psc_ports, pport,
		    lpp_entry);
	} else if (insert_after) {
		PSLIST_WRITER_INSERT_AFTER(pport0, pport, lpp_entry);
	} else {
		PSLIST_WRITER_INSERT_BEFORE(pport0, pport, lpp_entry);
	}
	LAGG_PROTO_UNLOCK(psc);
}
    316 
/*
 * Unlink pport from psc_ports and wait for all pserialize readers
 * to drain, so the caller may safely tear the port down afterwards.
 */
static void
lagg_proto_remove_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_REMOVE(pport, lpp_entry);
	/* Ensure no reader still holds a pointer to the entry. */
	pserialize_perform(psc->psc_psz);
	LAGG_PROTO_UNLOCK(psc);
}
    327 
/*
 * Start a port: insert it into the protocol port list, mark it
 * running, and evaluate its initial link state.
 */
void
lagg_common_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;

	pport = lp->lp_proto_ctx;
	lagg_proto_insert_port(psc, pport);

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = true;
	LAGG_PROTO_UNLOCK(psc);

	/* Pick up the current link state of the underlying interface. */
	lagg_common_linkstate(psc, lp);
}
    342 
/*
 * Stop a port: clear its running flag, remove it from the port
 * list, and update the active-port accounting and the aggregate
 * link state accordingly.
 */
void
lagg_common_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp;

	pport = lp->lp_proto_ctx;

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = false;
	LAGG_PROTO_UNLOCK(psc);

	/* Also waits for pserialize readers to drain. */
	lagg_proto_remove_port(psc, pport);

	if (pport->lpp_active) {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* The last active port went away: aggregate is down. */
		if (psc->psc_nactports == 0) {
			ifp = &psc->psc_softc->sc_if;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}

		pport->lpp_active = false;
	}

	/* Recompute the aggregate link speed asynchronously. */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
    371 static void
    372 lagg_common_linkstate(struct lagg_proto_softc *psc, struct lagg_port *lp)
    373 {
    374 
    375 	IFNET_ASSERT_UNLOCKED(lp->lp_ifp);
    376 
    377 	IFNET_LOCK(lp->lp_ifp);
    378 	lagg_common_linkstate_ifnet_locked(psc, lp);
    379 	IFNET_UNLOCK(lp->lp_ifp);
    380 }
    381 
/*
 * Re-evaluate the link state of one port and propagate the result
 * to the lagg interface: the aggregate is UP while at least one
 * port is active.  Also refreshes the port's cached link speed and
 * schedules the protocol's linkspeed work.
 * Called with the port's ifnet lock held.
 */
void
lagg_common_linkstate_ifnet_locked(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp, *ifp_port;
	struct ifmediareq ifmr;
	uint64_t linkspeed;
	bool is_active;
	int error;

	pport = lp->lp_proto_ctx;
	is_active = lagg_portactive(lp);
	ifp_port = lp->lp_ifp;

	KASSERT(IFNET_LOCKED(ifp_port));

	LAGG_PROTO_LOCK(psc);
	/* Nothing to do if the port is stopped or the state is unchanged. */
	if (!pport->lpp_running ||
	    pport->lpp_active == is_active) {
		LAGG_PROTO_UNLOCK(psc);
		return;
	}

	ifp = &psc->psc_softc->sc_if;
	pport->lpp_active = is_active;

	if (is_active) {
		psc->psc_nactports++;
		/* First active port: the aggregate comes up. */
		if (psc->psc_nactports == 1)
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* Last active port: the aggregate goes down. */
		if (psc->psc_nactports == 0)
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
	LAGG_PROTO_UNLOCK(psc);

	/* Query the port's media for its current baudrate. */
	memset(&ifmr, 0, sizeof(ifmr));
	error = if_ioctl(ifp_port, SIOCGIFMEDIA, (void *)&ifmr);
	if (error == 0) {
		linkspeed = ifmedia_baudrate(ifmr.ifm_active);
	} else {
		linkspeed = 0;
	}

	LAGG_PROTO_LOCK(psc);
	pport->lpp_linkspeed = linkspeed;
	LAGG_PROTO_UNLOCK(psc);
	/* Defer the aggregate linkspeed recalculation to the workqueue. */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
    434 
/*
 * Common detach path: release all protocol state.
 */
void
lagg_common_detach(struct lagg_proto_softc *psc)
{

	lagg_proto_free(psc);
}
    441 
/*
 * Attach the null protocol: no per-protocol softc is allocated.
 */
int
lagg_none_attach(struct lagg_softc *sc, struct lagg_proto_softc **pscp)
{

	*pscp = NULL;
	return 0;
}
    449 
    450 int
    451 lagg_fail_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
    452 {
    453 	struct lagg_proto_softc *psc;
    454 	struct lagg_failover *fovr;
    455 
    456 	psc = lagg_proto_alloc(LAGG_PROTO_FAILOVER, sc);
    457 	if (psc == NULL)
    458 		return ENOMEM;
    459 
    460 	fovr = psc->psc_ctx;
    461 	fovr->fo_rx_all = true;
    462 	lagg_work_set(&psc->psc_work_linkspeed,
    463 	    lagg_fail_linkspeed_work, psc);
    464 
    465 	*xpsc = psc;
    466 	return 0;
    467 }
    468 
    469 int
    470 lagg_fail_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
    471 {
    472 	struct ifnet *ifp;
    473 	struct lagg_port *lp;
    474 	struct psref psref;
    475 
    476 	lp = lagg_link_active(psc, NULL, &psref);
    477 	if (lp == NULL) {
    478 		ifp = &psc->psc_softc->sc_if;
    479 		if_statinc(ifp, if_oerrors);
    480 		m_freem(m);
    481 		return ENOENT;
    482 	}
    483 
    484 	lagg_output(psc->psc_softc, lp, m);
    485 	lagg_port_putref(lp, &psref);
    486 	return 0;
    487 }
    488 
    489 struct mbuf *
    490 lagg_fail_input(struct lagg_proto_softc *psc, struct lagg_port *lp,
    491     struct mbuf *m)
    492 {
    493 	struct lagg_failover *fovr;
    494 	struct lagg_port *lp0;
    495 	struct ifnet *ifp;
    496 	struct psref psref;
    497 
    498 	fovr = psc->psc_ctx;
    499 	if (atomic_load_relaxed(&fovr->fo_rx_all))
    500 		return m;
    501 
    502 	lp0 = lagg_link_active(psc, NULL, &psref);
    503 	if (lp0 == NULL) {
    504 		goto drop;
    505 	}
    506 
    507 	if (lp0 != lp) {
    508 		lagg_port_putref(lp0, &psref);
    509 		goto drop;
    510 	}
    511 
    512 	lagg_port_putref(lp0, &psref);
    513 
    514 	return m;
    515 drop:
    516 	ifp = &psc->psc_softc->sc_if;
    517 	if_statinc(ifp, if_ierrors);
    518 	m_freem(m);
    519 	return NULL;
    520 }
    521 
    522 void
    523 lagg_fail_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
    524     struct laggreqport *resp)
    525 {
    526 	struct lagg_failover *fovr;
    527 	struct lagg_proto_port *pport;
    528 	struct lagg_port *lp0;
    529 	struct psref psref;
    530 
    531 	fovr = psc->psc_ctx;
    532 	pport = lp->lp_proto_ctx;
    533 
    534 	if (pport->lpp_active) {
    535 		lp0 = lagg_link_active(psc, NULL, &psref);
    536 		if (lp0 == lp) {
    537 			SET(resp->rp_flags,
    538 			    (LAGG_PORT_ACTIVE |
    539 			    LAGG_PORT_COLLECTING |
    540 			    LAGG_PORT_DISTRIBUTING));
    541 		} else {
    542 			if (fovr->fo_rx_all) {
    543 				SET(resp->rp_flags,
    544 				    LAGG_PORT_COLLECTING);
    545 			}
    546 		}
    547 
    548 		if (lp0 != NULL)
    549 			lagg_port_putref(lp0, &psref);
    550 	}
    551 }
    552 
    553 int
    554 lagg_fail_ioctl(struct lagg_proto_softc *psc, struct laggreqproto *lreq)
    555 {
    556 	struct lagg_failover *fovr;
    557 	struct laggreq_fail *rpfail;
    558 	int error;
    559 	bool set;
    560 
    561 	error = 0;
    562 	fovr = psc->psc_ctx;
    563 	rpfail = &lreq->rp_fail;
    564 
    565 	switch (rpfail->command) {
    566 	case LAGGIOC_FAILSETFLAGS:
    567 	case LAGGIOC_FAILCLRFLAGS:
    568 		set = (rpfail->command == LAGGIOC_FAILSETFLAGS) ?
    569 			true : false;
    570 
    571 		if (ISSET(rpfail->flags, LAGGREQFAIL_RXALL))
    572 			fovr->fo_rx_all = set;
    573 		break;
    574 	default:
    575 		error = ENOTTY;
    576 		break;
    577 	}
    578 
    579 	return error;
    580 }
    581 
    582 void
    583 lagg_fail_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
    584 {
    585 	struct lagg_proto_softc *psc = xpsc;
    586 	struct lagg_proto_port *pport;
    587 	struct lagg_port *lp;
    588 	struct psref psref;
    589 	uint64_t linkspeed;
    590 
    591 	kpreempt_disable();
    592 	lp = lagg_link_active(psc, NULL, &psref);
    593 	if (lp != NULL) {
    594 		pport = lp->lp_proto_ctx;
    595 		LAGG_PROTO_LOCK(psc);
    596 		linkspeed = pport->lpp_linkspeed;
    597 		LAGG_PROTO_UNLOCK(psc);
    598 		lagg_port_putref(lp, &psref);
    599 	} else {
    600 		linkspeed = 0;
    601 	}
    602 	kpreempt_enable();
    603 
    604 	LAGG_LOCK(psc->psc_softc);
    605 	lagg_set_linkspeed(psc->psc_softc, linkspeed);
    606 	LAGG_UNLOCK(psc->psc_softc);
    607 }
    608 
    609 int
    610 lagg_lb_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
    611 {
    612 	struct lagg_proto_softc *psc;
    613 	struct lagg_lb *lb;
    614 
    615 	psc = lagg_proto_alloc(LAGG_PROTO_LOADBALANCE, sc);
    616 	if (psc == NULL)
    617 		return ENOMEM;
    618 
    619 	lb = psc->psc_ctx;
    620 	lb->lb_pmaps.maps_activepmap = 0;
    621 	lagg_work_set(&psc->psc_work_linkspeed,
    622 	    lagg_lb_linkspeed_work, psc);
    623 
    624 	*xpsc = psc;
    625 	return 0;
    626 }
    627 
/*
 * Start a port under loadbalance: run the common start path, then
 * publish a new port map that includes the port.  The update is
 * built in the inactive buffer and switched atomically so that
 * concurrent transmitters always see a consistent map.
 */
void
lagg_lb_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t n;

	lb = psc->psc_ctx;
	lagg_common_startport(psc, lp);

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);

	/* Copy the active map and append the new port. */
	*pm_next = *pm_act;

	n = pm_next->pm_nports;
	pm_next->pm_ports[n] = lp;

	n++;
	pm_next->pm_nports = n;

	lagg_portmap_switch(&lb->lb_pmaps);
	/* Wait until no reader uses the old map before it is reused. */
	pserialize_perform(psc->psc_psz);
	LAGG_PROTO_UNLOCK(psc);
}
    654 
/*
 * Stop a port under loadbalance: publish a new port map without
 * the port, wait for readers of the old map to drain, then run
 * the common stop path.
 */
void
lagg_lb_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t i, n;

	lb = psc->psc_ctx;

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);
	n = 0;

	/* Copy every port except the one being removed. */
	for (i = 0; i < pm_act->pm_nports; i++) {
		if (pm_act->pm_ports[i] == lp)
			continue;

		pm_next->pm_ports[n] = pm_act->pm_ports[i];
		n++;
	}

	pm_next->pm_nports = n;

	lagg_portmap_switch(&lb->lb_pmaps);
	/* Wait until no reader uses the old map before it is reused. */
	pserialize_perform(psc->psc_psz);
	LAGG_PROTO_UNLOCK(psc);

	lagg_common_stopport(psc, lp);
}
    685 
/*
 * Transmit for loadbalance: hash the packet and use the hash to
 * pick a port from the active port map; if the chosen port is not
 * active, lagg_link_active() falls back to another active port.
 * Drops the packet and counts an output error when no port is
 * usable.
 */
int
lagg_lb_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm;
	struct lagg_port *lp, *lp0;
	struct ifnet *ifp;
	struct psref psref;
	uint32_t hash;
	int s;

	lb = psc->psc_ctx;
	hash = lagg_hashmbuf(psc->psc_softc, m);

	s = pserialize_read_enter();

	pm = lagg_portmap_active(&lb->lb_pmaps);
	if (__predict_true(pm->pm_nports != 0)) {
		/* Map the hash onto the current set of ports. */
		hash %= pm->pm_nports;
		lp0 = pm->pm_ports[hash];
		lp = lagg_link_active(psc, lp0->lp_proto_ctx, &psref);
	} else {
		lp = NULL;
	}

	pserialize_read_exit(s);

	if (__predict_false(lp == NULL)) {
		ifp = &psc->psc_softc->sc_if;
		if_statinc(ifp, if_oerrors);
		m_freem(m);
		return ENOENT;
	}

	lagg_output(psc->psc_softc, lp, m);
	lagg_port_putref(lp, &psref);

	return 0;
}
    725 
/*
 * Input for loadbalance: every port may receive, so the packet is
 * passed through unchanged.
 */
struct mbuf *
lagg_lb_input(struct lagg_proto_softc *psc __unused,
    struct lagg_port *lp __unused, struct mbuf *m)
{

	return m;
}
    733 
    734 void
    735 lagg_lb_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
    736     struct laggreqport *resp)
    737 {
    738 	struct lagg_proto_port *pport;
    739 
    740 	pport = lp->lp_proto_ctx;
    741 
    742 	if (pport->lpp_active) {
    743 		SET(resp->rp_flags, LAGG_PORT_ACTIVE |
    744 		    LAGG_PORT_COLLECTING | LAGG_PORT_DISTRIBUTING);
    745 	}
    746 }
    747 
/*
 * Workqueue handler for loadbalance: the aggregate link speed is
 * the maximum speed among all active ports.
 */
static void
lagg_lb_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
{
	struct lagg_proto_softc *psc = xpsc;
	struct lagg_proto_port *pport;
	uint64_t linkspeed, l;
	int s;

	linkspeed = 0;

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (pport->lpp_active) {
			/* Read the cached speed under the protocol lock. */
			LAGG_PROTO_LOCK(psc);
			l = pport->lpp_linkspeed;
			LAGG_PROTO_UNLOCK(psc);
			linkspeed = MAX(linkspeed, l);
		}
	}
	pserialize_read_exit(s);

	LAGG_LOCK(psc->psc_softc);
	lagg_set_linkspeed(psc->psc_softc, linkspeed);
	LAGG_UNLOCK(psc->psc_softc);
}
    774