Home | History | Annotate | Line # | Download | only in lagg
      1 /*	$NetBSD: if_laggproto.c,v 1.16 2024/09/26 06:08:24 rin Exp $	*/
      2 
      3 /*-
      4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
      5  *
      6  * Copyright (c)2021 Internet Initiative Japan, Inc.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28  * SUCH DAMAGE.
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: if_laggproto.c,v 1.16 2024/09/26 06:08:24 rin Exp $");
     33 
     34 #include <sys/param.h>
     35 #include <sys/types.h>
     36 
     37 #include <sys/evcnt.h>
     38 #include <sys/kmem.h>
     39 #include <sys/mbuf.h>
     40 #include <sys/mutex.h>
     41 #include <sys/pslist.h>
     42 #include <sys/syslog.h>
     43 #include <sys/workqueue.h>
     44 
     45 #include <net/if.h>
     46 #include <net/if_ether.h>
     47 #include <net/if_media.h>
     48 
     49 #include <net/lagg/if_lagg.h>
     50 #include <net/lagg/if_laggproto.h>
     51 
/*
 * Per-protocol instance state shared by the failover and
 * load-balance implementations in this file.
 */
struct lagg_proto_softc {
	struct lagg_softc	*psc_softc;	/* back-pointer to the lagg instance */
	struct pslist_head	 psc_ports;	/* ports, sorted by lp_prio */
	kmutex_t		 psc_lock;	/* serializes updates to this struct */
	pserialize_t		 psc_psz;	/* protects lockless readers of psc_ports */
	size_t			 psc_ctxsiz;	/* size of psc_ctx, 0 when unused */
	void			*psc_ctx;	/* protocol private context */
	size_t			 psc_nactports;	/* number of link-up ports */
	struct workqueue	*psc_workq;	/* workqueue for deferred work */
	struct lagg_work	 psc_work_linkspeed; /* recompute aggregate link speed */
};
     63 
/*
 * Locking notes:
 * - Members of struct lagg_proto_softc are protected by
 *   psc_lock (an adaptive mutex)
 * - psc_ports is protected by pserialize (psc_psz) and
 *   is updated exclusively under LAGG_PROTO_LOCK.
 * - Other locking notes are described in if_laggproto.h
 */
     72 
/* Failover protocol private context. */
struct lagg_failover {
	bool		 fo_rx_all;	/* accept RX on all ports, not only the active one */
};
     76 
/* One snapshot of the ports used for load-balance distribution. */
struct lagg_portmap {
	struct lagg_port	*pm_ports[LAGG_MAX_PORTS];	/* distributing ports */
	size_t			 pm_nports;	/* number of valid entries in pm_ports */
};
     81 
/*
 * Double-buffered portmaps: transmitters read the active entry in a
 * pserialize read section while updaters rebuild the other entry and
 * then switch maps_activepmap (see lagg_portmap_switch()).
 */
struct lagg_portmaps {
	struct lagg_portmap	 maps_pmap[2];
	size_t			 maps_activepmap;	/* index (0 or 1) of the active map */
};
     86 
/* Load-balance protocol private context. */
struct lagg_lb {
	struct lagg_portmaps	 lb_pmaps;	/* active/next distribution maps */
};
     90 
/* Per-port state common to the protocols implemented here. */
struct lagg_proto_port {
	struct pslist_entry	 lpp_entry;	/* entry on psc_ports */
	struct lagg_port	*lpp_laggport;	/* back-pointer to the lagg port */
	uint64_t		 lpp_linkspeed;	/* last media baudrate observed */
	bool			 lpp_active;	/* link is up */
	bool			 lpp_running;	/* started and linked on psc_ports */
};
     98 
/* Helpers for psc_lock, the mutex protecting struct lagg_proto_softc. */
#define LAGG_PROTO_LOCK(_psc)	mutex_enter(&(_psc)->psc_lock)
#define LAGG_PROTO_UNLOCK(_psc)	mutex_exit(&(_psc)->psc_lock)
#define LAGG_PROTO_LOCKED(_psc)	mutex_owned(&(_psc)->psc_lock)

/* Forward declarations; the definitions are below. */
static struct lagg_proto_softc *
		lagg_proto_alloc(lagg_proto, struct lagg_softc *);
static void	lagg_proto_free(struct lagg_proto_softc *);
static void	lagg_proto_insert_port(struct lagg_proto_softc *,
		    struct lagg_proto_port *);
static void	lagg_proto_remove_port(struct lagg_proto_softc *,
		    struct lagg_proto_port *);
static struct lagg_port *
		lagg_link_active(struct lagg_proto_softc *psc,
		    struct lagg_proto_port *, struct psref *);
static void	lagg_fail_linkspeed_work(struct lagg_work *, void *);
static void	lagg_lb_linkspeed_work(struct lagg_work*,
		    void *);
static void	lagg_common_linkstate(struct lagg_proto_softc *,
		    struct lagg_port *);
    118 
    119 static inline struct lagg_portmap *
    120 lagg_portmap_active(struct lagg_portmaps *maps)
    121 {
    122 	size_t i;
    123 
    124 	i = atomic_load_consume(&maps->maps_activepmap);
    125 
    126 	return &maps->maps_pmap[i];
    127 }
    128 
    129 static inline struct lagg_portmap *
    130 lagg_portmap_next(struct lagg_portmaps *maps)
    131 {
    132 	size_t i;
    133 
    134 	i = atomic_load_consume(&maps->maps_activepmap);
    135 	i ^= 0x1;
    136 
    137 	return &maps->maps_pmap[i];
    138 }
    139 
    140 static inline void
    141 lagg_portmap_switch(struct lagg_portmaps *maps)
    142 {
    143 	size_t i;
    144 
    145 	i = atomic_load_consume(&maps->maps_activepmap);
    146 	i &= 0x1;
    147 	i ^= 0x1;
    148 
    149 	atomic_store_release(&maps->maps_activepmap, i);
    150 }
    151 
    152 static struct lagg_proto_softc *
    153 lagg_proto_alloc(lagg_proto pr, struct lagg_softc *sc)
    154 {
    155 	struct lagg_proto_softc *psc;
    156 	char xnamebuf[MAXCOMLEN];
    157 	size_t ctxsiz;
    158 
    159 	switch (pr) {
    160 	case LAGG_PROTO_FAILOVER:
    161 		ctxsiz = sizeof(struct lagg_failover);
    162 		break;
    163 	case LAGG_PROTO_LOADBALANCE:
    164 		ctxsiz = sizeof(struct lagg_lb);
    165 		break;
    166 	default:
    167 		ctxsiz = 0;
    168 	}
    169 
    170 	psc = kmem_zalloc(sizeof(*psc), KM_NOSLEEP);
    171 	if (psc == NULL)
    172 		return NULL;
    173 
    174 	snprintf(xnamebuf, sizeof(xnamebuf), "%s.proto",
    175 	    sc->sc_if.if_xname);
    176 	psc->psc_workq = lagg_workq_create(xnamebuf,
    177 		    PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
    178 	if (psc->psc_workq == NULL) {
    179 		LAGG_LOG(sc, LOG_ERR, "workqueue create failed\n");
    180 		kmem_free(psc, sizeof(*psc));
    181 		return NULL;
    182 	}
    183 
    184 	if (ctxsiz > 0) {
    185 		psc->psc_ctx = kmem_zalloc(ctxsiz, KM_NOSLEEP);
    186 		if (psc->psc_ctx == NULL) {
    187 			lagg_workq_destroy(psc->psc_workq);
    188 			kmem_free(psc, sizeof(*psc));
    189 			return NULL;
    190 		}
    191 
    192 		psc->psc_ctxsiz = ctxsiz;
    193 	}
    194 
    195 	PSLIST_INIT(&psc->psc_ports);
    196 	psc->psc_psz = pserialize_create();
    197 	mutex_init(&psc->psc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
    198 	psc->psc_softc = sc;
    199 
    200 	return psc;
    201 }
    202 
/*
 * Release a protocol softc allocated by lagg_proto_alloc().
 * The port list must already be empty.
 */
static void
lagg_proto_free(struct lagg_proto_softc *psc)
{

	/* wait for pending linkspeed work before tearing anything down */
	lagg_workq_wait(psc->psc_workq, &psc->psc_work_linkspeed);
	pserialize_destroy(psc->psc_psz);
	mutex_destroy(&psc->psc_lock);
	lagg_workq_destroy(psc->psc_workq);
	PSLIST_DESTROY(&psc->psc_ports);

	if (psc->psc_ctxsiz > 0)
		kmem_free(psc->psc_ctx, psc->psc_ctxsiz);

	kmem_free(psc, sizeof(*psc));
}
    218 
/*
 * Find an active (link-up) port.  Ports at or after "pport" in the
 * priority-sorted list are preferred; pass pport == NULL to simply
 * take the first active port.  If no active port follows "pport",
 * the whole list is rescanned from the head.
 *
 * Runs in a pserialize read section.  On success a psref is acquired
 * on the returned port; the caller must drop it with
 * lagg_port_putref().  Returns NULL when no port is active.
 */
static struct lagg_port *
lagg_link_active(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport, struct psref *psref)
{
	struct lagg_port *lp;
	int s;

	lp = NULL;
	s = pserialize_read_enter();

	/* first pass: from "pport" to the end of the list */
	for (;pport != NULL;
	    pport = PSLIST_READER_NEXT(pport,
	    struct lagg_proto_port, lpp_entry)) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			goto done;
		}
	}

	/* second pass: the whole list from the head */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			break;
		}
	}
done:
	if (lp != NULL)
		lagg_port_getref(lp, psref);
	pserialize_read_exit(s);

	return lp;
}
    252 
    253 int
    254 lagg_common_allocport(struct lagg_proto_softc *psc, struct lagg_port *lp)
    255 {
    256 	struct lagg_proto_port *pport;
    257 
    258 	KASSERT(LAGG_LOCKED(psc->psc_softc));
    259 
    260 	pport = kmem_zalloc(sizeof(*pport), KM_NOSLEEP);
    261 	if (pport == NULL)
    262 		return ENOMEM;
    263 
    264 	PSLIST_ENTRY_INIT(pport, lpp_entry);
    265 	pport->lpp_laggport = lp;
    266 	lp->lp_proto_ctx = (void *)pport;
    267 	return 0;
    268 }
    269 
    270 void
    271 lagg_common_freeport(struct lagg_proto_softc *psc, struct lagg_port *lp)
    272 {
    273 	struct lagg_proto_port *pport;
    274 
    275 	pport = lp->lp_proto_ctx;
    276 	KASSERT(!pport->lpp_running);
    277 	lp->lp_proto_ctx = NULL;
    278 
    279 	kmem_free(pport, sizeof(*pport));
    280 }
    281 
/*
 * Link "pport" onto psc_ports keeping the list sorted by ascending
 * lp_prio (lowest value first).  The new port is inserted before the
 * first entry with a strictly greater priority, i.e. after any
 * existing ports of equal priority.  The list is read lockless via
 * pserialize, hence the PSLIST writer primitives under
 * LAGG_PROTO_LOCK.
 */
static void
lagg_proto_insert_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{
	struct lagg_proto_port *pport0;
	struct lagg_port *lp, *lp0;
	bool insert_after;

	insert_after = false;
	lp = pport->lpp_laggport;

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_FOREACH(pport0, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		lp0 = pport0->lpp_laggport;
		if (lp0->lp_prio > lp->lp_prio)
			break;

		/* reached the tail with no higher priority found: append */
		if (PSLIST_WRITER_NEXT(pport0,
		    struct lagg_proto_port, lpp_entry) == NULL) {
			insert_after = true;
			break;
		}
	}

	if (pport0 == NULL) {
		/* the list was empty */
		PSLIST_WRITER_INSERT_HEAD(&psc->psc_ports, pport,
		    lpp_entry);
	} else if (insert_after) {
		PSLIST_WRITER_INSERT_AFTER(pport0, pport, lpp_entry);
	} else {
		PSLIST_WRITER_INSERT_BEFORE(pport0, pport, lpp_entry);
	}
	LAGG_PROTO_UNLOCK(psc);
}
    317 
/*
 * Unlink "pport" from psc_ports and wait for all pserialize readers
 * to drain so nobody can still be traversing the entry.
 */
static void
lagg_proto_remove_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_REMOVE(pport, lpp_entry);
	LAGG_PROTO_UNLOCK(psc);
	pserialize_perform(psc->psc_psz);

	/* re-initialize for reuse */
	PSLIST_ENTRY_DESTROY(pport, lpp_entry);
	PSLIST_ENTRY_INIT(pport, lpp_entry);
}
    332 
/*
 * Start a port: link it on the protocol's port list, mark it running,
 * then evaluate the link state it already has.
 */
void
lagg_common_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;

	pport = lp->lp_proto_ctx;
	lagg_proto_insert_port(psc, pport);

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = true;
	LAGG_PROTO_UNLOCK(psc);

	/* pick up the port's current link state */
	lagg_common_linkstate(psc, lp);
}
    347 
/*
 * Stop a port: mark it not running, unlink it from the port list, and
 * account for the loss of an active port, bringing the lagg interface
 * down when it was the last one.  Finally reschedule the linkspeed
 * work so the aggregate speed is recomputed without this port.
 *
 * NOTE(review): lpp_active and psc_nactports are modified here after
 * psc_lock has been dropped; this looks safe because the port is
 * already unlinked (readers drained by lagg_proto_remove_port()) and
 * lpp_running is false, so the linkstate path will not touch them --
 * confirm against the locking notes in if_laggproto.h.
 */
void
lagg_common_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp;

	pport = lp->lp_proto_ctx;

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = false;
	LAGG_PROTO_UNLOCK(psc);

	lagg_proto_remove_port(psc, pport);

	if (pport->lpp_active) {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* the last active port takes the lagg interface down */
		if (psc->psc_nactports == 0) {
			ifp = &psc->psc_softc->sc_if;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}

		pport->lpp_active = false;
	}

	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
/*
 * Handle a link-state change of "lp".  Thin wrapper that takes
 * IFNET_LOCK, which lagg_common_linkstate_ifnet_locked() asserts.
 */
static void
lagg_common_linkstate(struct lagg_proto_softc *psc, struct lagg_port *lp)
{

	IFNET_ASSERT_UNLOCKED(lp->lp_ifp);

	IFNET_LOCK(lp->lp_ifp);
	lagg_common_linkstate_ifnet_locked(psc, lp);
	IFNET_UNLOCK(lp->lp_ifp);
}
    386 
/*
 * Process a link-state change of "lp" with its ifnet lock held.
 * Tracks the number of active ports and propagates LINK_STATE_UP/DOWN
 * to the lagg interface on the 0 <-> 1 transitions, then refreshes
 * the port's cached link speed and schedules the protocol's
 * linkspeed work.
 */
void
lagg_common_linkstate_ifnet_locked(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp, *ifp_port;
	struct ifmediareq ifmr;
	uint64_t linkspeed;
	bool is_active;
	int error;

	pport = lp->lp_proto_ctx;
	is_active = lagg_portactive(lp);
	ifp_port = lp->lp_ifp;

	KASSERT(IFNET_LOCKED(ifp_port));

	LAGG_PROTO_LOCK(psc);
	/* nothing to do for a stopped port or an unchanged state */
	if (!pport->lpp_running ||
	    pport->lpp_active == is_active) {
		LAGG_PROTO_UNLOCK(psc);
		return;
	}

	ifp = &psc->psc_softc->sc_if;
	pport->lpp_active = is_active;

	if (is_active) {
		/* the first active port brings the lagg interface up */
		psc->psc_nactports++;
		if (psc->psc_nactports == 1)
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* the last active port takes the lagg interface down */
		if (psc->psc_nactports == 0)
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
	LAGG_PROTO_UNLOCK(psc);

	/*
	 * Query the port's current media; done without psc_lock held,
	 * presumably because the ioctl may block -- see if_ioctl.
	 */
	memset(&ifmr, 0, sizeof(ifmr));
	error = if_ioctl(ifp_port, SIOCGIFMEDIA, (void *)&ifmr);
	if (error == 0) {
		linkspeed = ifmedia_baudrate(ifmr.ifm_active);
	} else {
		linkspeed = 0;
	}

	LAGG_PROTO_LOCK(psc);
	pport->lpp_linkspeed = linkspeed;
	LAGG_PROTO_UNLOCK(psc);
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
    439 
/*
 * Common detach: release the protocol softc.  All ports must have
 * been stopped and freed beforehand.
 */
void
lagg_common_detach(struct lagg_proto_softc *psc)
{

	lagg_proto_free(psc);
}
    446 
/*
 * Attach the "none" protocol: it keeps no state, so the protocol
 * softc is simply NULL.  Always succeeds.
 */
int
lagg_none_attach(struct lagg_softc *sc, struct lagg_proto_softc **pscp)
{

	*pscp = NULL;
	return 0;
}
    454 
    455 int
    456 lagg_fail_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
    457 {
    458 	struct lagg_proto_softc *psc;
    459 	struct lagg_failover *fovr;
    460 
    461 	psc = lagg_proto_alloc(LAGG_PROTO_FAILOVER, sc);
    462 	if (psc == NULL)
    463 		return ENOMEM;
    464 
    465 	fovr = psc->psc_ctx;
    466 	fovr->fo_rx_all = true;
    467 	lagg_work_set(&psc->psc_work_linkspeed,
    468 	    lagg_fail_linkspeed_work, psc);
    469 
    470 	*xpsc = psc;
    471 	return 0;
    472 }
    473 
/*
 * Failover TX: send every frame on the currently active port.
 * When no port is active the frame is dropped, an output error is
 * counted, and ENOENT is returned.
 */
int
lagg_fail_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
{
	struct ifnet *ifp;
	struct lagg_port *lp;
	struct psref psref;

	lp = lagg_link_active(psc, NULL, &psref);
	if (lp == NULL) {
		ifp = &psc->psc_softc->sc_if;
		if_statinc(ifp, if_oerrors);
		m_freem(m);
		return ENOENT;
	}

	lagg_output(psc->psc_softc, lp, m);
	lagg_port_putref(lp, &psref);
	return 0;
}
    493 
    494 struct mbuf *
    495 lagg_fail_input(struct lagg_proto_softc *psc, struct lagg_port *lp,
    496     struct mbuf *m)
    497 {
    498 	struct lagg_failover *fovr;
    499 	struct lagg_port *lp0;
    500 	struct ifnet *ifp;
    501 	struct psref psref;
    502 
    503 	fovr = psc->psc_ctx;
    504 	if (atomic_load_relaxed(&fovr->fo_rx_all))
    505 		return m;
    506 
    507 	lp0 = lagg_link_active(psc, NULL, &psref);
    508 	if (lp0 == NULL) {
    509 		goto drop;
    510 	}
    511 
    512 	if (lp0 != lp) {
    513 		lagg_port_putref(lp0, &psref);
    514 		goto drop;
    515 	}
    516 
    517 	lagg_port_putref(lp0, &psref);
    518 
    519 	return m;
    520 drop:
    521 	ifp = &psc->psc_softc->sc_if;
    522 	if_statinc(ifp, if_ierrors);
    523 	m_freem(m);
    524 	return NULL;
    525 }
    526 
/*
 * Report failover status flags for "lp" into "resp": the active port
 * collects and distributes; standby ports report collecting only when
 * fo_rx_all is set.
 */
void
lagg_fail_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
    struct laggreqport *resp)
{
	struct lagg_failover *fovr;
	struct lagg_proto_port *pport;
	struct lagg_port *lp0;
	struct psref psref;

	fovr = psc->psc_ctx;
	pport = lp->lp_proto_ctx;

	if (pport->lpp_active) {
		/* lp0 is the port actually used for TX */
		lp0 = lagg_link_active(psc, NULL, &psref);
		if (lp0 == lp) {
			SET(resp->rp_flags,
			    (LAGG_PORT_ACTIVE |
			    LAGG_PORT_COLLECTING |
			    LAGG_PORT_DISTRIBUTING));
		} else {
			if (fovr->fo_rx_all) {
				SET(resp->rp_flags,
				    LAGG_PORT_COLLECTING);
			}
		}

		if (lp0 != NULL)
			lagg_port_putref(lp0, &psref);
	}
}
    557 
    558 int
    559 lagg_fail_ioctl(struct lagg_proto_softc *psc, struct laggreqproto *lreq)
    560 {
    561 	struct lagg_failover *fovr;
    562 	struct laggreq_fail *rpfail;
    563 	int error;
    564 	bool set;
    565 
    566 	error = 0;
    567 	fovr = psc->psc_ctx;
    568 	rpfail = &lreq->rp_fail;
    569 
    570 	switch (rpfail->command) {
    571 	case LAGGIOC_FAILSETFLAGS:
    572 	case LAGGIOC_FAILCLRFLAGS:
    573 		set = (rpfail->command == LAGGIOC_FAILSETFLAGS) ?
    574 			true : false;
    575 
    576 		if (ISSET(rpfail->flags, LAGGREQFAIL_RXALL))
    577 			fovr->fo_rx_all = set;
    578 		break;
    579 	default:
    580 		error = ENOTTY;
    581 		break;
    582 	}
    583 
    584 	return error;
    585 }
    586 
    587 void
    588 lagg_fail_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
    589 {
    590 	struct lagg_proto_softc *psc = xpsc;
    591 	struct lagg_proto_port *pport;
    592 	struct lagg_port *lp;
    593 	struct psref psref;
    594 	uint64_t linkspeed;
    595 
    596 	kpreempt_disable();
    597 	lp = lagg_link_active(psc, NULL, &psref);
    598 	if (lp != NULL) {
    599 		pport = lp->lp_proto_ctx;
    600 		LAGG_PROTO_LOCK(psc);
    601 		linkspeed = pport->lpp_linkspeed;
    602 		LAGG_PROTO_UNLOCK(psc);
    603 		lagg_port_putref(lp, &psref);
    604 	} else {
    605 		linkspeed = 0;
    606 	}
    607 	kpreempt_enable();
    608 
    609 	LAGG_LOCK(psc->psc_softc);
    610 	lagg_set_linkspeed(psc->psc_softc, linkspeed);
    611 	LAGG_UNLOCK(psc->psc_softc);
    612 }
    613 
    614 int
    615 lagg_lb_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
    616 {
    617 	struct lagg_proto_softc *psc;
    618 	struct lagg_lb *lb;
    619 
    620 	psc = lagg_proto_alloc(LAGG_PROTO_LOADBALANCE, sc);
    621 	if (psc == NULL)
    622 		return ENOMEM;
    623 
    624 	lb = psc->psc_ctx;
    625 	lb->lb_pmaps.maps_activepmap = 0;
    626 	lagg_work_set(&psc->psc_work_linkspeed,
    627 	    lagg_lb_linkspeed_work, psc);
    628 
    629 	*xpsc = psc;
    630 	return 0;
    631 }
    632 
    633 void
    634 lagg_lb_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
    635 {
    636 	struct lagg_lb *lb;
    637 	struct lagg_portmap *pm_act, *pm_next;
    638 	size_t n;
    639 
    640 	lb = psc->psc_ctx;
    641 	lagg_common_startport(psc, lp);
    642 
    643 	LAGG_PROTO_LOCK(psc);
    644 	pm_act = lagg_portmap_active(&lb->lb_pmaps);
    645 	pm_next = lagg_portmap_next(&lb->lb_pmaps);
    646 
    647 	*pm_next = *pm_act;
    648 
    649 	n = pm_next->pm_nports;
    650 	pm_next->pm_ports[n] = lp;
    651 
    652 	n++;
    653 	pm_next->pm_nports = n;
    654 
    655 	lagg_portmap_switch(&lb->lb_pmaps);
    656 	LAGG_PROTO_UNLOCK(psc);
    657 	pserialize_perform(psc->psc_psz);
    658 }
    659 
/*
 * Remove "lp" from the load-balance distribution map and stop it.
 * The inactive map is rebuilt from the active one with "lp" filtered
 * out, the maps are switched, and pserialize_perform() waits for
 * transmitters that may still be using the old map.
 */
void
lagg_lb_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t i, n;

	lb = psc->psc_ctx;

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);
	n = 0;

	/* copy all ports except "lp" into the next map */
	for (i = 0; i < pm_act->pm_nports; i++) {
		if (pm_act->pm_ports[i] == lp)
			continue;

		pm_next->pm_ports[n] = pm_act->pm_ports[i];
		n++;
	}

	pm_next->pm_nports = n;

	lagg_portmap_switch(&lb->lb_pmaps);
	LAGG_PROTO_UNLOCK(psc);
	pserialize_perform(psc->psc_psz);

	lagg_common_stopport(psc, lp);
}
    690 
/*
 * Load-balance TX: select the output port by hashing the frame and
 * indexing the active portmap inside a pserialize read section.
 * When no port is usable, the frame is dropped, an output error is
 * counted, and ENOENT is returned.
 */
int
lagg_lb_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm;
	struct lagg_port *lp, *lp0;
	struct ifnet *ifp;
	struct psref psref;
	uint32_t hash;
	int s;

	lb = psc->psc_ctx;
	hash = lagg_hashmbuf(psc->psc_softc, m);

	s = pserialize_read_enter();

	pm = lagg_portmap_active(&lb->lb_pmaps);
	if (__predict_true(pm->pm_nports != 0)) {
		hash %= pm->pm_nports;
		lp0 = pm->pm_ports[hash];
		/* use lp0 if its link is up, otherwise another active port */
		lp = lagg_link_active(psc, lp0->lp_proto_ctx, &psref);
	} else {
		lp = NULL;
	}

	pserialize_read_exit(s);

	if (__predict_false(lp == NULL)) {
		ifp = &psc->psc_softc->sc_if;
		if_statinc(ifp, if_oerrors);
		m_freem(m);
		return ENOENT;
	}

	lagg_output(psc->psc_softc, lp, m);
	lagg_port_putref(lp, &psref);

	return 0;
}
    730 
/*
 * Load-balance RX: frames are accepted on every port unchanged.
 */
struct mbuf *
lagg_lb_input(struct lagg_proto_softc *psc __unused,
    struct lagg_port *lp __unused, struct mbuf *m)
{

	return m;
}
    738 
    739 void
    740 lagg_lb_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
    741     struct laggreqport *resp)
    742 {
    743 	struct lagg_proto_port *pport;
    744 
    745 	pport = lp->lp_proto_ctx;
    746 
    747 	if (pport->lpp_active) {
    748 		SET(resp->rp_flags, LAGG_PORT_ACTIVE |
    749 		    LAGG_PORT_COLLECTING | LAGG_PORT_DISTRIBUTING);
    750 	}
    751 }
    752 
/*
 * Deferred work: the load-balance aggregate link speed is the maximum
 * cached speed among the active ports.
 */
static void
lagg_lb_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
{
	struct lagg_proto_softc *psc = xpsc;
	struct lagg_proto_port *pport;
	uint64_t linkspeed, l;

	linkspeed = 0;

	LAGG_PROTO_LOCK(psc); /* acquired to refer lpp_linkspeed */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (pport->lpp_active) {
			l = pport->lpp_linkspeed;
			linkspeed = MAX(linkspeed, l);
		}
	}
	LAGG_PROTO_UNLOCK(psc);

	LAGG_LOCK(psc->psc_softc);
	lagg_set_linkspeed(psc->psc_softc, linkspeed);
	LAGG_UNLOCK(psc->psc_softc);
}
    776