/*	$NetBSD: if_laggproto.c,v 1.14 2024/04/05 06:37:29 yamaguchi Exp $	*/
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5 *
6 * Copyright (c)2021 Internet Initiative Japan, Inc.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: if_laggproto.c,v 1.14 2024/04/05 06:37:29 yamaguchi Exp $");
33
#include <sys/param.h>
#include <sys/types.h>

#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pslist.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
44
45 #include <net/if.h>
46 #include <net/if_ether.h>
47 #include <net/if_media.h>
48
49 #include <net/lagg/if_lagg.h>
50 #include <net/lagg/if_laggproto.h>
51
/*
 * Per-protocol state shared by all lagg protocols (failover,
 * loadbalance, ...).  See the locking notes below for which
 * lock protects what.
 */
struct lagg_proto_softc {
	struct lagg_softc	*psc_softc;	/* backpointer to the lagg instance */
	struct pslist_head	 psc_ports;	/* member ports; pserialize-protected list */
	kmutex_t		 psc_lock;	/* adaptive mutex; see locking notes */
	pserialize_t		 psc_psz;	/* pserialize for psc_ports readers */
	size_t			 psc_ctxsiz;	/* size of psc_ctx, 0 when unused */
	void			*psc_ctx;	/* protocol-private context (failover/lb) */
	size_t			 psc_nactports;	/* count of link-active ports */
	struct workqueue	*psc_workq;	/* softnet workqueue for deferred work */
	struct lagg_work	 psc_work_linkspeed; /* recompute aggregate link speed */
};
63
/*
 * Locking notes:
 * - Fields of struct lagg_proto_softc are protected by
 *   psc_lock (an adaptive mutex)
 * - psc_ports is protected by pserialize (psc_psz) and
 *   is updated exclusively under LAGG_PROTO_LOCK.
 * - Other locking notes are described in if_laggproto.h
 */
72
/* Protocol context for LAGG_PROTO_FAILOVER, stored in psc_ctx. */
struct lagg_failover {
	bool	fo_rx_all;	/* true: accept rx on every port, not only the active one */
};
76
/* A snapshot of the distributable ports, indexed by flow hash. */
struct lagg_portmap {
	struct lagg_port	*pm_ports[LAGG_MAX_PORTS];
	size_t			 pm_nports;	/* number of valid entries in pm_ports */
};
81
/*
 * Double-buffered port maps: readers use the map selected by
 * maps_activepmap (0 or 1) while writers prepare the other one
 * and then flip the index (see lagg_portmap_switch()).
 */
struct lagg_portmaps {
	struct lagg_portmap	 maps_pmap[2];
	size_t			 maps_activepmap;	/* index of the active map, 0 or 1 */
};
86
/* Protocol context for LAGG_PROTO_LOADBALANCE, stored in psc_ctx. */
struct lagg_lb {
	struct lagg_portmaps	lb_pmaps;	/* hash-distribution port maps */
};
90
/* Per-port protocol state, linked into psc_ports. */
struct lagg_proto_port {
	struct pslist_entry	 lpp_entry;	/* linkage on psc_ports */
	struct lagg_port	*lpp_laggport;	/* backpointer to the lagg port */
	uint64_t		 lpp_linkspeed;	/* cached media baudrate; under psc_lock */
	bool			 lpp_active;	/* port link is up and usable */
	bool			 lpp_running;	/* port has been started by the protocol */
};
98
/* Serialize updates to struct lagg_proto_softc and the port list. */
#define LAGG_PROTO_LOCK(_psc)	mutex_enter(&(_psc)->psc_lock)
#define LAGG_PROTO_UNLOCK(_psc)	mutex_exit(&(_psc)->psc_lock)
#define LAGG_PROTO_LOCKED(_psc)	mutex_owned(&(_psc)->psc_lock)
102
103 static struct lagg_proto_softc *
104 lagg_proto_alloc(lagg_proto, struct lagg_softc *);
105 static void lagg_proto_free(struct lagg_proto_softc *);
106 static void lagg_proto_insert_port(struct lagg_proto_softc *,
107 struct lagg_proto_port *);
108 static void lagg_proto_remove_port(struct lagg_proto_softc *,
109 struct lagg_proto_port *);
110 static struct lagg_port *
111 lagg_link_active(struct lagg_proto_softc *psc,
112 struct lagg_proto_port *, struct psref *);
113 static void lagg_fail_linkspeed_work(struct lagg_work *, void *);
114 static void lagg_lb_linkspeed_work(struct lagg_work*,
115 void *);
116 static void lagg_common_linkstate(struct lagg_proto_softc *,
117 struct lagg_port *);
118
119 static inline struct lagg_portmap *
120 lagg_portmap_active(struct lagg_portmaps *maps)
121 {
122 size_t i;
123
124 i = atomic_load_consume(&maps->maps_activepmap);
125
126 return &maps->maps_pmap[i];
127 }
128
129 static inline struct lagg_portmap *
130 lagg_portmap_next(struct lagg_portmaps *maps)
131 {
132 size_t i;
133
134 i = atomic_load_consume(&maps->maps_activepmap);
135 i ^= 0x1;
136
137 return &maps->maps_pmap[i];
138 }
139
140 static inline void
141 lagg_portmap_switch(struct lagg_portmaps *maps)
142 {
143 size_t i;
144
145 i = atomic_load_consume(&maps->maps_activepmap);
146 i &= 0x1;
147 i ^= 0x1;
148
149 atomic_store_release(&maps->maps_activepmap, i);
150 }
151
152 static struct lagg_proto_softc *
153 lagg_proto_alloc(lagg_proto pr, struct lagg_softc *sc)
154 {
155 struct lagg_proto_softc *psc;
156 char xnamebuf[MAXCOMLEN];
157 size_t ctxsiz;
158
159 switch (pr) {
160 case LAGG_PROTO_FAILOVER:
161 ctxsiz = sizeof(struct lagg_failover);
162 break;
163 case LAGG_PROTO_LOADBALANCE:
164 ctxsiz = sizeof(struct lagg_lb);
165 break;
166 default:
167 ctxsiz = 0;
168 }
169
170 psc = kmem_zalloc(sizeof(*psc), KM_NOSLEEP);
171 if (psc == NULL)
172 return NULL;
173
174 psc->psc_workq = lagg_workq_create(xnamebuf,
175 PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
176 if (psc->psc_workq == NULL) {
177 LAGG_LOG(sc, LOG_ERR, "workqueue create failed\n");
178 kmem_free(psc, sizeof(*psc));
179 return NULL;
180 }
181
182 if (ctxsiz > 0) {
183 psc->psc_ctx = kmem_zalloc(ctxsiz, KM_NOSLEEP);
184 if (psc->psc_ctx == NULL) {
185 lagg_workq_destroy(psc->psc_workq);
186 kmem_free(psc, sizeof(*psc));
187 return NULL;
188 }
189
190 psc->psc_ctxsiz = ctxsiz;
191 }
192
193 PSLIST_INIT(&psc->psc_ports);
194 psc->psc_psz = pserialize_create();
195 mutex_init(&psc->psc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
196 psc->psc_softc = sc;
197
198 return psc;
199 }
200
/*
 * Release a protocol softc allocated by lagg_proto_alloc().
 * The caller must guarantee no other thread still references psc.
 */
static void
lagg_proto_free(struct lagg_proto_softc *psc)
{

	/* Drain pending linkspeed work so it cannot run after the free. */
	lagg_workq_wait(psc->psc_workq, &psc->psc_work_linkspeed);
	pserialize_destroy(psc->psc_psz);
	mutex_destroy(&psc->psc_lock);
	lagg_workq_destroy(psc->psc_workq);

	/* psc_ctx exists only for protocols that requested a context. */
	if (psc->psc_ctxsiz > 0)
		kmem_free(psc->psc_ctx, psc->psc_ctxsiz);

	kmem_free(psc, sizeof(*psc));
}
215
/*
 * Find an active port, preferring ports at or after "pport" in the
 * port list; if none is found there, wrap around and scan the whole
 * list from the head.  Pass pport == NULL to simply take the first
 * active port.
 *
 * On success, a psref reference is acquired on the returned port via
 * "psref"; the caller must drop it with lagg_port_putref().  Returns
 * NULL when no active port exists.
 */
static struct lagg_port *
lagg_link_active(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport, struct psref *psref)
{
	struct lagg_port *lp;
	int s;

	lp = NULL;
	/* List readers are protected by pserialize, not psc_lock. */
	s = pserialize_read_enter();

	/* Pass 1: from the given starting point to the tail. */
	for (;pport != NULL;
	    pport = PSLIST_READER_NEXT(pport,
	    struct lagg_proto_port, lpp_entry)) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			goto done;
		}
	}

	/* Pass 2: wrap around, scanning from the head of the list. */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			break;
		}
	}
done:
	/* Pin the port before leaving the read section. */
	if (lp != NULL)
		lagg_port_getref(lp, psref);
	pserialize_read_exit(s);

	return lp;
}
249
250 int
251 lagg_common_allocport(struct lagg_proto_softc *psc, struct lagg_port *lp)
252 {
253 struct lagg_proto_port *pport;
254
255 KASSERT(LAGG_LOCKED(psc->psc_softc));
256
257 pport = kmem_zalloc(sizeof(*pport), KM_NOSLEEP);
258 if (pport == NULL)
259 return ENOMEM;
260
261 PSLIST_ENTRY_INIT(pport, lpp_entry);
262 pport->lpp_laggport = lp;
263 lp->lp_proto_ctx = (void *)pport;
264 return 0;
265 }
266
267 void
268 lagg_common_freeport(struct lagg_proto_softc *psc, struct lagg_port *lp)
269 {
270 struct lagg_proto_port *pport;
271
272 pport = lp->lp_proto_ctx;
273 KASSERT(!pport->lpp_running);
274 lp->lp_proto_ctx = NULL;
275
276 kmem_free(pport, sizeof(*pport));
277 }
278
/*
 * Insert pport into psc_ports keeping the list ordered by ascending
 * lp_prio.  The new port is placed before the first existing entry
 * with a strictly larger priority value; with equal priorities it is
 * appended after the existing entries.
 */
static void
lagg_proto_insert_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{
	struct lagg_proto_port *pport0;
	struct lagg_port *lp, *lp0;
	bool insert_after;

	insert_after = false;
	lp = pport->lpp_laggport;

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_FOREACH(pport0, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		lp0 = pport0->lpp_laggport;
		/* Found the first entry we must precede. */
		if (lp0->lp_prio > lp->lp_prio)
			break;

		/* Reached the tail without finding one: append. */
		if (PSLIST_WRITER_NEXT(pport0,
		    struct lagg_proto_port, lpp_entry) == NULL) {
			insert_after = true;
			break;
		}
	}

	if (pport0 == NULL) {
		/* Empty list. */
		PSLIST_WRITER_INSERT_HEAD(&psc->psc_ports, pport,
		    lpp_entry);
	} else if (insert_after) {
		PSLIST_WRITER_INSERT_AFTER(pport0, pport, lpp_entry);
	} else {
		PSLIST_WRITER_INSERT_BEFORE(pport0, pport, lpp_entry);
	}
	LAGG_PROTO_UNLOCK(psc);
}
314
/*
 * Unlink pport from psc_ports and wait for all pserialize readers
 * to drain, so the caller may safely tear the port down afterwards.
 * May block in pserialize_perform(); must not be called from a
 * pserialize read section.
 */
static void
lagg_proto_remove_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_REMOVE(pport, lpp_entry);
	LAGG_PROTO_UNLOCK(psc);
	/* Wait for concurrent readers traversing the list to finish. */
	pserialize_perform(psc->psc_psz);
}
325
326 void
327 lagg_common_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
328 {
329 struct lagg_proto_port *pport;
330
331 pport = lp->lp_proto_ctx;
332 lagg_proto_insert_port(psc, pport);
333
334 LAGG_PROTO_LOCK(psc);
335 pport->lpp_running = true;
336 LAGG_PROTO_UNLOCK(psc);
337
338 lagg_common_linkstate(psc, lp);
339 }
340
/*
 * Stop a port for the common protocols: mark it not running, unlink
 * it from the port list (waiting for readers to drain), and retire
 * its contribution to the aggregate link state and link speed.
 */
void
lagg_common_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp;

	pport = lp->lp_proto_ctx;

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = false;
	LAGG_PROTO_UNLOCK(psc);

	/* Unlink and wait for concurrent pserialize readers. */
	lagg_proto_remove_port(psc, pport);

	/*
	 * NOTE(review): lpp_active and psc_nactports are accessed
	 * without psc_lock here; presumably safe because the port is
	 * already unlinked and readers drained — confirm against the
	 * locking notes in if_laggproto.h.
	 */
	if (pport->lpp_active) {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* Last active port gone: aggregate link goes down. */
		if (psc->psc_nactports == 0) {
			ifp = &psc->psc_softc->sc_if;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}

		pport->lpp_active = false;
	}

	/* Recompute the aggregate link speed asynchronously. */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
369 static void
370 lagg_common_linkstate(struct lagg_proto_softc *psc, struct lagg_port *lp)
371 {
372
373 IFNET_ASSERT_UNLOCKED(lp->lp_ifp);
374
375 IFNET_LOCK(lp->lp_ifp);
376 lagg_common_linkstate_ifnet_locked(psc, lp);
377 IFNET_UNLOCK(lp->lp_ifp);
378 }
379
/*
 * Update the per-port active state and, when it changes, propagate
 * the change to the lagg interface's link state and cached link
 * speed.  Called with the member interface's IFNET_LOCK held.
 */
void
lagg_common_linkstate_ifnet_locked(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp, *ifp_port;
	struct ifmediareq ifmr;
	uint64_t linkspeed;
	bool is_active;
	int error;

	pport = lp->lp_proto_ctx;
	is_active = lagg_portactive(lp);
	ifp_port = lp->lp_ifp;

	KASSERT(IFNET_LOCKED(ifp_port));

	LAGG_PROTO_LOCK(psc);
	/* Nothing to do if the port is stopped or the state is unchanged. */
	if (!pport->lpp_running ||
	    pport->lpp_active == is_active) {
		LAGG_PROTO_UNLOCK(psc);
		return;
	}

	ifp = &psc->psc_softc->sc_if;
	pport->lpp_active = is_active;

	if (is_active) {
		psc->psc_nactports++;
		/* First active port: the aggregate link comes up. */
		if (psc->psc_nactports == 1)
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		/* Last active port: the aggregate link goes down. */
		if (psc->psc_nactports == 0)
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
	LAGG_PROTO_UNLOCK(psc);

	/*
	 * Query the member's media to learn its baudrate; done after
	 * dropping psc_lock since the ioctl may be slow.
	 */
	memset(&ifmr, 0, sizeof(ifmr));
	error = if_ioctl(ifp_port, SIOCGIFMEDIA, (void *)&ifmr);
	if (error == 0) {
		linkspeed = ifmedia_baudrate(ifmr.ifm_active);
	} else {
		linkspeed = 0;	/* unknown media: treat as no speed */
	}

	LAGG_PROTO_LOCK(psc);
	pport->lpp_linkspeed = linkspeed;
	LAGG_PROTO_UNLOCK(psc);
	/* Defer the aggregate linkspeed recalculation to the workqueue. */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
432
/*
 * Common detach hook: release the protocol softc.  psc must not be
 * used after this returns.
 */
void
lagg_common_detach(struct lagg_proto_softc *psc)
{

	lagg_proto_free(psc);
}
439
/*
 * Attach hook for LAGG_PROTO_NONE: no protocol state is needed, so
 * publish a NULL softc.  Always succeeds.
 */
int
lagg_none_attach(struct lagg_softc *sc, struct lagg_proto_softc **pscp)
{

	*pscp = NULL;
	return 0;
}
447
448 int
449 lagg_fail_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
450 {
451 struct lagg_proto_softc *psc;
452 struct lagg_failover *fovr;
453
454 psc = lagg_proto_alloc(LAGG_PROTO_FAILOVER, sc);
455 if (psc == NULL)
456 return ENOMEM;
457
458 fovr = psc->psc_ctx;
459 fovr->fo_rx_all = true;
460 lagg_work_set(&psc->psc_work_linkspeed,
461 lagg_fail_linkspeed_work, psc);
462
463 *xpsc = psc;
464 return 0;
465 }
466
467 int
468 lagg_fail_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
469 {
470 struct ifnet *ifp;
471 struct lagg_port *lp;
472 struct psref psref;
473
474 lp = lagg_link_active(psc, NULL, &psref);
475 if (lp == NULL) {
476 ifp = &psc->psc_softc->sc_if;
477 if_statinc(ifp, if_oerrors);
478 m_freem(m);
479 return ENOENT;
480 }
481
482 lagg_output(psc->psc_softc, lp, m);
483 lagg_port_putref(lp, &psref);
484 return 0;
485 }
486
487 struct mbuf *
488 lagg_fail_input(struct lagg_proto_softc *psc, struct lagg_port *lp,
489 struct mbuf *m)
490 {
491 struct lagg_failover *fovr;
492 struct lagg_port *lp0;
493 struct ifnet *ifp;
494 struct psref psref;
495
496 fovr = psc->psc_ctx;
497 if (atomic_load_relaxed(&fovr->fo_rx_all))
498 return m;
499
500 lp0 = lagg_link_active(psc, NULL, &psref);
501 if (lp0 == NULL) {
502 goto drop;
503 }
504
505 if (lp0 != lp) {
506 lagg_port_putref(lp0, &psref);
507 goto drop;
508 }
509
510 lagg_port_putref(lp0, &psref);
511
512 return m;
513 drop:
514 ifp = &psc->psc_softc->sc_if;
515 if_statinc(ifp, if_ierrors);
516 m_freem(m);
517 return NULL;
518 }
519
520 void
521 lagg_fail_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
522 struct laggreqport *resp)
523 {
524 struct lagg_failover *fovr;
525 struct lagg_proto_port *pport;
526 struct lagg_port *lp0;
527 struct psref psref;
528
529 fovr = psc->psc_ctx;
530 pport = lp->lp_proto_ctx;
531
532 if (pport->lpp_active) {
533 lp0 = lagg_link_active(psc, NULL, &psref);
534 if (lp0 == lp) {
535 SET(resp->rp_flags,
536 (LAGG_PORT_ACTIVE |
537 LAGG_PORT_COLLECTING |
538 LAGG_PORT_DISTRIBUTING));
539 } else {
540 if (fovr->fo_rx_all) {
541 SET(resp->rp_flags,
542 LAGG_PORT_COLLECTING);
543 }
544 }
545
546 if (lp0 != NULL)
547 lagg_port_putref(lp0, &psref);
548 }
549 }
550
551 int
552 lagg_fail_ioctl(struct lagg_proto_softc *psc, struct laggreqproto *lreq)
553 {
554 struct lagg_failover *fovr;
555 struct laggreq_fail *rpfail;
556 int error;
557 bool set;
558
559 error = 0;
560 fovr = psc->psc_ctx;
561 rpfail = &lreq->rp_fail;
562
563 switch (rpfail->command) {
564 case LAGGIOC_FAILSETFLAGS:
565 case LAGGIOC_FAILCLRFLAGS:
566 set = (rpfail->command == LAGGIOC_FAILSETFLAGS) ?
567 true : false;
568
569 if (ISSET(rpfail->flags, LAGGREQFAIL_RXALL))
570 fovr->fo_rx_all = set;
571 break;
572 default:
573 error = ENOTTY;
574 break;
575 }
576
577 return error;
578 }
579
580 void
581 lagg_fail_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
582 {
583 struct lagg_proto_softc *psc = xpsc;
584 struct lagg_proto_port *pport;
585 struct lagg_port *lp;
586 struct psref psref;
587 uint64_t linkspeed;
588
589 kpreempt_disable();
590 lp = lagg_link_active(psc, NULL, &psref);
591 if (lp != NULL) {
592 pport = lp->lp_proto_ctx;
593 LAGG_PROTO_LOCK(psc);
594 linkspeed = pport->lpp_linkspeed;
595 LAGG_PROTO_UNLOCK(psc);
596 lagg_port_putref(lp, &psref);
597 } else {
598 linkspeed = 0;
599 }
600 kpreempt_enable();
601
602 LAGG_LOCK(psc->psc_softc);
603 lagg_set_linkspeed(psc->psc_softc, linkspeed);
604 LAGG_UNLOCK(psc->psc_softc);
605 }
606
607 int
608 lagg_lb_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
609 {
610 struct lagg_proto_softc *psc;
611 struct lagg_lb *lb;
612
613 psc = lagg_proto_alloc(LAGG_PROTO_LOADBALANCE, sc);
614 if (psc == NULL)
615 return ENOMEM;
616
617 lb = psc->psc_ctx;
618 lb->lb_pmaps.maps_activepmap = 0;
619 lagg_work_set(&psc->psc_work_linkspeed,
620 lagg_lb_linkspeed_work, psc);
621
622 *xpsc = psc;
623 return 0;
624 }
625
/*
 * Start a port for load-balance: perform the common start, then add
 * the port to the distribution map using the double-buffer scheme —
 * copy the active map into the shadow map, append the new port, and
 * flip.  pserialize_perform() then waits out readers still using
 * the old map before it may be reused as the next shadow.
 */
void
lagg_lb_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t n;

	lb = psc->psc_ctx;
	lagg_common_startport(psc, lp);

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);

	/* Snapshot the current map into the shadow copy. */
	*pm_next = *pm_act;

	/* Append the new port at the end of the shadow map. */
	n = pm_next->pm_nports;
	pm_next->pm_ports[n] = lp;

	n++;
	pm_next->pm_nports = n;

	/* Publish the shadow map to readers. */
	lagg_portmap_switch(&lb->lb_pmaps);
	LAGG_PROTO_UNLOCK(psc);
	pserialize_perform(psc->psc_psz);
}
652
/*
 * Stop a port for load-balance: rebuild the shadow map without the
 * departing port, flip maps, wait out readers of the old map, and
 * finally perform the common stop.
 */
void
lagg_lb_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t i, n;

	lb = psc->psc_ctx;

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);
	n = 0;

	/* Copy every port except "lp" into the shadow map. */
	for (i = 0; i < pm_act->pm_nports; i++) {
		if (pm_act->pm_ports[i] == lp)
			continue;

		pm_next->pm_ports[n] = pm_act->pm_ports[i];
		n++;
	}

	pm_next->pm_nports = n;

	/* Publish the shadow map, then drain readers of the old one. */
	lagg_portmap_switch(&lb->lb_pmaps);
	LAGG_PROTO_UNLOCK(psc);
	pserialize_perform(psc->psc_psz);

	lagg_common_stopport(psc, lp);
}
683
/*
 * Transmit hook for load-balance: hash the packet to pick a port
 * from the active map, then fall forward to the next active port if
 * the chosen one is down (lagg_link_active() starts its search at
 * the chosen port's list position).  Consumes "m" in all cases;
 * returns ENOENT when no port is usable.
 */
int
lagg_lb_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm;
	struct lagg_port *lp, *lp0;
	struct ifnet *ifp;
	struct psref psref;
	uint32_t hash;
	int s;

	lb = psc->psc_ctx;
	hash = lagg_hashmbuf(psc->psc_softc, m);

	/* The port map is stable within the pserialize read section. */
	s = pserialize_read_enter();

	pm = lagg_portmap_active(&lb->lb_pmaps);
	if (__predict_true(pm->pm_nports != 0)) {
		hash %= pm->pm_nports;
		lp0 = pm->pm_ports[hash];
		/* Prefer lp0, else the next active port after it. */
		lp = lagg_link_active(psc, lp0->lp_proto_ctx, &psref);
	} else {
		lp = NULL;
	}

	pserialize_read_exit(s);

	if (__predict_false(lp == NULL)) {
		ifp = &psc->psc_softc->sc_if;
		if_statinc(ifp, if_oerrors);
		m_freem(m);
		return ENOENT;
	}

	lagg_output(psc->psc_softc, lp, m);
	lagg_port_putref(lp, &psref);

	return 0;
}
723
/*
 * Input hook for load-balance: traffic is accepted on any port, so
 * simply pass the mbuf through.
 */
struct mbuf *
lagg_lb_input(struct lagg_proto_softc *psc __unused,
    struct lagg_port *lp __unused, struct mbuf *m)
{

	return m;
}
731
732 void
733 lagg_lb_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
734 struct laggreqport *resp)
735 {
736 struct lagg_proto_port *pport;
737
738 pport = lp->lp_proto_ctx;
739
740 if (pport->lpp_active) {
741 SET(resp->rp_flags, LAGG_PORT_ACTIVE |
742 LAGG_PORT_COLLECTING | LAGG_PORT_DISTRIBUTING);
743 }
744 }
745
/*
 * Workqueue handler for load-balance: publish the maximum link
 * speed among all active ports (0 when none) as the lagg
 * interface's link speed.
 */
static void
lagg_lb_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
{
	struct lagg_proto_softc *psc = xpsc;
	struct lagg_proto_port *pport;
	uint64_t linkspeed, l;

	linkspeed = 0;

	LAGG_PROTO_LOCK(psc); /* acquired to refer lpp_linkspeed */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (pport->lpp_active) {
			l = pport->lpp_linkspeed;
			/* Aggregate speed is the fastest active member. */
			linkspeed = MAX(linkspeed, l);
		}
	}
	LAGG_PROTO_UNLOCK(psc);

	LAGG_LOCK(psc->psc_softc);
	lagg_set_linkspeed(psc->psc_softc, linkspeed);
	LAGG_UNLOCK(psc->psc_softc);
}
769