if_laggproto.c revision 1.8 1 /* $NetBSD: if_laggproto.c,v 1.8 2023/11/28 05:28:37 yamaguchi Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5 *
6 * Copyright (c)2021 Internet Initiative Japan, Inc.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: if_laggproto.c,v 1.8 2023/11/28 05:28:37 yamaguchi Exp $");
33
34 #include <sys/param.h>
35 #include <sys/types.h>
36
37 #include <sys/evcnt.h>
38 #include <sys/kmem.h>
39 #include <sys/mbuf.h>
40 #include <sys/mutex.h>
41 #include <sys/pslist.h>
42 #include <sys/syslog.h>
43 #include <sys/workqueue.h>
44
45 #include <net/if.h>
46 #include <net/if_ether.h>
47 #include <net/if_media.h>
48
49 #include <net/lagg/if_lagg.h>
50 #include <net/lagg/if_laggproto.h>
51
struct lagg_proto_softc {
	struct lagg_softc *psc_softc;	/* back pointer to the lagg instance */
	struct pslist_head psc_ports;	/* list of lagg_proto_port (pserialized) */
	kmutex_t psc_lock;		/* protects members; see locking notes */
	pserialize_t psc_psz;		/* read-side sync for psc_ports */
	size_t psc_ctxsiz;		/* size of psc_ctx (0 = no context) */
	void *psc_ctx;			/* protocol-private context (failover/lb) */
	size_t psc_nactports;		/* number of link-active ports */
	struct workqueue *psc_workq;	/* workqueue for deferred updates */
	struct lagg_work psc_work_linkspeed;	/* aggregate-linkspeed recalc work */
};
63
/*
 * Locking notes:
 * - Members of struct lagg_proto_softc are protected by
 *   psc_lock (an adaptive mutex)
 * - psc_ports is protected by pserialize (psc_psz)
 * - Updates of psc_ports are serialized by sc_lock in
 *   struct lagg_softc
 * - Other locking notes are described in if_laggproto.h
 */
73
/* Failover protocol context (psc_ctx). */
struct lagg_failover {
	bool fo_rx_all;			/* accept frames on all ports, not
					 * just the active one */
};

/* Snapshot of ports participating in load-balance hashing. */
struct lagg_portmap {
	struct lagg_port *pm_ports[LAGG_MAX_PORTS];
	size_t pm_nports;		/* number of valid entries above */
};

/* Double-buffered port maps; one is active, the other is staged. */
struct lagg_portmaps {
	struct lagg_portmap maps_pmap[2];
	size_t maps_activepmap;		/* index (0/1) of the active map */
};

/* Load-balance protocol context (psc_ctx). */
struct lagg_lb {
	struct lagg_portmaps lb_pmaps;
};

/* Per-port protocol state, linked on psc_ports. */
struct lagg_proto_port {
	struct pslist_entry lpp_entry;	/* linkage on psc_ports */
	struct lagg_port *lpp_laggport;	/* back pointer to the lagg port */
	uint64_t lpp_linkspeed;		/* cached media baudrate */
	bool lpp_active;		/* link is up on this port */
	bool lpp_running;		/* port has been started */
};
99
/* Lock macros for psc_lock; see the locking notes above. */
#define LAGG_PROTO_LOCK(_psc)	mutex_enter(&(_psc)->psc_lock)
#define LAGG_PROTO_UNLOCK(_psc)	mutex_exit(&(_psc)->psc_lock)
#define LAGG_PROTO_LOCKED(_psc)	mutex_owned(&(_psc)->psc_lock)

/* Forward declarations of file-local helpers. */
static struct lagg_proto_softc *
		lagg_proto_alloc(lagg_proto, struct lagg_softc *);
static void	lagg_proto_free(struct lagg_proto_softc *);
static void	lagg_proto_insert_port(struct lagg_proto_softc *,
		    struct lagg_proto_port *);
static void	lagg_proto_remove_port(struct lagg_proto_softc *,
		    struct lagg_proto_port *);
static struct lagg_port *
		lagg_link_active(struct lagg_proto_softc *psc,
		    struct lagg_proto_port *, struct psref *);
static void	lagg_fail_linkspeed_work(struct lagg_work *, void *);
static void	lagg_lb_linkspeed_work(struct lagg_work*,
		    void *);
static void	lagg_common_linkstate(struct lagg_proto_softc *,
		    struct lagg_port *);
119
120 static inline struct lagg_portmap *
121 lagg_portmap_active(struct lagg_portmaps *maps)
122 {
123 size_t i;
124
125 i = atomic_load_consume(&maps->maps_activepmap);
126
127 return &maps->maps_pmap[i];
128 }
129
130 static inline struct lagg_portmap *
131 lagg_portmap_next(struct lagg_portmaps *maps)
132 {
133 size_t i;
134
135 i = atomic_load_consume(&maps->maps_activepmap);
136 i &= 0x1;
137 i ^= 0x1;
138
139 return &maps->maps_pmap[i];
140 }
141
142 static inline void
143 lagg_portmap_switch(struct lagg_portmaps *maps)
144 {
145 size_t i;
146
147 i = atomic_load_consume(&maps->maps_activepmap);
148 i &= 0x1;
149 i ^= 0x1;
150
151 atomic_store_release(&maps->maps_activepmap, i);
152 }
153
154 static struct lagg_proto_softc *
155 lagg_proto_alloc(lagg_proto pr, struct lagg_softc *sc)
156 {
157 struct lagg_proto_softc *psc;
158 char xnamebuf[MAXCOMLEN];
159 size_t ctxsiz;
160
161 switch (pr) {
162 case LAGG_PROTO_FAILOVER:
163 ctxsiz = sizeof(struct lagg_failover);
164 break;
165 case LAGG_PROTO_LOADBALANCE:
166 ctxsiz = sizeof(struct lagg_lb);
167 break;
168 default:
169 ctxsiz = 0;
170 }
171
172 psc = kmem_zalloc(sizeof(*psc), KM_NOSLEEP);
173 if (psc == NULL)
174 return NULL;
175
176 psc->psc_workq = lagg_workq_create(xnamebuf,
177 PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
178 if (psc->psc_workq == NULL) {
179 LAGG_LOG(sc, LOG_ERR, "workqueue create failed\n");
180 kmem_free(psc, sizeof(*psc));
181 return NULL;
182 }
183
184 if (ctxsiz > 0) {
185 psc->psc_ctx = kmem_zalloc(ctxsiz, KM_NOSLEEP);
186 if (psc->psc_ctx == NULL) {
187 lagg_workq_destroy(psc->psc_workq);
188 kmem_free(psc, sizeof(*psc));
189 return NULL;
190 }
191
192 psc->psc_ctxsiz = ctxsiz;
193 }
194
195 PSLIST_INIT(&psc->psc_ports);
196 psc->psc_psz = pserialize_create();
197 mutex_init(&psc->psc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
198 psc->psc_softc = sc;
199
200 return psc;
201 }
202
/*
 * Release a protocol softc allocated by lagg_proto_alloc().
 * Drains any pending linkspeed work before the workqueue and the
 * synchronization objects are destroyed.
 */
static void
lagg_proto_free(struct lagg_proto_softc *psc)
{

	lagg_workq_wait(psc->psc_workq, &psc->psc_work_linkspeed);
	pserialize_destroy(psc->psc_psz);
	mutex_destroy(&psc->psc_lock);
	lagg_workq_destroy(psc->psc_workq);

	if (psc->psc_ctxsiz > 0)
		kmem_free(psc->psc_ctx, psc->psc_ctxsiz);

	kmem_free(psc, sizeof(*psc));
}
217
/*
 * Find a link-active port, preferring ports at or after "pport" in
 * the list; if none is found there (or pport is NULL), scan the
 * whole list from the head.  On success the returned port holds a
 * psref reference which the caller must drop with
 * lagg_port_putref().  Returns NULL if no port is active.
 */
static struct lagg_port *
lagg_link_active(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport, struct psref *psref)
{
	struct lagg_port *lp;
	int s;

	lp = NULL;
	s = pserialize_read_enter();

	/* First pass: from the given entry to the tail. */
	for (;pport != NULL;
	    pport = PSLIST_READER_NEXT(pport,
	    struct lagg_proto_port, lpp_entry)) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			goto done;
		}
	}

	/* Second pass: from the head of the list. */
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (atomic_load_relaxed(&pport->lpp_active)) {
			lp = pport->lpp_laggport;
			break;
		}
	}
done:
	/* Take the reference inside the pserialize read section. */
	if (lp != NULL)
		lagg_port_getref(lp, psref);
	pserialize_read_exit(s);

	return lp;
}
251
252 int
253 lagg_common_allocport(struct lagg_proto_softc *psc, struct lagg_port *lp)
254 {
255 struct lagg_proto_port *pport;
256
257 KASSERT(LAGG_LOCKED(psc->psc_softc));
258
259 pport = kmem_zalloc(sizeof(*pport), KM_NOSLEEP);
260 if (pport == NULL)
261 return ENOMEM;
262
263 PSLIST_ENTRY_INIT(pport, lpp_entry);
264 pport->lpp_laggport = lp;
265 lp->lp_proto_ctx = (void *)pport;
266 return 0;
267 }
268
269 void
270 lagg_common_freeport(struct lagg_proto_softc *psc, struct lagg_port *lp)
271 {
272 struct lagg_proto_port *pport;
273
274 pport = lp->lp_proto_ctx;
275 KASSERT(!pport->lpp_running);
276 lp->lp_proto_ctx = NULL;
277
278 kmem_free(pport, sizeof(*pport));
279 }
280
/*
 * Insert pport into psc_ports, keeping the list ordered by
 * ascending port priority (lp_prio).  Updates are serialized by
 * sc_lock; psc_lock covers the pslist writer operations.
 */
static void
lagg_proto_insert_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{
	struct lagg_proto_port *pport0;
	struct lagg_port *lp, *lp0;
	bool insert_after;

	insert_after = false;
	lp = pport->lpp_laggport;

	LAGG_PROTO_LOCK(psc);
	/* Find the first entry with a higher priority, or the tail. */
	PSLIST_WRITER_FOREACH(pport0, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		lp0 = pport0->lpp_laggport;
		if (lp0->lp_prio > lp->lp_prio)
			break;

		if (PSLIST_WRITER_NEXT(pport0,
		    struct lagg_proto_port, lpp_entry) == NULL) {
			/* Reached the tail; append after it. */
			insert_after = true;
			break;
		}
	}

	if (pport0 == NULL) {
		/* Empty list. */
		PSLIST_WRITER_INSERT_HEAD(&psc->psc_ports, pport,
		    lpp_entry);
	} else if (insert_after) {
		PSLIST_WRITER_INSERT_AFTER(pport0, pport, lpp_entry);
	} else {
		PSLIST_WRITER_INSERT_BEFORE(pport0, pport, lpp_entry);
	}
	LAGG_PROTO_UNLOCK(psc);
}
316
/*
 * Unlink pport from psc_ports and wait for pserialize readers
 * (e.g. lagg_link_active()) to drain, so the caller may safely
 * free or reuse the entry afterwards.
 */
static void
lagg_proto_remove_port(struct lagg_proto_softc *psc,
    struct lagg_proto_port *pport)
{

	LAGG_PROTO_LOCK(psc);
	PSLIST_WRITER_REMOVE(pport, lpp_entry);
	pserialize_perform(psc->psc_psz);
	LAGG_PROTO_UNLOCK(psc);
}
327
/*
 * Start a port: link it into psc_ports, mark it running, then pick
 * up its current link state.  The ordering matters: the port must
 * be visible and running before linkstate processing can act on it.
 */
void
lagg_common_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;

	pport = lp->lp_proto_ctx;
	lagg_proto_insert_port(psc, pport);

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = true;
	LAGG_PROTO_UNLOCK(psc);

	lagg_common_linkstate(psc, lp);
}
342
/*
 * Stop a port: clear lpp_running so concurrent linkstate updates
 * become no-ops, unlink it (draining readers), then retire it from
 * the active count.  The lagg interface goes link-down when the
 * last active port is removed.
 */
void
lagg_common_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp;

	pport = lp->lp_proto_ctx;

	LAGG_PROTO_LOCK(psc);
	pport->lpp_running = false;
	LAGG_PROTO_UNLOCK(psc);

	lagg_proto_remove_port(psc, pport);

	/*
	 * NOTE(review): lpp_active and psc_nactports are accessed here
	 * without psc_lock; presumably serialized by sc_lock per the
	 * locking notes -- confirm against if_laggproto.h.
	 */
	if (pport->lpp_active) {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		if (psc->psc_nactports == 0) {
			ifp = &psc->psc_softc->sc_if;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}

		pport->lpp_active = false;
	}
}
/*
 * Re-evaluate the link state of lp, taking the port's ifnet lock
 * required by lagg_common_linkstate_ifnet_locked().
 */
static void
lagg_common_linkstate(struct lagg_proto_softc *psc, struct lagg_port *lp)
{

	IFNET_ASSERT_UNLOCKED(lp->lp_ifp);

	IFNET_LOCK(lp->lp_ifp);
	lagg_common_linkstate_ifnet_locked(psc, lp);
	IFNET_UNLOCK(lp->lp_ifp);
}
379
/*
 * Update the active/inactive state of lp and, when it changed,
 * refresh the port's cached linkspeed and schedule the protocol's
 * linkspeed work.  The port's ifnet lock must be held (needed for
 * the SIOCGIFMEDIA ioctl below).
 */
void
lagg_common_linkstate_ifnet_locked(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_proto_port *pport;
	struct ifnet *ifp, *ifp_port;
	struct ifmediareq ifmr;
	uint64_t linkspeed;
	bool is_active;
	int error;

	pport = lp->lp_proto_ctx;
	is_active = lagg_portactive(lp);
	ifp_port = lp->lp_ifp;

	KASSERT(IFNET_LOCKED(ifp_port));

	LAGG_PROTO_LOCK(psc);
	/* Nothing to do if stopped or the state did not change. */
	if (!pport->lpp_running ||
	    pport->lpp_active == is_active) {
		LAGG_PROTO_UNLOCK(psc);
		return;
	}

	ifp = &psc->psc_softc->sc_if;
	pport->lpp_active = is_active;

	/* The lagg is link-up while at least one port is active. */
	if (is_active) {
		psc->psc_nactports++;
		if (psc->psc_nactports == 1)
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		KASSERT(psc->psc_nactports > 0);
		psc->psc_nactports--;

		if (psc->psc_nactports == 0)
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
	LAGG_PROTO_UNLOCK(psc);

	/* Query the port's media for its current baudrate. */
	memset(&ifmr, 0, sizeof(ifmr));
	error = if_ioctl(ifp_port, SIOCGIFMEDIA, (void *)&ifmr);
	if (error == 0) {
		linkspeed = ifmedia_baudrate(ifmr.ifm_active);
	} else {
		/* Driver without media support; treat as unknown. */
		linkspeed = 0;
	}

	LAGG_PROTO_LOCK(psc);
	pport->lpp_linkspeed = linkspeed;
	LAGG_PROTO_UNLOCK(psc);
	/* Recompute the aggregate linkspeed asynchronously. */
	lagg_workq_add(psc->psc_workq, &psc->psc_work_linkspeed);
}
432
/*
 * Common detach hook: release the protocol softc allocated by
 * lagg_proto_alloc().
 */
void
lagg_common_detach(struct lagg_proto_softc *psc)
{

	lagg_proto_free(psc);
}
439
440 int
441 lagg_none_attach(struct lagg_softc *sc, struct lagg_proto_softc **pscp)
442 {
443
444 *pscp = NULL;
445 return 0;
446 }
447
448 int
449 lagg_fail_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
450 {
451 struct lagg_proto_softc *psc;
452 struct lagg_failover *fovr;
453
454 psc = lagg_proto_alloc(LAGG_PROTO_FAILOVER, sc);
455 if (psc == NULL)
456 return ENOMEM;
457
458 fovr = psc->psc_ctx;
459 fovr->fo_rx_all = true;
460 lagg_work_set(&psc->psc_work_linkspeed,
461 lagg_fail_linkspeed_work, psc);
462
463 *xpsc = psc;
464 return 0;
465 }
466
467 int
468 lagg_fail_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
469 {
470 struct ifnet *ifp;
471 struct lagg_port *lp;
472 struct psref psref;
473
474 lp = lagg_link_active(psc, NULL, &psref);
475 if (lp == NULL) {
476 ifp = &psc->psc_softc->sc_if;
477 if_statinc(ifp, if_oerrors);
478 m_freem(m);
479 return ENOENT;
480 }
481
482 lagg_output(psc->psc_softc, lp, m);
483 lagg_port_putref(lp, &psref);
484 return 0;
485 }
486
487 struct mbuf *
488 lagg_fail_input(struct lagg_proto_softc *psc, struct lagg_port *lp,
489 struct mbuf *m)
490 {
491 struct lagg_failover *fovr;
492 struct lagg_port *lp0;
493 struct ifnet *ifp;
494 struct psref psref;
495
496 fovr = psc->psc_ctx;
497 if (atomic_load_relaxed(&fovr->fo_rx_all))
498 return m;
499
500 lp0 = lagg_link_active(psc, NULL, &psref);
501 if (lp0 == NULL) {
502 goto drop;
503 }
504
505 if (lp0 != lp) {
506 lagg_port_putref(lp0, &psref);
507 goto drop;
508 }
509
510 lagg_port_putref(lp0, &psref);
511
512 return m;
513 drop:
514 ifp = &psc->psc_softc->sc_if;
515 if_statinc(ifp, if_ierrors);
516 m_freem(m);
517 return NULL;
518 }
519
/*
 * Report failover port status.  The active primary port is marked
 * ACTIVE|COLLECTING|DISTRIBUTING; other link-up ports are marked
 * COLLECTING only when fo_rx_all permits receiving on them.
 */
void
lagg_fail_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
    struct laggreqport *resp)
{
	struct lagg_failover *fovr;
	struct lagg_proto_port *pport;
	struct lagg_port *lp0;
	struct psref psref;

	fovr = psc->psc_ctx;
	pport = lp->lp_proto_ctx;

	if (pport->lpp_active) {
		/* The first active port is the one used for output. */
		lp0 = lagg_link_active(psc, NULL, &psref);
		if (lp0 == lp) {
			SET(resp->rp_flags,
			    (LAGG_PORT_ACTIVE |
			    LAGG_PORT_COLLECTING |
			    LAGG_PORT_DISTRIBUTING));
		} else {
			if (fovr->fo_rx_all) {
				SET(resp->rp_flags,
				    LAGG_PORT_COLLECTING);
			}
		}

		/* lp0 may be NULL if the port went down concurrently. */
		if (lp0 != NULL)
			lagg_port_putref(lp0, &psref);
	}
}
550
551 int
552 lagg_fail_ioctl(struct lagg_proto_softc *psc, struct laggreqproto *lreq)
553 {
554 struct lagg_failover *fovr;
555 struct laggreq_fail *rpfail;
556 int error;
557 bool set;
558
559 error = 0;
560 fovr = psc->psc_ctx;
561 rpfail = &lreq->rp_fail;
562
563 switch (rpfail->command) {
564 case LAGGIOC_FAILSETFLAGS:
565 case LAGGIOC_FAILCLRFLAGS:
566 set = (rpfail->command == LAGGIOC_FAILSETFLAGS) ?
567 true : false;
568
569 if (ISSET(rpfail->flags, LAGGREQFAIL_RXALL))
570 fovr->fo_rx_all = set;
571 break;
572 default:
573 error = ENOTTY;
574 break;
575 }
576
577 return error;
578 }
579
580 void
581 lagg_fail_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
582 {
583 struct lagg_proto_softc *psc = xpsc;
584 struct lagg_proto_port *pport;
585 struct lagg_port *lp;
586 struct psref psref;
587 uint64_t linkspeed;
588
589 kpreempt_disable();
590 lp = lagg_link_active(psc, NULL, &psref);
591 if (lp != NULL) {
592 pport = lp->lp_proto_ctx;
593 LAGG_PROTO_LOCK(psc);
594 linkspeed = pport->lpp_linkspeed;
595 LAGG_PROTO_UNLOCK(psc);
596 lagg_port_putref(lp, &psref);
597 } else {
598 linkspeed = 0;
599 }
600 kpreempt_enable();
601
602 LAGG_LOCK(psc->psc_softc);
603 lagg_set_linkspeed(psc->psc_softc, linkspeed);
604 LAGG_UNLOCK(psc->psc_softc);
605 }
606
607 int
608 lagg_lb_attach(struct lagg_softc *sc, struct lagg_proto_softc **xpsc)
609 {
610 struct lagg_proto_softc *psc;
611 struct lagg_lb *lb;
612
613 psc = lagg_proto_alloc(LAGG_PROTO_LOADBALANCE, sc);
614 if (psc == NULL)
615 return ENOMEM;
616
617 lb = psc->psc_ctx;
618 lb->lb_pmaps.maps_activepmap = 0;
619 lagg_work_set(&psc->psc_work_linkspeed,
620 lagg_lb_linkspeed_work, psc);
621
622 *xpsc = psc;
623 return 0;
624 }
625
/*
 * Add lp to the load-balance port map.  The map is double-buffered:
 * copy the active map into the staging buffer, append the new port,
 * switch the buffers, then wait for readers of the old map to drain.
 */
void
lagg_lb_startport(struct lagg_proto_softc *psc, struct lagg_port *lp)
{
	struct lagg_lb *lb;
	struct lagg_portmap *pm_act, *pm_next;
	size_t n;

	lb = psc->psc_ctx;
	lagg_common_startport(psc, lp);

	LAGG_PROTO_LOCK(psc);
	pm_act = lagg_portmap_active(&lb->lb_pmaps);
	pm_next = lagg_portmap_next(&lb->lb_pmaps);

	/* Snapshot the current map, then append the new port. */
	*pm_next = *pm_act;

	n = pm_next->pm_nports;
	pm_next->pm_ports[n] = lp;

	n++;
	pm_next->pm_nports = n;

	lagg_portmap_switch(&lb->lb_pmaps);
	/* Wait for readers of the old map (lagg_lb_transmit()). */
	pserialize_perform(psc->psc_psz);
	LAGG_PROTO_UNLOCK(psc);
}
652
653 void
654 lagg_lb_stopport(struct lagg_proto_softc *psc, struct lagg_port *lp)
655 {
656 struct lagg_lb *lb;
657 struct lagg_portmap *pm_act, *pm_next;
658 size_t i, n;
659
660 lb = psc->psc_ctx;
661
662 LAGG_PROTO_LOCK(psc);
663 pm_act = lagg_portmap_active(&lb->lb_pmaps);
664 pm_next = lagg_portmap_next(&lb->lb_pmaps);
665 n = 0;
666
667 for (i = 0; i < pm_act->pm_nports; i++) {
668 if (pm_act->pm_ports[i] == lp)
669 continue;
670
671 pm_next->pm_ports[n] = pm_act->pm_ports[i];
672 n++;
673 }
674
675 lagg_portmap_switch(&lb->lb_pmaps);
676 pserialize_perform(psc->psc_psz);
677 LAGG_PROTO_UNLOCK(psc);
678
679 lagg_common_stopport(psc, lp);
680 }
681
682 int
683 lagg_lb_transmit(struct lagg_proto_softc *psc, struct mbuf *m)
684 {
685 struct lagg_lb *lb;
686 struct lagg_portmap *pm;
687 struct lagg_port *lp, *lp0;
688 struct ifnet *ifp;
689 struct psref psref;
690 uint32_t hash;
691 int s;
692
693 lb = psc->psc_ctx;
694 hash = lagg_hashmbuf(psc->psc_softc, m);
695
696 s = pserialize_read_enter();
697
698 pm = lagg_portmap_active(&lb->lb_pmaps);
699 hash %= pm->pm_nports;
700 lp0 = pm->pm_ports[hash];
701 lp = lagg_link_active(psc, lp0->lp_proto_ctx, &psref);
702
703 pserialize_read_exit(s);
704
705 if (__predict_false(lp == NULL)) {
706 ifp = &psc->psc_softc->sc_if;
707 if_statinc(ifp, if_oerrors);
708 m_freem(m);
709 return ENOENT;
710 }
711
712 lagg_output(psc->psc_softc, lp, m);
713 lagg_port_putref(lp, &psref);
714
715 return 0;
716 }
717
/*
 * Load-balance input: frames from any member port are accepted
 * unchanged.
 */
struct mbuf *
lagg_lb_input(struct lagg_proto_softc *psc __unused,
    struct lagg_port *lp __unused, struct mbuf *m)
{

	return m;
}
725
726 void
727 lagg_lb_portstat(struct lagg_proto_softc *psc, struct lagg_port *lp,
728 struct laggreqport *resp)
729 {
730 struct lagg_proto_port *pport;
731
732 pport = lp->lp_proto_ctx;
733
734 if (pport->lpp_active) {
735 SET(resp->rp_flags, LAGG_PORT_ACTIVE |
736 LAGG_PORT_COLLECTING | LAGG_PORT_DISTRIBUTING);
737 }
738 }
739
/*
 * Workqueue handler: set the lagg linkspeed to the maximum
 * linkspeed among the link-active ports (0 when none is active).
 */
static void
lagg_lb_linkspeed_work(struct lagg_work *_lw __unused, void *xpsc)
{
	struct lagg_proto_softc *psc = xpsc;
	struct lagg_proto_port *pport;
	uint64_t linkspeed, l;
	int s;

	linkspeed = 0;

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(pport, &psc->psc_ports,
	    struct lagg_proto_port, lpp_entry) {
		if (pport->lpp_active) {
			/* psc_lock guards lpp_linkspeed updates. */
			LAGG_PROTO_LOCK(psc);
			l = pport->lpp_linkspeed;
			LAGG_PROTO_UNLOCK(psc);
			linkspeed = MAX(linkspeed, l);
		}
	}
	pserialize_read_exit(s);

	LAGG_LOCK(psc->psc_softc);
	lagg_set_linkspeed(psc->psc_softc, linkspeed);
	LAGG_UNLOCK(psc->psc_softc);
}
766