/*	$NetBSD: altq_wfq.c,v 1.24 2025/01/08 13:00:04 joe Exp $	*/
/*	$KAME: altq_wfq.c,v 1.14 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (C) 1997-2002
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * March 27, 1997.  Written by Hiroshi Kyusojin of Keio University
 * (kyu@mt.cs.keio.ac.jp).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_wfq.c,v 1.24 2025/01/08 13:00:04 joe Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_WFQ

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_wfq.h>

#ifdef ALTQ3_COMPAT
/*
#define	WFQ_DEBUG
*/

static int	wfq_setenable(struct wfq_interface *, int);
static int	wfq_ifattach(struct wfq_interface *);
static int	wfq_ifdetach(struct wfq_interface *);
static int	wfq_ifenqueue(struct ifaltq *, struct mbuf *);
static u_long	wfq_hash(struct flowinfo *, int);
static inline u_long	wfq_hashbydstaddr(struct flowinfo *, int);
static inline u_long	wfq_hashbysrcaddr(struct flowinfo *, int);
static inline u_long	wfq_hashbysrcport(struct flowinfo *, int);
static wfq	*wfq_maxqueue(wfq_state_t *);
static struct mbuf	*wfq_ifdequeue(struct ifaltq *, int);
static int	wfq_getqid(struct wfq_getqid *);
static int	wfq_setweight(struct wfq_setweight *);
static int	wfq_getstats(struct wfq_getstats *);
static int	wfq_config(struct wfq_conf *);
static int	wfq_request(struct ifaltq *, int, void *);
static int	wfq_flush(struct ifaltq *);
static void	*wfq_classify(void *, struct mbuf *, int);

/* global value : pointer to wfq queue list */
static wfq_state_t *wfq_list = NULL;

static int
wfq_setenable(struct wfq_interface *ifacep, int flag)
{
	wfq_state_t	*wfqp;
	int		error = 0;

	if ((wfqp = altq_lookup(ifacep->wfq_ifacename, ALTQT_WFQ)) == NULL)
		return EBADF;

	switch (flag) {
	case ENABLE:
		error = altq_enable(wfqp->ifq);
		break;
	case DISABLE:
		error = altq_disable(wfqp->ifq);
		break;
	}
	return error;
}


static int
wfq_ifattach(struct wfq_interface *ifacep)
{
	int		error = 0, i;
	struct ifnet	*ifp;
	wfq_state_t	*new_wfqp;
	wfq		*queue;

	if ((ifp = ifunit(ifacep->wfq_ifacename)) == NULL) {
#ifdef WFQ_DEBUG
		printf("wfq_ifattach()...no ifp found\n");
#endif
		return ENXIO;
	}

	if (!ALTQ_IS_READY(&ifp->if_snd)) {
#ifdef WFQ_DEBUG
		printf("wfq_ifattach()...altq is not ready\n");
#endif
		return ENXIO;
	}

	/* allocate and initialize wfq_state_t */
	new_wfqp = malloc(sizeof(wfq_state_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (new_wfqp == NULL)
		return ENOMEM;

	queue = malloc(sizeof(wfq) * DEFAULT_QSIZE, M_DEVBUF, M_WAITOK|M_ZERO);
	if (queue == NULL) {
		free(new_wfqp, M_DEVBUF);
		return ENOMEM;
	}

	/* keep the ifq */
	new_wfqp->ifq = &ifp->if_snd;
	new_wfqp->nums = DEFAULT_QSIZE;
	new_wfqp->hwm = HWM;
	new_wfqp->bytes = 0;
	new_wfqp->rrp = NULL;
	new_wfqp->queue = queue;
	new_wfqp->hash_func = wfq_hashbydstaddr;
	new_wfqp->fbmask = FIMB4_DADDR;

	for (i = 0; i < new_wfqp->nums; i++, queue++) {
		queue->next = queue->prev = NULL;
		queue->head = queue->tail = NULL;
		queue->bytes = queue->quota = 0;
		queue->weight = 100;
	}

	/*
	 * set WFQ to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_WFQ, new_wfqp,
	    wfq_ifenqueue, wfq_ifdequeue, wfq_request,
	    new_wfqp, wfq_classify)) != 0) {
		/* free the start of the queue array; "queue" itself was
		   advanced past the end by the initialization loop above */
		free(new_wfqp->queue, M_DEVBUF);
		free(new_wfqp, M_DEVBUF);
		return error;
	}

	new_wfqp->next = wfq_list;
	wfq_list = new_wfqp;

	return error;
}


static int
wfq_ifdetach(struct wfq_interface *ifacep)
{
	int		error = 0;
	wfq_state_t	*wfqp;

	if ((wfqp = altq_lookup(ifacep->wfq_ifacename, ALTQT_WFQ)) == NULL)
		return EBADF;

	/* free queued mbuf */
	wfq_flush(wfqp->ifq);

	/* remove WFQ from the ifnet structure. */
	(void)altq_disable(wfqp->ifq);
	(void)altq_detach(wfqp->ifq);

	/* remove from the wfqstate list */
	if (wfq_list == wfqp)
		wfq_list = wfqp->next;
	else {
		wfq_state_t *wp = wfq_list;
		do {
			if (wp->next == wfqp) {
				wp->next = wfqp->next;
				break;
			}
		} while ((wp = wp->next) != NULL);
	}

	/* deallocate wfq_state_t */
	free(wfqp->queue, M_DEVBUF);
	free(wfqp, M_DEVBUF);
	return error;
}

static int
wfq_request(struct ifaltq *ifq, int req, void *arg)
{
	wfq_state_t *wfqp = (wfq_state_t *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		wfq_flush(wfqp->ifq);
		break;
	}
	return 0;
}


static int
wfq_flush(struct ifaltq *ifq)
{
	struct mbuf *mp;

	while ((mp = wfq_ifdequeue(ifq, ALTDQ_REMOVE)) != NULL)
		m_freem(mp);
	if (ALTQ_IS_ENABLED(ifq))
		ifq->ifq_len = 0;
	return 0;
}

static void *
wfq_classify(void *clfier, struct mbuf *m, int af)
{
	wfq_state_t *wfqp = (wfq_state_t *)clfier;
	struct flowinfo flow;

	altq_extractflow(m, af, &flow, wfqp->fbmask);
	return (&wfqp->queue[(*wfqp->hash_func)(&flow, wfqp->nums)]);
}

static int
wfq_ifenqueue(struct ifaltq *ifq, struct mbuf *mp)
{
	wfq_state_t	*wfqp;
	wfq		*queue;
	int		byte, error = 0;

	wfqp = (wfq_state_t *)ifq->altq_disc;
	mp->m_nextpkt = NULL;

	/* grab a queue selected by classifier */
	if ((queue = mp->m_pkthdr.pattr_class) == NULL)
		queue = &wfqp->queue[0];

	if (queue->tail == NULL)
		queue->head = mp;
	else
		queue->tail->m_nextpkt = mp;
	queue->tail = mp;
	byte = mp->m_pkthdr.len;
	queue->bytes += byte;
	wfqp->bytes += byte;
	ifq->ifq_len++;

	if (queue->next == NULL) {
		/* this queue gets active. add the queue to the active list */
		if (wfqp->rrp == NULL) {
			/* no queue in the active list */
			queue->next = queue->prev = queue;
			wfqp->rrp = queue;
			WFQ_ADDQUOTA(queue);
		} else {
			/* insert the queue at the tail of the active list */
			queue->prev = wfqp->rrp->prev;
			wfqp->rrp->prev->next = queue;
			wfqp->rrp->prev = queue;
			queue->next = wfqp->rrp;
			queue->quota = 0;
		}
	}

	/* check overflow. if the total size exceeds the high water mark,
	   drop packets from the longest queue. */
	while (wfqp->bytes > wfqp->hwm) {
		wfq *drop_queue = wfq_maxqueue(wfqp);

		/* drop the packet at the head. */
		mp = drop_queue->head;
		if ((drop_queue->head = mp->m_nextpkt) == NULL)
			drop_queue->tail = NULL;
		mp->m_nextpkt = NULL;
		byte = mp->m_pkthdr.len;
		drop_queue->bytes -= byte;
		PKTCNTR_ADD(&drop_queue->drop_cnt, byte);
		wfqp->bytes -= byte;
		m_freem(mp);
		ifq->ifq_len--;
		if (drop_queue == queue)
			/* the queue for this flow is selected to drop */
			error = ENOBUFS;
	}
	return error;
}

static u_long
wfq_hash(struct flowinfo *flow, int n)
{
	u_long val = 0;

	if (flow != NULL) {
		if (flow->fi_family == AF_INET) {
			struct flowinfo_in *fp = (struct flowinfo_in *)flow;
			u_long val2;

			val = fp->fi_dst.s_addr ^ fp->fi_src.s_addr;
			val = val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24);
			val2 = fp->fi_dport ^ fp->fi_sport ^ fp->fi_proto;
			val2 = val2 ^ (val2 >> 8);
			val = val ^ val2;
		}
#ifdef INET6
		else if (flow->fi_family == AF_INET6) {
			struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)flow;

			val = ntohl(fp6->fi6_flowlabel);
		}
#endif
	}

	return (val % n);
}

static inline u_long
wfq_hashbydstaddr(struct flowinfo *flow, int n)
{
	u_long val = 0;

	if (flow != NULL) {
		if (flow->fi_family == AF_INET) {
			struct flowinfo_in *fp = (struct flowinfo_in *)flow;

			val = fp->fi_dst.s_addr;
			val = val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24);
		}
#ifdef INET6
		else if (flow->fi_family == AF_INET6) {
			struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)flow;

			val = ntohl(fp6->fi6_flowlabel);
		}
#endif
	}

	return (val % n);
}

static inline u_long
wfq_hashbysrcaddr(struct flowinfo *flow, int n)
{
	u_long val = 0;

	if (flow != NULL) {
		if (flow->fi_family == AF_INET) {
			struct flowinfo_in *fp = (struct flowinfo_in *)flow;

			val = fp->fi_src.s_addr;
			val = val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24);
		}
#ifdef INET6
		else if (flow->fi_family == AF_INET6) {
			struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)flow;

			val = ntohl(fp6->fi6_flowlabel);
		}
#endif
	}

	return (val % n);
}

static inline u_long
wfq_hashbysrcport(struct flowinfo *flow, int n)
{
	u_long val = 0;

	if (flow != NULL) {
		if (flow->fi_family == AF_INET) {
			struct flowinfo_in *fp = (struct flowinfo_in *)flow;

			val = fp->fi_sport;
		}
#ifdef INET6
		else if (flow->fi_family == AF_INET6) {
			struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)flow;

			val = fp6->fi6_sport;
		}
#endif
	}
	val = val ^ (val >> 8);

	return (val % n);
}

static wfq *
wfq_maxqueue(wfq_state_t *wfqp)
{
	int byte, max_byte = 0;
	wfq *queue, *max_queue = NULL;

	if ((queue = wfqp->rrp) == NULL)
		/* never happens */
		return NULL;
	do {
		if ((byte = queue->bytes * 100 / queue->weight) > max_byte) {
			max_queue = queue;
			max_byte = byte;
		}
	} while ((queue = queue->next) != wfqp->rrp);

	return max_queue;
}


static struct mbuf *
wfq_ifdequeue(struct ifaltq *ifq, int op)
{
	wfq_state_t	*wfqp;
	wfq		*queue;
	struct mbuf	*mp;
	int		byte;

	wfqp = (wfq_state_t *)ifq->altq_disc;

	if ((wfqp->bytes == 0) || ((queue = wfqp->rrp) == NULL))
		/* no packet in the queues */
		return NULL;

	while (1) {
		if (queue->quota > 0) {
			if (queue->bytes <= 0) {
				/* this queue no longer has packets.
				   remove the queue from the active list. */
				if (queue->next == queue) {
					/* no other active queue
					   -- this case never happens in
					   this algorithm. */
					queue->next = queue->prev = NULL;
					wfqp->rrp = NULL;
					return NULL;
				} else {
					queue->prev->next = queue->next;
					queue->next->prev = queue->prev;
					/* the round-robin pointer points
					   to this queue, advance the rrp */
					wfqp->rrp = queue->next;
					queue->next = queue->prev = NULL;
					queue = wfqp->rrp;
					WFQ_ADDQUOTA(queue);
					continue;
				}
			}

			/* dequeue a packet from this queue */
			mp = queue->head;
			if (op == ALTDQ_REMOVE) {
				if ((queue->head = mp->m_nextpkt) == NULL)
					queue->tail = NULL;
				byte = mp->m_pkthdr.len;
				mp->m_nextpkt = NULL;
				queue->quota -= byte;
				queue->bytes -= byte;
				PKTCNTR_ADD(&queue->xmit_cnt, byte);
				wfqp->bytes -= byte;
				if (ALTQ_IS_ENABLED(ifq))
					ifq->ifq_len--;
			}
			return mp;

			/* if the queue gets empty by this dequeueing,
			   the queue will be removed from the active list
			   at the next round */
		}

		/* advance the round-robin pointer */
		queue = wfqp->rrp = queue->next;
		WFQ_ADDQUOTA(queue);
	}
}

static int
wfq_getqid(struct wfq_getqid *gqidp)
{
	wfq_state_t	*wfqp;

	if ((wfqp = altq_lookup(gqidp->iface.wfq_ifacename, ALTQT_WFQ))
	    == NULL)
		return (EBADF);

	gqidp->qid = (*wfqp->hash_func)(&gqidp->flow, wfqp->nums);
	return 0;
}

static int
wfq_setweight(struct wfq_setweight *swp)
{
	wfq_state_t	*wfqp;
	wfq		*queue;
	int		old;

	if (swp->weight < 0)
		return (EINVAL);

	if ((wfqp = altq_lookup(swp->iface.wfq_ifacename, ALTQT_WFQ)) == NULL)
		return (EBADF);

	if (swp->qid < 0 || swp->qid >= wfqp->nums)
		return (EINVAL);

	queue = &wfqp->queue[swp->qid];
	old = queue->weight;
	queue->weight = swp->weight;
	swp->weight = old;
	return 0;
}


static int
wfq_getstats(struct wfq_getstats *gsp)
{
	wfq_state_t	*wfqp;
	wfq		*queue;
	queue_stats	*stats;

	if ((wfqp = altq_lookup(gsp->iface.wfq_ifacename, ALTQT_WFQ)) == NULL)
		return (EBADF);

	if (gsp->qid < 0 || gsp->qid >= wfqp->nums)
		return (EINVAL);

	queue = &wfqp->queue[gsp->qid];
	stats = &gsp->stats;

	stats->bytes = queue->bytes;
	stats->weight = queue->weight;
	stats->xmit_cnt = queue->xmit_cnt;
	stats->drop_cnt = queue->drop_cnt;

	return 0;
}


static int
wfq_config(struct wfq_conf *cf)
{
	wfq_state_t	*wfqp;
	wfq		*queue;
	int		i, error = 0;

	if ((wfqp = altq_lookup(cf->iface.wfq_ifacename, ALTQT_WFQ)) == NULL)
		return (EBADF);

	if (cf->nqueues <= 0 || MAX_QSIZE < cf->nqueues)
		cf->nqueues = DEFAULT_QSIZE;

	if (cf->nqueues != wfqp->nums) {
		/* free queued mbuf */
		wfq_flush(wfqp->ifq);
		free(wfqp->queue, M_DEVBUF);

		queue = malloc(sizeof(wfq) * cf->nqueues, M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (queue == NULL)
			return ENOMEM;

		wfqp->nums = cf->nqueues;
		wfqp->bytes = 0;
		wfqp->rrp = NULL;
		wfqp->queue = queue;
		for (i = 0; i < wfqp->nums; i++, queue++) {
			queue->next = queue->prev = NULL;
			queue->head = queue->tail = NULL;
			queue->bytes = queue->quota = 0;
			queue->weight = 100;
		}
	}

	if (cf->qlimit != 0)
		wfqp->hwm = cf->qlimit;

	switch (cf->hash_policy) {
	case WFQ_HASH_DSTADDR:
		wfqp->hash_func = wfq_hashbydstaddr;
		wfqp->fbmask = FIMB4_DADDR;
#ifdef INET6
		wfqp->fbmask |= FIMB6_FLABEL;	/* use flowlabel for ipv6 */
#endif
		break;
	case WFQ_HASH_SRCPORT:
		wfqp->hash_func = wfq_hashbysrcport;
		wfqp->fbmask = FIMB4_SPORT;
#ifdef INET6
		wfqp->fbmask |= FIMB6_SPORT;
#endif
		break;
	case WFQ_HASH_FULL:
		wfqp->hash_func = wfq_hash;
		wfqp->fbmask = FIMB4_ALL;
#ifdef INET6
		wfqp->fbmask |= FIMB6_FLABEL;	/* use flowlabel for ipv6 */
#endif
		break;
	case WFQ_HASH_SRCADDR:
		wfqp->hash_func = wfq_hashbysrcaddr;
		wfqp->fbmask = FIMB4_SADDR;	/* wfq_hashbysrcaddr() hashes the source address */
#ifdef INET6
		wfqp->fbmask |= FIMB6_FLABEL;	/* use flowlabel for ipv6 */
#endif
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * wfq device interface
 */

altqdev_decl(wfq);

int
wfqopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	return 0;
}

int
wfqclose(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	struct ifnet *ifp;
	struct wfq_interface iface;
	wfq_state_t *wfqp;
	int s;

	s = splnet();
	while ((wfqp = wfq_list) != NULL) {
		ifp = wfqp->ifq->altq_ifp;
		snprintf(iface.wfq_ifacename, sizeof(iface.wfq_ifacename),
		    "%s", ifp->if_xname);
		wfq_ifdetach(&iface);
	}
	splx(s);
	return 0;
}

int
wfqioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
	int	error = 0;
	int	s;

	/* check cmd for superuser only */
	switch (cmd) {
	case WFQ_GET_QID:
	case WFQ_GET_STATS:
		break;
	default:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_WFQ, NULL,
		    NULL, NULL)) != 0)
			return error;
		break;
	}

	s = splnet();
	switch (cmd) {

	case WFQ_ENABLE:
		error = wfq_setenable((struct wfq_interface *)addr, ENABLE);
		break;

	case WFQ_DISABLE:
		error = wfq_setenable((struct wfq_interface *)addr, DISABLE);
		break;

	case WFQ_IF_ATTACH:
		error = wfq_ifattach((struct wfq_interface *)addr);
		break;

	case WFQ_IF_DETACH:
		error = wfq_ifdetach((struct wfq_interface *)addr);
		break;

	case WFQ_GET_QID:
		error = wfq_getqid((struct wfq_getqid *)addr);
		break;

	case WFQ_SET_WEIGHT:
		error = wfq_setweight((struct wfq_setweight *)addr);
		break;

	case WFQ_GET_STATS:
		error = wfq_getstats((struct wfq_getstats *)addr);
		break;

	case WFQ_CONFIG:
		error = wfq_config((struct wfq_conf *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return error;
}

#ifdef KLD_MODULE

static struct altqsw wfq_sw =
	{"wfq", wfqopen, wfqclose, wfqioctl};

ALTQ_MODULE(altq_wfq, ALTQT_WFQ, &wfq_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_WFQ */