/*	$NetBSD: altq_rio.c,v 1.8.12.2 2006/06/09 19:52:35 peter Exp $	*/
/*	$KAME: altq_rio.c,v 1.19 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (C) 1998-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1990-1994 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Computer Systems
 *	Engineering Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_rio.c,v 1.8.12.2 2006/06/09 19:52:35 peter Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_RIO	/* rio is enabled by ALTQ_RIO option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kauth.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#endif

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <net/pfvar.h>
#include <altq/altq.h>
#include <altq/altq_cdnr.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * RIO: RED with IN/OUT bit
 *	described in
 *	"Explicit Allocation of Best Effort Packet Delivery Service"
 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
 *
 * this implementation is extended to support more than 2 drop precedence
 * values as described in RFC2597 (Assured Forwarding PHB Group).
 */
/*
 * AF DS (differentiated service) codepoints.
 * (classes can be mapped to CBQ or H-FSC classes.)
 *
 *	  0   1   2   3   4   5   6   7
 *	+---+---+---+---+---+---+---+---+
 *	|   CLASS   |DropPre| 0 |  CU   |
 *	+---+---+---+---+---+---+---+---+
 *
 *	class 1: 001
 *	class 2: 010
 *	class 3: 011
 *	class 4: 100
 *
 *	low drop prec:    01
 *	medium drop prec: 10
 *	high drop prec:   11
 */
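
/*
 * Example, following the layout above and dscp2index() below: AF13
 * (class 1, high drop precedence) carries the bits 001 11 0 in the
 * CLASS/DropPre/0 fields.  dscp2index() keys on the DropPre bits only,
 * mapping 01 -> index 0 (low), 10 -> index 1 (medium) and 11 -> index 2
 * (high); a codepoint with a zero DropPre field is treated as index 0.
 */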

/* normal red parameters */
#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
				/* q_weight = 0.00195 */

/* red parameters for a slow link */
#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
				/* q_weight = 0.0078125 */

/* red parameters for a very slow link (e.g., dialup) */
#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
				/* q_weight = 0.015625 */

/* fixed-point uses 12 bits for the fractional part */
#define	FP_SHIFT	12	/* fixed-point shift */
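
/*
 * Note: the average queue length (avg) and the scaled thresholds
 * (th_min_s/th_max_s) set up in rio_alloc() carry an additional factor
 * of 2^rio_wshift on top of the FP_SHIFT fixed-point scaling, so the
 * EWMA update in rio_addq() can be done with shifts only.
 */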

/* red parameters for drop probability */
#define	INV_P_MAX	10	/* inverse of max drop probability */
#define	TH_MIN		 5	/* min threshold */
#define	TH_MAX		15	/* max threshold */

#define	RIO_LIMIT	60	/* default max queue length */
#define	RIO_STATS		/* collect statistics */

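/*
 * TV_DELTA(a, b, delta) stores the time elapsed from timeval b to timeval a
 * in microseconds in delta; negative differences and differences of more
 * than about a minute are clamped to 60 seconds (60000000 usec).
 */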
#define	TV_DELTA(a, b, delta) {					\
	register int	xxs;					\
								\
	delta = (a)->tv_usec - (b)->tv_usec; 			\
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { 		\
		if (xxs < 0) { 					\
			delta = 60000000;			\
		} else if (xxs > 4)  {				\
			if (xxs > 60)				\
				delta = 60000000;		\
			else					\
				delta += xxs * 1000000;		\
		} else while (xxs > 0) {			\
			delta += 1000000;			\
			xxs--;					\
		}						\
	}							\
}

#ifdef ALTQ3_COMPAT
/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
#endif
/* default rio parameter values */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,		 th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,	 TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,		 TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
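
/*
 * With the defaults above (TH_MIN 5, TH_MAX 15) the three precedence levels
 * get staggered threshold bands: high drop precedence 5-15, medium 20-30,
 * and low 35-45 packets of average queue length, so packets marked with a
 * higher drop precedence start being dropped at much shorter queues.
 */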

/* internal function prototypes */
static int dscp2index(u_int8_t);
#ifdef ALTQ3_COMPAT
static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
static struct mbuf *rio_dequeue(struct ifaltq *, int);
static int rio_request(struct ifaltq *, int, void *);
static int rio_detach(rio_queue_t *);

/*
 * rio device interface
 */
altqdev_decl(rio);

#endif /* ALTQ3_COMPAT */

rio_t *
rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
{
	rio_t	*rp;
	int	 w, i;
	int	 npkts_per_sec;

	rp = malloc(sizeof(rio_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (rp == NULL)
		return (NULL);

	rp->rio_flags = flags;
	if (pkttime == 0)
		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
		rp->rio_pkttime = 800;
	else
		rp->rio_pkttime = pkttime;

	if (weight != 0)
		rp->rio_weight = weight;
	else {
		/* use default */
		rp->rio_weight = W_WEIGHT;

		/* when the link is very slow, adjust red parameters */
		npkts_per_sec = 1000000 / rp->rio_pkttime;
		if (npkts_per_sec < 50) {
			/* up to about 400Kbps */
			rp->rio_weight = W_WEIGHT_2;
		} else if (npkts_per_sec < 300) {
			/* up to about 2.4Mbps */
			rp->rio_weight = W_WEIGHT_1;
		}
	}

	/* calculate wshift.  weight must be power of 2 */
	w = rp->rio_weight;
	for (i = 0; w > 1; i++)
		w = w >> 1;
	rp->rio_wshift = i;
	w = 1 << rp->rio_wshift;
	if (w != rp->rio_weight) {
		printf("invalid weight value %d for red! use %d\n",
		    rp->rio_weight, w);
		rp->rio_weight = w;
	}
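
	/*
	 * e.g., the default W_WEIGHT of 512 yields rio_wshift = 9, i.e. an
	 * EWMA weight of 1/512 per packet arrival.
	 */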

	/* allocate weight table */
	rp->rio_wtab = wtab_alloc(rp->rio_weight);

	for (i = 0; i < RIO_NDROPPREC; i++) {
		struct dropprec_state *prec = &rp->rio_precstate[i];

		prec->avg = 0;
		prec->idle = 1;

		if (params == NULL || params[i].inv_pmax == 0)
			prec->inv_pmax = default_rio_params[i].inv_pmax;
		else
			prec->inv_pmax = params[i].inv_pmax;
		if (params == NULL || params[i].th_min == 0)
			prec->th_min = default_rio_params[i].th_min;
		else
			prec->th_min = params[i].th_min;
		if (params == NULL || params[i].th_max == 0)
			prec->th_max = default_rio_params[i].th_max;
		else
			prec->th_max = params[i].th_max;

		/*
		 * th_min_s and th_max_s are scaled versions of th_min
		 * and th_max to be compared with avg.
		 */
		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);

		/*
		 * precompute probability denominator
		 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
		 */
		prec->probd = (2 * (prec->th_max - prec->th_min)
			       * prec->inv_pmax) << FP_SHIFT;

		microtime(&prec->last);
	}

	return (rp);
}

void
rio_destroy(rio_t *rp)
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}

void
rio_getstats(rio_t *rp, struct redstats *sp)
{
	int	i;

	for (i = 0; i < RIO_NDROPPREC; i++) {
		bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
		sp++;
	}
}

#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0.
 */
static int
dscp2index(u_int8_t dscp)
{
	int	dpindex = dscp & AF_DROPPRECMASK;

	if (dpindex == 0)
		return (0);
	return ((dpindex >> 3) - 1);
}
#endif

#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
#define	RIOM_SET_PRECINDEX(m, idx)	\
	do { (m)->m_pkthdr.rcvif = (struct ifnet *)((long)(idx)); } while (0)
#define	RIOM_GET_PRECINDEX(m)	\
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
	(m)->m_pkthdr.rcvif = NULL; idx; })
#endif

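/*
 * rio_addq() generalizes the IN/OUT queues of the original RIO scheme to
 * RIO_NDROPPREC coupled levels: the average and per-level queue lengths of
 * level i account for every queued packet whose drop precedence index is
 * less than or equal to i, so the highest drop precedence level sees the
 * whole queue while the lowest level sees only the best protected traffic.
 */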
int
rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int			 avg, droptype;
	u_int8_t		 dsfield, odsfield;
	int			 dpindex, i, n, t;
	struct timeval		 now;
	struct dropprec_state	*prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;
			else {
				t = t * 1000000 +
				    (now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
					    pow_w(rp->rio_wtab, n);
			}
		}

		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
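
		/*
		 * The update above is the RED EWMA
		 *	avg = (1 - 1/2^wshift) * avg + qlen/2^wshift
		 * computed on avg scaled by 2^(wshift + FP_SHIFT); the idle
		 * branch relies on pow_w() from altq_red.c returning
		 * (1 - 1/2^wshift)^n in FP_SHIFT fixed-point.
		 */
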
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
		    prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}

struct mbuf *
rio_getq(rio_t *rp, class_queue_t *q)
{
	struct mbuf	*m;
	int		 dpindex, i;

	if ((m = _getq(q)) == NULL)
		return NULL;

	dpindex = RIOM_GET_PRECINDEX(m);
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		if (--rp->rio_precstate[i].qlen == 0) {
			if (rp->rio_precstate[i].idle == 0) {
				rp->rio_precstate[i].idle = 1;
				microtime(&rp->rio_precstate[i].last);
			}
		}
	}
	return (m);
}

#ifdef ALTQ3_COMPAT
int
rioopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
rioclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	rio_queue_t *rqp;
	int err, error = 0;

	while ((rqp = rio_list) != NULL) {
		/* destroy all */
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
rioioctl(dev_t dev, ioctlcmd_t cmd, caddr_t addr, int flag, struct lwp *l)
{
	rio_queue_t *rqp;
	struct rio_interface *ifacep;
	struct ifnet *ifp;
	struct proc *p = l->l_proc;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case RIO_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(p->p_cred,
		    KAUTH_GENERIC_ISSUSER, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case RIO_ENABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case RIO_DISABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case RIO_IF_ATTACH:
		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize rio_queue_t */
		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
		if (rqp->rq_rio == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = RIO_LIMIT;
		qtype(rqp->rq_q) = Q_RIO;

		/*
		 * set RIO to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
		    rio_enqueue, rio_dequeue, rio_request, NULL, NULL);
		if (error) {
			rio_destroy(rqp->rq_rio);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the rio list */
		rqp->rq_next = rio_list;
		rio_list = rqp;
		break;

	case RIO_IF_DETACH:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = rio_detach(rqp);
		break;

	case RIO_GETSTATS:
		do {
			struct rio_stats *q_stats;
			rio_t *rp;
			int i;

			q_stats = (struct rio_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			rp = rqp->rq_rio;

			q_stats->q_limit = qlimit(rqp->rq_q);
			q_stats->weight = rp->rio_weight;
			q_stats->flags = rp->rio_flags;

			for (i = 0; i < RIO_NDROPPREC; i++) {
				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
				bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
				    sizeof(struct redstats));
				q_stats->q_stats[i].q_avg =
				    rp->rio_precstate[i].avg >> rp->rio_wshift;

				q_stats->q_params[i].inv_pmax
					= rp->rio_precstate[i].inv_pmax;
				q_stats->q_params[i].th_min
					= rp->rio_precstate[i].th_min;
				q_stats->q_params[i].th_max
					= rp->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_CONFIG:
		do {
			struct rio_conf *fc;
			rio_t	*new;
			int	s, limit, i;

			fc = (struct rio_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
			    fc->rio_flags, fc->rio_pkttime);
			if (new == NULL) {
				error = ENOMEM;
				break;
			}

			s = splnet();
			_flushq(rqp->rq_q);
			limit = fc->rio_limit;
			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
			qlimit(rqp->rq_q) = limit;

			rio_destroy(rqp->rq_rio);
			rqp->rq_rio = new;

			splx(s);

			/* write back new values */
			fc->rio_limit = limit;
			for (i = 0; i < RIO_NDROPPREC; i++) {
				fc->q_params[i].inv_pmax =
					rqp->rq_rio->rio_precstate[i].inv_pmax;
				fc->q_params[i].th_min =
					rqp->rq_rio->rio_precstate[i].th_min;
				fc->q_params[i].th_max =
					rqp->rq_rio->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_SETDEFAULTS:
		do {
			struct redparams *rp;
			int	i;

			rp = (struct redparams *)addr;
			for (i = 0; i < RIO_NDROPPREC; i++)
				default_rio_params[i] = rp[i];
		} while (/*CONSTCOND*/ 0);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
rio_detach(rio_queue_t *rqp)
{
	rio_queue_t *tmp;
	int error = 0;

	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))
		return (error);

	if (rio_list == rqp)
		rio_list = rqp->rq_next;
	else {
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
				break;
			}
		if (tmp == NULL)
			printf("rio_detach: no state found in rio_list!\n");
	}

	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
	free(rqp, M_DEVBUF);
	return (error);
}

/*
 * rio support routines
 */
static int
rio_request(struct ifaltq *ifq, int req, void *arg)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		_flushq(rqp->rq_q);
		if (ALTQ_IS_ENABLED(ifq))
			ifq->ifq_len = 0;
		break;
	}
	return (0);
}

/*
 * enqueue routine:
 *
 *	returns: 0 when successfully queued.
 *		 ENOBUFS when drop occurs.
 */
static int
rio_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	int error = 0;

	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
		ifq->ifq_len++;
	else
		error = ENOBUFS;
	return error;
}

/*
 * dequeue routine:
 *	must be called in splnet.
 *
 *	returns: mbuf dequeued.
 *		 NULL when no packet is available in the queue.
 */
static struct mbuf *
rio_dequeue(struct ifaltq *ifq, int op)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *m = NULL;

	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	m = rio_getq(rqp->rq_rio, rqp->rq_q);
	if (m != NULL)
		ifq->ifq_len--;
	return m;
}

#ifdef KLD_MODULE

static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
MODULE_VERSION(altq_rio, 1);
MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_RIO */