/*	$NetBSD: altq_rio.c,v 1.14 2006/10/12 19:59:08 peter Exp $	*/
/*	$KAME: altq_rio.c,v 1.19 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (C) 1998-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1990-1994 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Computer Systems
 *	Engineering Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_rio.c,v 1.14 2006/10/12 19:59:08 peter Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_RIO	/* rio is enabled by ALTQ_RIO option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kauth.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#endif

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <net/pfvar.h>
#include <altq/altq.h>
#include <altq/altq_cdnr.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * RIO: RED with IN/OUT bit
 *   described in
 *	"Explicit Allocation of Best Effort Packet Delivery Service"
 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
 *
 * this implementation is extended to support more than 2 drop precedence
 * values as described in RFC2597 (Assured Forwarding PHB Group).
 *
 */
/*
 * AF DS (differentiated service) codepoints.
 * (classes can be mapped to CBQ or H-FSC classes.)
 *
 *      0   1   2   3   4   5   6   7
 *    +---+---+---+---+---+---+---+---+
 *    |   CLASS   |DropPre|  0  |  CU |
 *    +---+---+---+---+---+---+---+---+
 *
 *    class 1: 001
 *    class 2: 010
 *    class 3: 011
 *    class 4: 100
 *
 *    low drop prec:    01
 *    medium drop prec: 10
 *    high drop prec:   11
 */
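/*
 * Example (assuming AF_DROPPRECMASK selects the DropPre bits shown
 * above): AF11 (class 1, low drop precedence) is the DS byte
 * 001 01 0 00 = 0x28, AF12 is 0x30 and AF13 is 0x38; dscp2index()
 * below maps them to the internal indices 0, 1 and 2 respectively.
 */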

/* normal red parameters */
#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
				/* q_weight = 0.00195 */

/* red parameters for a slow link */
#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
				/* q_weight = 0.0078125 */

/* red parameters for a very slow link (e.g., dialup) */
#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
				/* q_weight = 0.015625 */

/* fixed-point uses 12-bit decimal places */
#define	FP_SHIFT	12	/* fixed-point shift */

/* red parameters for drop probability */
#define	INV_P_MAX	10	/* inverse of max drop probability */
#define	TH_MIN		 5	/* min threshold */
#define	TH_MAX		15	/* max threshold */

#define	RIO_LIMIT	60	/* default max queue length */
#define	RIO_STATS		/* collect statistics */
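
/*
 * Fixed-point sketch: with FP_SHIFT of 12, the value 1.0 is represented
 * as 1 << 12 = 4096.  The average queue length (avg) is additionally
 * scaled by the EWMA weight, so thresholds are compared after shifting
 * by (rio_wshift + FP_SHIFT); see th_min_s/th_max_s in rio_alloc() below.
 */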

#define	TV_DELTA(a, b, delta) {					\
	register int	xxs;					\
								\
	delta = (a)->tv_usec - (b)->tv_usec;			\
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) {		\
		if (xxs < 0) {					\
			delta = 60000000;			\
		} else if (xxs > 4)  {				\
			if (xxs > 60)				\
				delta = 60000000;		\
			else					\
				delta += xxs * 1000000;		\
		} else while (xxs > 0) {			\
			delta += 1000000;			\
			xxs--;					\
		}						\
	}							\
}
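
/*
 * Usage sketch: TV_DELTA(&now, &last, delta) leaves the time elapsed
 * between the two timevals in delta, in microseconds, capped at roughly
 * one minute.  (rio_addq() below computes its idle interval inline
 * rather than through this macro.)
 */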

#ifdef ALTQ3_COMPAT
/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
#endif
/* default rio parameter values */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,		 th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,	 TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,		 TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
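
/*
 * With TH_MIN 5 and TH_MAX 15 the defaults above work out to (in packets):
 *	low    drop precedence: th_min 35, th_max 45
 *	medium drop precedence: th_min 20, th_max 30
 *	high   drop precedence: th_min  5, th_max 15
 * each with a maximum drop probability of 1/INV_P_MAX (10%).
 */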

/* internal function prototypes */
static int dscp2index(u_int8_t);
#ifdef ALTQ3_COMPAT
static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
static struct mbuf *rio_dequeue(struct ifaltq *, int);
static int rio_request(struct ifaltq *, int, void *);
static int rio_detach(rio_queue_t *);

/*
 * rio device interface
 */
altqdev_decl(rio);

#endif /* ALTQ3_COMPAT */

rio_t *
rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
{
	rio_t	*rp;
	int	 w, i;
	int	 npkts_per_sec;

	rp = malloc(sizeof(rio_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (rp == NULL)
		return (NULL);

	rp->rio_flags = flags;
	if (pkttime == 0)
		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
		rp->rio_pkttime = 800;
	else
		rp->rio_pkttime = pkttime;

	if (weight != 0)
		rp->rio_weight = weight;
	else {
		/* use default */
		rp->rio_weight = W_WEIGHT;

		/* when the link is very slow, adjust red parameters */
		npkts_per_sec = 1000000 / rp->rio_pkttime;
		if (npkts_per_sec < 50) {
			/* up to about 400Kbps */
			rp->rio_weight = W_WEIGHT_2;
		} else if (npkts_per_sec < 300) {
			/* up to about 2.4Mbps */
			rp->rio_weight = W_WEIGHT_1;
		}
	}

	/* calculate wshift.  weight must be power of 2 */
	w = rp->rio_weight;
	for (i = 0; w > 1; i++)
		w = w >> 1;
	rp->rio_wshift = i;
	w = 1 << rp->rio_wshift;
	if (w != rp->rio_weight) {
		printf("invalid weight value %d for red! use %d\n",
		    rp->rio_weight, w);
		rp->rio_weight = w;
	}
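	/* e.g. the default W_WEIGHT (512) yields rio_wshift = 9 */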

	/* allocate weight table */
	rp->rio_wtab = wtab_alloc(rp->rio_weight);

	for (i = 0; i < RIO_NDROPPREC; i++) {
		struct dropprec_state *prec = &rp->rio_precstate[i];

		prec->avg = 0;
		prec->idle = 1;

		if (params == NULL || params[i].inv_pmax == 0)
			prec->inv_pmax = default_rio_params[i].inv_pmax;
		else
			prec->inv_pmax = params[i].inv_pmax;
		if (params == NULL || params[i].th_min == 0)
			prec->th_min = default_rio_params[i].th_min;
		else
			prec->th_min = params[i].th_min;
		if (params == NULL || params[i].th_max == 0)
			prec->th_max = default_rio_params[i].th_max;
		else
			prec->th_max = params[i].th_max;

		/*
		 * th_min_s and th_max_s are scaled versions of th_min
		 * and th_max to be compared with avg.
		 */
		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);

		/*
		 * precompute probability denominator
		 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
		 */
		prec->probd = (2 * (prec->th_max - prec->th_min)
			       * prec->inv_pmax) << FP_SHIFT;
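		/*
		 * e.g. with the high drop precedence defaults
		 * (th_min 5, th_max 15, inv_pmax 10):
		 * probd = (2 * 10 * 10) << 12 = 819200.
		 */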

		microtime(&prec->last);
	}

	return (rp);
}
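
/*
 * Usage sketch (this mirrors what the RIO_IF_ATTACH ioctl below does):
 * passing all zeroes picks the defaults computed above, i.e.
 *
 *	rio_t *rp = rio_alloc(0, NULL, 0, 0);
 *	...
 *	rio_addq(rp, q, m, pktattr);	(on enqueue)
 *	m = rio_getq(rp, q);		(on dequeue)
 *	...
 *	rio_destroy(rp);
 */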

void
rio_destroy(rio_t *rp)
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}

void
rio_getstats(rio_t *rp, struct redstats *sp)
{
	int	i;

	for (i = 0; i < RIO_NDROPPREC; i++) {
		bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
		sp++;
	}
}

#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0.
 */
static int
dscp2index(u_int8_t dscp)
{
	int	dpindex = dscp & AF_DROPPRECMASK;

	if (dpindex == 0)
		return (0);
	return ((dpindex >> 3) - 1);
}
#endif

#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep track of the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
#define	RIOM_SET_PRECINDEX(m, idx)	\
	do { (m)->m_pkthdr.rcvif = (struct ifnet *)((long)(idx)); } while (0)
#define	RIOM_GET_PRECINDEX(m)	\
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif);	\
	(m)->m_pkthdr.rcvif = NULL; idx; })
#endif
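
/*
 * Round trip of the kludge above: rio_addq() stores the precedence index
 * with RIOM_SET_PRECINDEX() right before _addq(), and rio_getq() reads it
 * back with RIOM_GET_PRECINDEX(), which also resets m_pkthdr.rcvif to
 * NULL so the bogus pointer never leaves the queue.
 */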

int
rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int			 avg, droptype;
	u_int8_t		 dsfield, odsfield;
	int			 dpindex, i, n, t;
	struct timeval		 now;
	struct dropprec_state	*prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;
			else {
				t = t * 1000000 +
				    (now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
					    pow_w(rp->rio_wtab, n);
			}
		}

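		/*
		 * avg is kept scaled by 2^(rio_wshift + FP_SHIFT); the
		 * update below is the fixed-point form of
		 *	avg += (qlen - avg) / rio_weight
		 * i.e. an EWMA with weight 1/rio_weight.
		 */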
		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
		    prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}
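	/*
	 * droptype is now one of:
	 *	DTYPE_NODROP	enqueue below (unless the hard limit check
	 *			that follows forces a drop)
	 *	DTYPE_EARLY	unforced drop picked by drop_early()
	 *	DTYPE_FORCED	avg exceeded th_max for this precedence
	 */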

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}

struct mbuf *
rio_getq(rio_t *rp, class_queue_t *q)
{
	struct mbuf	*m;
	int		 dpindex, i;

	if ((m = _getq(q)) == NULL)
		return NULL;

	dpindex = RIOM_GET_PRECINDEX(m);
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		if (--rp->rio_precstate[i].qlen == 0) {
			if (rp->rio_precstate[i].idle == 0) {
				rp->rio_precstate[i].idle = 1;
				microtime(&rp->rio_precstate[i].last);
			}
		}
	}
	return (m);
}

#ifdef ALTQ3_COMPAT
int
rioopen(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
rioclose(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	rio_queue_t *rqp;
	int err, error = 0;

	while ((rqp = rio_list) != NULL) {
		/* destroy all */
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
rioioctl(dev_t dev __unused, ioctlcmd_t cmd, caddr_t addr, int flag __unused,
    struct lwp *l)
{
	rio_queue_t *rqp;
	struct rio_interface *ifacep;
	struct ifnet *ifp;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case RIO_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case RIO_ENABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case RIO_DISABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case RIO_IF_ATTACH:
		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize rio_queue_t */
		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
		if (rqp->rq_rio == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = RIO_LIMIT;
		qtype(rqp->rq_q) = Q_RIO;

		/*
		 * set RIO to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
		    rio_enqueue, rio_dequeue, rio_request,
		    NULL, NULL);
		if (error) {
			rio_destroy(rqp->rq_rio);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the rio list */
		rqp->rq_next = rio_list;
		rio_list = rqp;
		break;

	case RIO_IF_DETACH:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = rio_detach(rqp);
		break;

	case RIO_GETSTATS:
		do {
			struct rio_stats *q_stats;
			rio_t *rp;
			int i;

			q_stats = (struct rio_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			rp = rqp->rq_rio;

			q_stats->q_limit = qlimit(rqp->rq_q);
			q_stats->weight = rp->rio_weight;
			q_stats->flags = rp->rio_flags;

			for (i = 0; i < RIO_NDROPPREC; i++) {
				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
				bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
				    sizeof(struct redstats));
				q_stats->q_stats[i].q_avg =
				    rp->rio_precstate[i].avg >> rp->rio_wshift;

				q_stats->q_params[i].inv_pmax
					= rp->rio_precstate[i].inv_pmax;
				q_stats->q_params[i].th_min
					= rp->rio_precstate[i].th_min;
				q_stats->q_params[i].th_max
					= rp->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_CONFIG:
		do {
			struct rio_conf *fc;
			rio_t	*new;
			int	s, limit, i;

			fc = (struct rio_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
			    fc->rio_flags, fc->rio_pkttime);
			if (new == NULL) {
				error = ENOMEM;
				break;
			}

			s = splnet();
			_flushq(rqp->rq_q);
			limit = fc->rio_limit;
			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
			qlimit(rqp->rq_q) = limit;

			rio_destroy(rqp->rq_rio);
			rqp->rq_rio = new;

			splx(s);

			/* write back new values */
			fc->rio_limit = limit;
			for (i = 0; i < RIO_NDROPPREC; i++) {
				fc->q_params[i].inv_pmax =
					rqp->rq_rio->rio_precstate[i].inv_pmax;
				fc->q_params[i].th_min =
					rqp->rq_rio->rio_precstate[i].th_min;
				fc->q_params[i].th_max =
					rqp->rq_rio->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_SETDEFAULTS:
		do {
			struct redparams *rp;
			int	i;

			rp = (struct redparams *)addr;
			for (i = 0; i < RIO_NDROPPREC; i++)
				default_rio_params[i] = rp[i];
		} while (/*CONSTCOND*/ 0);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
rio_detach(rio_queue_t *rqp)
{
	rio_queue_t *tmp;
	int error = 0;

	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))
		return (error);

	if (rio_list == rqp)
		rio_list = rqp->rq_next;
	else {
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
				break;
			}
		if (tmp == NULL)
			printf("rio_detach: no state found in rio_list!\n");
	}

	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
	free(rqp, M_DEVBUF);
	return (error);
}

/*
 * rio support routines
 */
static int
rio_request(struct ifaltq *ifq, int req, void *arg __unused)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		_flushq(rqp->rq_q);
		if (ALTQ_IS_ENABLED(ifq))
			ifq->ifq_len = 0;
		break;
	}
	return (0);
}

/*
 * enqueue routine:
 *
 *	returns: 0 when successfully queued.
 *		 ENOBUFS when drop occurs.
 */
static int
rio_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	int error = 0;

	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
		ifq->ifq_len++;
	else
		error = ENOBUFS;
	return error;
}

/*
 * dequeue routine:
 *	must be called in splnet.
 *
 *	returns: mbuf dequeued.
 *		 NULL when no packet is available in the queue.
 */

static struct mbuf *
rio_dequeue(struct ifaltq *ifq, int op)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *m = NULL;

	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	m = rio_getq(rqp->rq_rio, rqp->rq_q);
	if (m != NULL)
		ifq->ifq_len--;
	return m;
}

#ifdef KLD_MODULE

static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
MODULE_VERSION(altq_rio, 1);
MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_RIO */