/*	$NetBSD: altq_rio.c,v 1.13 2006/10/12 01:30:42 christos Exp $	*/
/*	$KAME: altq_rio.c,v 1.8 2000/12/14 08:12:46 thorpej Exp $	*/

/*
 * Copyright (C) 1998-2000
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1990-1994 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Computer Systems
 *	Engineering Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_rio.c,v 1.13 2006/10/12 01:30:42 christos Exp $");

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */
#ifdef ALTQ_RIO	/* rio is enabled by ALTQ_RIO option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_cdnr.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>

/*
 * RIO: RED with IN/OUT bit
 *   described in
 *	"Explicit Allocation of Best Effort Packet Delivery Service"
 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
 *
 * this implementation is extended to support more than 2 drop precedence
 * values as described in RFC 2597 (Assured Forwarding PHB Group).
 *
 */
/*
 * AF DS (differentiated service) codepoints.
 * (classes can be mapped to CBQ or H-FSC classes.)
 *
 *      0   1   2   3   4   5   6   7
 *    +---+---+---+---+---+---+---+---+
 *    |   CLASS   |DropPre| 0 |  CU   |
 *    +---+---+---+---+---+---+---+---+
 *
 *    class 1: 001
 *    class 2: 010
 *    class 3: 011
 *    class 4: 100
 *
 *    low drop prec:    01
 *    medium drop prec: 10
 *    high drop prec:   11
 */

/* normal red parameters */
#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
				/* q_weight = 0.00195 */

/* red parameters for a slow link */
#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
				/* q_weight = 0.0078125 */

/* red parameters for a very slow link (e.g., dialup) */
#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
				/* q_weight = 0.015625 */

/* fixed-point uses 12-bit decimal places */
#define	FP_SHIFT	12	/* fixed-point shift */

/* red parameters for drop probability */
#define	INV_P_MAX	10	/* inverse of max drop probability */
#define	TH_MIN		 5	/* min threshold */
#define	TH_MAX		15	/* max threshold */

#define	RIO_LIMIT	60	/* default max queue length */

#define	TV_DELTA(a, b, delta) {						\
	register int	xxs;						\
									\
	delta = (a)->tv_usec - (b)->tv_usec;				\
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) {			\
		if (xxs < 0) {						\
			printf("rm_class: bogus time values");		\
			delta = 60000000;				\
		} else if (xxs > 4) {					\
			if (xxs > 60)					\
				delta = 60000000;			\
			else						\
				delta += xxs * 1000000;			\
		} else while (xxs > 0) {				\
			delta += 1000000;				\
			xxs--;						\
		}							\
	}								\
}

/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
/* default rio parameter values */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,		 th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,	 TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,		 TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
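
/*
 * For reference (worked out here for clarity; not in the original source):
 * with TH_MIN = 5 and TH_MAX = 15, the table above yields the following
 * per-precedence RED thresholds, each with max drop probability
 * 1/INV_P_MAX = 1/10:
 *
 *	low    drop precedence:  th_min = 35, th_max = 45
 *	medium drop precedence:  th_min = 20, th_max = 30
 *	high   drop precedence:  th_min =  5, th_max = 15
 *
 * i.e. the three (th_min, th_max) bands are staggered so that packets of
 * higher drop precedence start being dropped at shorter average queue
 * lengths.  The default RIO_LIMIT (60) stays above the largest th_max.
 */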

/* internal function prototypes */
static int rio_enqueue __P((struct ifaltq *, struct mbuf *,
			    struct altq_pktattr *));
static struct mbuf *rio_dequeue __P((struct ifaltq *, int));
static int rio_request __P((struct ifaltq *, int, void *));
static int rio_detach __P((rio_queue_t *));
static int dscp2index __P((u_int8_t));

/*
 * rio device interface
 */
altqdev_decl(rio);

int
rioopen(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
rioclose(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	rio_queue_t *rqp;
	int err, error = 0;

	while ((rqp = rio_list) != NULL) {
		/* destroy all */
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
rioioctl(dev_t dev __unused, ioctlcmd_t cmd, caddr_t addr, int flag __unused,
    struct lwp *l)
{
	rio_queue_t *rqp;
	struct rio_interface *ifacep;
	struct ifnet *ifp;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case RIO_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case RIO_ENABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case RIO_DISABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case RIO_IF_ATTACH:
		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize rio_queue_t */
		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
		if (rqp->rq_rio == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = RIO_LIMIT;
		qtype(rqp->rq_q) = Q_RIO;

		/*
		 * set RIO to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
				    rio_enqueue, rio_dequeue, rio_request,
				    NULL, NULL);
		if (error) {
			rio_destroy(rqp->rq_rio);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the rio list */
		rqp->rq_next = rio_list;
		rio_list = rqp;
		break;

	case RIO_IF_DETACH:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = rio_detach(rqp);
		break;

	case RIO_GETSTATS:
		do {
			struct rio_stats *q_stats;
			rio_t *rp;
			int i;

			q_stats = (struct rio_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
					       ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			rp = rqp->rq_rio;

			q_stats->q_limit = qlimit(rqp->rq_q);
			q_stats->weight	= rp->rio_weight;
			q_stats->flags = rp->rio_flags;

			for (i = 0; i < RIO_NDROPPREC; i++) {
				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
				(void)memcpy(&q_stats->q_stats[i],
				    &rp->q_stats[i], sizeof(struct redstats));
				q_stats->q_stats[i].q_avg =
				    rp->rio_precstate[i].avg >> rp->rio_wshift;

				q_stats->q_params[i].inv_pmax
					= rp->rio_precstate[i].inv_pmax;
				q_stats->q_params[i].th_min
					= rp->rio_precstate[i].th_min;
				q_stats->q_params[i].th_max
					= rp->rio_precstate[i].th_max;
			}
		} while (0);
		break;

	case RIO_CONFIG:
		do {
			struct rio_conf *fc;
			rio_t	*new;
			int	s, limit, i;

			fc = (struct rio_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.rio_ifname,
					       ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
			    fc->rio_flags, fc->rio_pkttime);
			if (new == NULL) {
				error = ENOMEM;
				break;
			}

			s = splnet();
			_flushq(rqp->rq_q);
			limit = fc->rio_limit;
			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
			qlimit(rqp->rq_q) = limit;

			rio_destroy(rqp->rq_rio);
			rqp->rq_rio = new;

			splx(s);

			/* write back new values */
			fc->rio_limit = limit;
			for (i = 0; i < RIO_NDROPPREC; i++) {
				fc->q_params[i].inv_pmax =
					rqp->rq_rio->rio_precstate[i].inv_pmax;
				fc->q_params[i].th_min =
					rqp->rq_rio->rio_precstate[i].th_min;
				fc->q_params[i].th_max =
					rqp->rq_rio->rio_precstate[i].th_max;
			}
		} while (0);
		break;

	case RIO_SETDEFAULTS:
		do {
			struct redparams *rp;
			int i;

			rp = (struct redparams *)addr;
			for (i = 0; i < RIO_NDROPPREC; i++)
				default_rio_params[i] = rp[i];
		} while (0);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
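
/*
 * Illustrative sketch (not part of the original source): how a user-space
 * tool might drive the ioctl interface above.  The device path
 * /dev/altq/rio and the helper name rio_attach_example are assumptions;
 * consult <altq/altq_rio.h> and altqd for the authoritative definitions.
 * Left disabled so it does not affect the kernel build.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <altq/altq.h>
#include <altq/altq_rio.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
rio_attach_example(const char *ifname)
{
	struct rio_interface iface;
	int fd;

	if ((fd = open("/dev/altq/rio", O_RDWR)) < 0)
		return (-1);

	memset(&iface, 0, sizeof(iface));
	strlcpy(iface.rio_ifname, ifname, sizeof(iface.rio_ifname));

	/* attach RIO to the interface, then enable it */
	if (ioctl(fd, RIO_IF_ATTACH, &iface) < 0 ||
	    ioctl(fd, RIO_ENABLE, &iface) < 0) {
		(void)close(fd);
		return (-1);
	}

	/*
	 * keep the descriptor open: rioclose() above tears down every
	 * attached RIO instance when the device is closed.
	 */
	return (fd);
}
#endif /* 0 */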

static int
rio_detach(rqp)
	rio_queue_t *rqp;
{
	rio_queue_t *tmp;
	int error = 0;

	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))
		return (error);

	if (rio_list == rqp)
		rio_list = rqp->rq_next;
	else {
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
				break;
			}
		if (tmp == NULL)
			printf("rio_detach: no state found in rio_list!\n");
	}

	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
	free(rqp, M_DEVBUF);
	return (error);
}

/*
 * rio support routines
 */
static int
rio_request(struct ifaltq *ifq, int req, void *arg __unused)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		_flushq(rqp->rq_q);
		if (ALTQ_IS_ENABLED(ifq))
			ifq->ifq_len = 0;
		break;
	}
	return (0);
}


rio_t *
rio_alloc(weight, params, flags, pkttime)
	int	weight;
	struct redparams *params;
	int	flags, pkttime;
{
	rio_t	*rp;
	int	w, i;
	int	npkts_per_sec;

	rp = malloc(sizeof(rio_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (rp == NULL)
		return (NULL);

	rp->rio_flags = flags;
	if (pkttime == 0)
		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
		rp->rio_pkttime = 800;
	else
		rp->rio_pkttime = pkttime;

	if (weight != 0)
		rp->rio_weight = weight;
	else {
		/* use default */
		rp->rio_weight = W_WEIGHT;

		/* when the link is very slow, adjust red parameters */
		npkts_per_sec = 1000000 / rp->rio_pkttime;
		if (npkts_per_sec < 50) {
			/* up to about 400Kbps */
			rp->rio_weight = W_WEIGHT_2;
		} else if (npkts_per_sec < 300) {
			/* up to about 2.4Mbps */
			rp->rio_weight = W_WEIGHT_1;
		}
	}

	/* calculate wshift.  weight must be power of 2 */
	w = rp->rio_weight;
	for (i = 0; w > 1; i++)
		w = w >> 1;
	rp->rio_wshift = i;
	w = 1 << rp->rio_wshift;
	if (w != rp->rio_weight) {
		printf("invalid weight value %d for red! use %d\n",
		    rp->rio_weight, w);
		rp->rio_weight = w;
	}

	/* allocate weight table */
	rp->rio_wtab = wtab_alloc(rp->rio_weight);

	for (i = 0; i < RIO_NDROPPREC; i++) {
		struct dropprec_state *prec = &rp->rio_precstate[i];

		prec->avg = 0;
		prec->idle = 1;

		if (params == NULL || params[i].inv_pmax == 0)
			prec->inv_pmax = default_rio_params[i].inv_pmax;
		else
			prec->inv_pmax = params[i].inv_pmax;
		if (params == NULL || params[i].th_min == 0)
			prec->th_min = default_rio_params[i].th_min;
		else
			prec->th_min = params[i].th_min;
		if (params == NULL || params[i].th_max == 0)
			prec->th_max = default_rio_params[i].th_max;
		else
			prec->th_max = params[i].th_max;

		/*
		 * th_min_s and th_max_s are scaled versions of th_min
		 * and th_max to be compared with avg.
		 */
		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);

		/*
		 * precompute probability denominator
		 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
		 */
		prec->probd = (2 * (prec->th_max - prec->th_min)
			       * prec->inv_pmax) << FP_SHIFT;

		microtime(&prec->last);
	}

	return (rp);
}
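
/*
 * Worked example (added for clarity, not in the original source): with the
 * defaults W_WEIGHT = 512 (rio_wshift = 9) and FP_SHIFT = 12, avg carries
 * 9 + 12 = 21 fractional bits, so a th_min of 5 packets becomes
 * th_min_s = 5 << 21 = 10485760.  For the high drop precedence defaults
 * (th_min = 5, th_max = 15, inv_pmax = 10),
 * probd = (2 * (15 - 5) * 10) << 12 = 200 << 12 = 819200,
 * which drop_early() uses as the denominator of the RED drop probability.
 */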

void
rio_destroy(rp)
	rio_t *rp;
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}

void
rio_getstats(rp, sp)
	rio_t *rp;
	struct redstats *sp;
{
	int	i;

	for (i = 0; i < RIO_NDROPPREC; i++) {
		(void)memcpy(sp, &rp->q_stats[i], sizeof(struct redstats));
		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
		sp++;
	}
}

/*
 * enqueue routine:
 *
 *	returns: 0 when successfully queued.
 *		 ENOBUFS when drop occurs.
 */
static int
rio_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	int error = 0;

	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
		ifq->ifq_len++;
	else
		error = ENOBUFS;
	return error;
}

#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0.
 */
static int
dscp2index(u_int8_t dscp)
{
	int	dpindex = dscp & AF_DROPPRECMASK;

	if (dpindex == 0)
		return (0);
	return ((dpindex >> 3) - 1);
}
#endif
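
/*
 * Worked example (added for clarity, not in the original source): assuming
 * AF_DROPPRECMASK selects the two drop-precedence bits of the DS byte
 * (bits 3-4, i.e. 0x18), the class 1 AF codepoints map as follows:
 *
 *	AF11 (DS byte 0x28): 0x28 & 0x18 = 0x08, (0x08 >> 3) - 1 = 0 (low)
 *	AF12 (DS byte 0x30): 0x30 & 0x18 = 0x10, (0x10 >> 3) - 1 = 1 (medium)
 *	AF13 (DS byte 0x38): 0x38 & 0x18 = 0x18, (0x18 >> 3) - 1 = 2 (high)
 *
 * non-AF traffic (drop-precedence bits 00) takes the early return and is
 * treated as low drop precedence (index 0).
 */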

#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
#define	RIOM_SET_PRECINDEX(m, idx)	\
	do { (m)->m_pkthdr.rcvif = (struct ifnet *)((long)(idx)); } while (0)
#define	RIOM_GET_PRECINDEX(m)	\
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
	(m)->m_pkthdr.rcvif = NULL; idx; })
#endif

int
rio_addq(rp, q, m, pktattr)
	rio_t *rp;
	class_queue_t *q;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	int avg, droptype;
	u_int8_t dsfield, odsfield;
	int dpindex, i, n, t;
	struct timeval now;
	struct dropprec_state *prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;
			else {
				t = t * 1000000 +
					(now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
						pow_w(rp->rio_wtab, n);
			}
		}

		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
				      prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}
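
/*
 * Note added for clarity (not in the original source): the estimator update
 * in rio_addq(),
 *
 *	avg += (qlen << FP_SHIFT) - (avg >> rio_wshift);
 *
 * is the usual RED exponentially weighted moving average written in the
 * scaled representation where avg carries (rio_wshift + FP_SHIFT)
 * fractional bits.  Dividing through by 2^(rio_wshift + FP_SHIFT) gives
 *
 *	avg = (1 - Wq) * avg + Wq * qlen,	with Wq = 1 / 2^rio_wshift
 *
 * e.g. with the default weight 512 (rio_wshift = 9), a single queued packet
 * starting from avg = 0 yields a scaled avg of 1 << FP_SHIFT = 4096,
 * i.e. a real average of 1/512 of a packet.
 */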

/*
 * dequeue routine:
 *	must be called in splnet.
 *
 *	returns: mbuf dequeued.
 *		 NULL when no packet is available in the queue.
 */

static struct mbuf *
rio_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *m = NULL;

	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	m = rio_getq(rqp->rq_rio, rqp->rq_q);
	if (m != NULL)
		ifq->ifq_len--;
	return m;
}

struct mbuf *
rio_getq(rp, q)
	rio_t	*rp;
	class_queue_t *q;
{
	struct mbuf *m;
	int	dpindex, i;

	if ((m = _getq(q)) == NULL)
		return NULL;

	dpindex = RIOM_GET_PRECINDEX(m);
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		if (--rp->rio_precstate[i].qlen == 0) {
			if (rp->rio_precstate[i].idle == 0) {
				rp->rio_precstate[i].idle = 1;
				microtime(&rp->rio_precstate[i].last);
			}
		}
	}
	return (m);
}

#ifdef KLD_MODULE

static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_RIO */