/*	$NetBSD: altq_rio.c,v 1.11 2006/05/15 00:05:39 christos Exp $	*/
/*	$KAME: altq_rio.c,v 1.8 2000/12/14 08:12:46 thorpej Exp $	*/

/*
 * Copyright (C) 1998-2000
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1990-1994 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Computer Systems
 *	Engineering Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_rio.c,v 1.11 2006/05/15 00:05:39 christos Exp $");

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */
#ifdef ALTQ_RIO	/* rio is enabled by ALTQ_RIO option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_cdnr.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>

/*
 * RIO: RED with IN/OUT bit
 *   described in
 *	"Explicit Allocation of Best Effort Packet Delivery Service"
 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
 *
 * this implementation is extended to support more than 2 drop precedence
 * values as described in RFC2597 (Assured Forwarding PHB Group).
 *
 */
/*
 * AF DS (differentiated service) codepoints.
 * (classes can be mapped to CBQ or H-FSC classes.)
 *
 *      0   1   2   3   4   5   6   7
 *    +---+---+---+---+---+---+---+---+
 *    |   CLASS   |DropPre| 0 |  CU   |
 *    +---+---+---+---+---+---+---+---+
 *
 *    class 1: 001
 *    class 2: 010
 *    class 3: 011
 *    class 4: 100
 *
 *    low drop prec:    01
 *    medium drop prec: 10
 *    high drop prec:   11
 */
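/*
 * Illustrative example (not part of the original sources): reading the
 * layout above as the whole DS field byte, AF11 (class 1, low drop
 * precedence) is 001 01 0 00 = 0x28 and AF13 (class 1, high drop
 * precedence) is 001 11 0 00 = 0x38.  Masking out everything but the
 * DropPre bits and shifting, as dscp2index() does further below, yields
 * the internal indices 0 (low) and 2 (high), respectively.
 */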

/* normal red parameters */
#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
				/* q_weight = 0.00195 */

/* red parameters for a slow link */
#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
				/* q_weight = 0.0078125 */

/* red parameters for a very slow link (e.g., dialup) */
#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
				/* q_weight = 0.015625 */

/* fixed-point uses 12-bit decimal places */
#define	FP_SHIFT	12	/* fixed-point shift */

/* red parameters for drop probability */
#define	INV_P_MAX	10	/* inverse of max drop probability */
#define	TH_MIN		5	/* min threshold */
#define	TH_MAX		15	/* max threshold */

#define	RIO_LIMIT	60	/* default max queue length */

#define	TV_DELTA(a, b, delta) {					\
	register int	xxs;					\
								\
	delta = (a)->tv_usec - (b)->tv_usec;			\
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) {		\
		if (xxs < 0) {					\
			printf("rm_class: bogus time values");	\
			delta = 60000000;			\
		} else if (xxs > 4) {				\
			if (xxs > 60)				\
				delta = 60000000;		\
			else					\
				delta += xxs * 1000000;		\
		} else while (xxs > 0) {			\
			delta += 1000000;			\
			xxs--;					\
		}						\
	}							\
}

/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
/* default rio parameter values */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,		 th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,	 TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,		 TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
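/*
 * Worked example (added for illustration; the values come from the table
 * above): with TH_MIN = 5 and TH_MAX = 15 the staggered defaults are
 * th_min/th_max of 35/45 for low, 20/30 for medium and 5/15 for high drop
 * precedence, so traffic marked with a higher (more drop-prone) precedence
 * starts being dropped at a much shorter average queue.
 */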

/* internal function prototypes */
static int rio_enqueue __P((struct ifaltq *, struct mbuf *,
			    struct altq_pktattr *));
static struct mbuf *rio_dequeue __P((struct ifaltq *, int));
static int rio_request __P((struct ifaltq *, int, void *));
static int rio_detach __P((rio_queue_t *));
static int dscp2index __P((u_int8_t));

/*
 * rio device interface
 */
altqdev_decl(rio);

int
rioopen(dev, flag, fmt, l)
	dev_t dev;
	int flag, fmt;
	struct lwp *l;
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
rioclose(dev, flag, fmt, l)
	dev_t dev;
	int flag, fmt;
	struct lwp *l;
{
	rio_queue_t *rqp;
	int err, error = 0;

	while ((rqp = rio_list) != NULL) {
		/* destroy all */
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
rioioctl(dev, cmd, addr, flag, l)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
	struct lwp *l;
{
	rio_queue_t *rqp;
	struct rio_interface *ifacep;
	struct ifnet *ifp;
	struct proc *p = l->l_proc;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case RIO_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(p->p_cred,
		    KAUTH_GENERIC_ISSUSER, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case RIO_ENABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case RIO_DISABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case RIO_IF_ATTACH:
		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize rio_queue_t */
		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
		if (rqp->rq_rio == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = RIO_LIMIT;
		qtype(rqp->rq_q) = Q_RIO;

		/*
		 * set RIO to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
		    rio_enqueue, rio_dequeue, rio_request, NULL, NULL);
		if (error) {
			rio_destroy(rqp->rq_rio);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the rio list */
		rqp->rq_next = rio_list;
		rio_list = rqp;
		break;

	case RIO_IF_DETACH:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = rio_detach(rqp);
		break;

	case RIO_GETSTATS:
		do {
			struct rio_stats *q_stats;
			rio_t *rp;
			int i;

			q_stats = (struct rio_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			rp = rqp->rq_rio;

			q_stats->q_limit = qlimit(rqp->rq_q);
			q_stats->weight = rp->rio_weight;
			q_stats->flags = rp->rio_flags;

			for (i = 0; i < RIO_NDROPPREC; i++) {
				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
				(void)memcpy(&q_stats->q_stats[i],
				    &rp->q_stats[i], sizeof(struct redstats));
				q_stats->q_stats[i].q_avg =
				    rp->rio_precstate[i].avg >> rp->rio_wshift;

				q_stats->q_params[i].inv_pmax
					= rp->rio_precstate[i].inv_pmax;
				q_stats->q_params[i].th_min
					= rp->rio_precstate[i].th_min;
				q_stats->q_params[i].th_max
					= rp->rio_precstate[i].th_max;
			}
		} while (0);
		break;

	case RIO_CONFIG:
		do {
			struct rio_conf *fc;
			rio_t	*new;
			int	s, limit, i;

			fc = (struct rio_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
			    fc->rio_flags, fc->rio_pkttime);
			if (new == NULL) {
				error = ENOMEM;
				break;
			}

			s = splnet();
			_flushq(rqp->rq_q);
			limit = fc->rio_limit;
			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
			qlimit(rqp->rq_q) = limit;

			rio_destroy(rqp->rq_rio);
			rqp->rq_rio = new;

			splx(s);

			/* write back new values */
			fc->rio_limit = limit;
			for (i = 0; i < RIO_NDROPPREC; i++) {
				fc->q_params[i].inv_pmax =
					rqp->rq_rio->rio_precstate[i].inv_pmax;
				fc->q_params[i].th_min =
					rqp->rq_rio->rio_precstate[i].th_min;
				fc->q_params[i].th_max =
					rqp->rq_rio->rio_precstate[i].th_max;
			}
		} while (0);
		break;

	case RIO_SETDEFAULTS:
		do {
			struct redparams *rp;
			int	i;

			rp = (struct redparams *)addr;
			for (i = 0; i < RIO_NDROPPREC; i++)
				default_rio_params[i] = rp[i];
		} while (0);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
rio_detach(rqp)
	rio_queue_t *rqp;
{
	rio_queue_t *tmp;
	int error = 0;

	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))
		return (error);

	if (rio_list == rqp)
		rio_list = rqp->rq_next;
	else {
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
				break;
			}
		if (tmp == NULL)
			printf("rio_detach: no state found in rio_list!\n");
	}

	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
	free(rqp, M_DEVBUF);
	return (error);
}

/*
 * rio support routines
 */
static int
rio_request(ifq, req, arg)
	struct ifaltq *ifq;
	int req;
	void *arg;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		_flushq(rqp->rq_q);
		if (ALTQ_IS_ENABLED(ifq))
			ifq->ifq_len = 0;
		break;
	}
	return (0);
}


rio_t *
rio_alloc(weight, params, flags, pkttime)
	int	weight;
	struct redparams *params;
	int	flags, pkttime;
{
	rio_t	*rp;
	int	w, i;
	int	npkts_per_sec;

	rp = malloc(sizeof(rio_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (rp == NULL)
		return (NULL);

	rp->rio_flags = flags;
	if (pkttime == 0)
		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
		rp->rio_pkttime = 800;
	else
		rp->rio_pkttime = pkttime;

	if (weight != 0)
		rp->rio_weight = weight;
	else {
		/* use default */
		rp->rio_weight = W_WEIGHT;

		/* when the link is very slow, adjust red parameters */
		npkts_per_sec = 1000000 / rp->rio_pkttime;
		if (npkts_per_sec < 50) {
			/* up to about 400Kbps */
			rp->rio_weight = W_WEIGHT_2;
		} else if (npkts_per_sec < 300) {
			/* up to about 2.4Mbps */
			rp->rio_weight = W_WEIGHT_1;
		}
	}

	/* calculate wshift.  weight must be power of 2 */
	w = rp->rio_weight;
	for (i = 0; w > 1; i++)
		w = w >> 1;
	rp->rio_wshift = i;
	w = 1 << rp->rio_wshift;
	if (w != rp->rio_weight) {
		printf("invalid weight value %d for red! use %d\n",
		    rp->rio_weight, w);
		rp->rio_weight = w;
	}

	/* allocate weight table */
	rp->rio_wtab = wtab_alloc(rp->rio_weight);

	for (i = 0; i < RIO_NDROPPREC; i++) {
		struct dropprec_state *prec = &rp->rio_precstate[i];

		prec->avg = 0;
		prec->idle = 1;

		if (params == NULL || params[i].inv_pmax == 0)
			prec->inv_pmax = default_rio_params[i].inv_pmax;
		else
			prec->inv_pmax = params[i].inv_pmax;
		if (params == NULL || params[i].th_min == 0)
			prec->th_min = default_rio_params[i].th_min;
		else
			prec->th_min = params[i].th_min;
		if (params == NULL || params[i].th_max == 0)
			prec->th_max = default_rio_params[i].th_max;
		else
			prec->th_max = params[i].th_max;

		/*
		 * th_min_s and th_max_s are scaled versions of th_min
		 * and th_max to be compared with avg.
		 */
		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);

		/*
		 * precompute probability denominator
		 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
		 */
		prec->probd = (2 * (prec->th_max - prec->th_min)
			       * prec->inv_pmax) << FP_SHIFT;
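
		/*
		 * Worked example (illustration only): with the default
		 * high-precedence parameters th_min = 5, th_max = 15 and
		 * inv_pmax = 10, probd is (2 * 10 * 10) << FP_SHIFT, i.e.
		 * the fixed-point representation of 200 = 2 * (th_max -
		 * th_min) / p_max, which drop_early() in altq_red.c uses
		 * when computing the RED drop probability.
		 */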

		microtime(&prec->last);
	}

	return (rp);
}

void
rio_destroy(rp)
	rio_t *rp;
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}

void
rio_getstats(rp, sp)
	rio_t *rp;
	struct redstats *sp;
{
	int	i;

	for (i = 0; i < RIO_NDROPPREC; i++) {
		(void)memcpy(sp, &rp->q_stats[i], sizeof(struct redstats));
		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
		sp++;
	}
}

/*
 * enqueue routine:
 *
 *	returns: 0 when successfully queued.
 *		 ENOBUFS when drop occurs.
 */
static int
rio_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	int error = 0;

	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
		ifq->ifq_len++;
	else
		error = ENOBUFS;
	return error;
}

#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0.
 */
static int
dscp2index(u_int8_t dscp)
{
	int	dpindex = dscp & AF_DROPPRECMASK;

	if (dpindex == 0)
		return (0);
	return ((dpindex >> 3) - 1);
}
#endif
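/*
 * Example of the conversion above (added for illustration): the argument
 * is the DS field byte as returned by read_dsfield(), so AF21, AF22 and
 * AF23 (DS bytes 0x48, 0x50 and 0x58 under the layout documented near the
 * top of this file) map to indices 0, 1 and 2.  The class bits are masked
 * off, so only the drop precedence selects the index.
 */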

#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
#define	RIOM_SET_PRECINDEX(m, idx)	\
	do { (m)->m_pkthdr.rcvif = (struct ifnet *)((long)(idx)); } while (0)
#define	RIOM_GET_PRECINDEX(m)	\
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif);	\
	(m)->m_pkthdr.rcvif = NULL; idx; })
#endif

int
rio_addq(rp, q, m, pktattr)
	rio_t *rp;
	class_queue_t *q;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	int avg, droptype;
	u_int8_t dsfield, odsfield;
	int dpindex, i, n, t;
	struct timeval now;
	struct dropprec_state *prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;
			else {
				t = t * 1000000 +
				    (now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
					    pow_w(rp->rio_wtab, n);
			}
		}

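		/*
		 * (Added note, illustration only.)  avg is kept scaled by
		 * 2^(rio_wshift + FP_SHIFT), so the update below is the
		 * fixed-point form of the usual RED estimator
		 *	avg = (1 - 1/weight) * avg + (1/weight) * qlen:
		 * adding (qlen << FP_SHIFT) contributes qlen/weight in the
		 * scaled domain, and subtracting (avg >> rio_wshift) removes
		 * avg/weight.
		 */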
		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
				      prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}

/*
 * dequeue routine:
 *	must be called in splnet.
 *
 *	returns: mbuf dequeued.
 *		 NULL when no packet is available in the queue.
 */

static struct mbuf *
rio_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *m = NULL;

	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	m = rio_getq(rqp->rq_rio, rqp->rq_q);
	if (m != NULL)
		ifq->ifq_len--;
	return m;
}

struct mbuf *
rio_getq(rp, q)
	rio_t *rp;
	class_queue_t *q;
{
	struct mbuf *m;
	int dpindex, i;

	if ((m = _getq(q)) == NULL)
		return NULL;

	dpindex = RIOM_GET_PRECINDEX(m);
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		if (--rp->rio_precstate[i].qlen == 0) {
			if (rp->rio_precstate[i].idle == 0) {
				rp->rio_precstate[i].idle = 1;
				microtime(&rp->rio_precstate[i].last);
			}
		}
	}
	return (m);
}

#ifdef KLD_MODULE

static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_RIO */