/*	$NetBSD: altq_rio.c,v 1.3 2001/04/13 23:29:56 thorpej Exp $	*/
/*	$KAME: altq_rio.c,v 1.8 2000/12/14 08:12:46 thorpej Exp $	*/

/*
 * Copyright (C) 1998-2000
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1990-1994 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Computer Systems
 *	Engineering Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */
#ifdef ALTQ_RIO /* rio is enabled by ALTQ_RIO option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_cdnr.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>

/*
 * RIO: RED with IN/OUT bit
 *   described in
 *	"Explicit Allocation of Best Effort Packet Delivery Service"
 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
 *
 * this implementation is extended to support more than 2 drop precedence
 * values as described in RFC2597 (Assured Forwarding PHB Group).
 *
 */
/*
 * AF DS (differentiated service) codepoints.
 * (classes can be mapped to CBQ or H-FSC classes.)
 *
 *      0   1   2   3   4   5   6   7
 *    +---+---+---+---+---+---+---+---+
 *    |   CLASS   |DropPre| 0 |  CU   |
 *    +---+---+---+---+---+---+---+---+
 *
 *    class 1: 001
 *    class 2: 010
 *    class 3: 011
 *    class 4: 100
 *
 *    low drop prec:    01
 *    medium drop prec: 10
 *    high drop prec:   11
 */
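/*
 * Illustrative mapping (added note, not in the original sources): with the
 * bit layout above and the CU bits set to zero, the class-1 AF codepoints
 * from RFC 2597 land in the DS field as
 *	AF11 (low drop):    001 01 0 00  ->  DS byte 0x28
 *	AF12 (medium drop): 001 10 0 00  ->  DS byte 0x30
 *	AF13 (high drop):   001 11 0 00  ->  DS byte 0x38
 * dscp2index() below keys off the DropPre bits only, so these map to drop
 * precedence indices 0, 1 and 2 respectively.
 */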

/* normal red parameters */
#define W_WEIGHT        512     /* inverse of weight of EWMA (511/512) */
                                /* q_weight = 0.00195 */

/* red parameters for a slow link */
#define W_WEIGHT_1      128     /* inverse of weight of EWMA (127/128) */
                                /* q_weight = 0.0078125 */

/* red parameters for a very slow link (e.g., dialup) */
#define W_WEIGHT_2      64      /* inverse of weight of EWMA (63/64) */
                                /* q_weight = 0.015625 */

/* fixed-point uses 12-bit decimal places */
#define FP_SHIFT        12      /* fixed-point shift */

/* red parameters for drop probability */
#define INV_P_MAX       10      /* inverse of max drop probability */
#define TH_MIN          5       /* min threshold */
#define TH_MAX          15      /* max threshold */
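/*
 * Added note (not in the original sources), to make the conventions above
 * concrete: with W_WEIGHT = 512 the EWMA update is
 * avg <- (511/512) * avg + (1/512) * qlen, INV_P_MAX = 10 gives a maximum
 * early-drop probability of 1/10, and FP_SHIFT = 12 means a queue length of
 * one packet is represented as 1 << 12 = 4096 in the scaled arithmetic below.
 */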

#define RIO_LIMIT       60      /* default max queue length */
#define RIO_STATS               /* collect statistics */

#define TV_DELTA(a, b, delta) {                                 \
        register int    xxs;                                    \
                                                                \
        delta = (a)->tv_usec - (b)->tv_usec;                    \
        if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) {           \
                if (xxs < 0) {                                  \
                        printf("rm_class: bogus time values");  \
                        delta = 60000000;                       \
                } else if (xxs > 4)  {                          \
                        if (xxs > 60)                           \
                                delta = 60000000;               \
                        else                                    \
                                delta += xxs * 1000000;         \
                } else while (xxs > 0) {                        \
                        delta += 1000000;                       \
                        xxs--;                                  \
                }                                               \
        }                                                       \
}
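/*
 * Added note (not in the original sources): TV_DELTA(a, b, delta) stores the
 * elapsed time from *b to *a in microseconds into delta, clamping the result
 * at 60 seconds (60000000 usec) when the gap exceeds a minute or the
 * timestamps are out of order.  For example, a = {2, 500000} and
 * b = {1, 200000} yields delta = 1300000.
 */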

/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
/* default rio parameter values */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,             th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,     TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,              TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
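/*
 * Added note (not in the original sources): with TH_MIN = 5 and TH_MAX = 15
 * the defaults above stagger the RED thresholds per drop precedence:
 * low [35, 45], medium [20, 30], high [5, 15].  Higher drop precedences
 * therefore start dropping at shorter average queue lengths.
 */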

/* internal function prototypes */
static int rio_enqueue __P((struct ifaltq *, struct mbuf *,
                            struct altq_pktattr *));
static struct mbuf *rio_dequeue __P((struct ifaltq *, int));
static int rio_request __P((struct ifaltq *, int, void *));
static int rio_detach __P((rio_queue_t *));
static int dscp2index __P((u_int8_t));

/*
 * rio device interface
 */
altqdev_decl(rio);

int
rioopen(dev, flag, fmt, p)
        dev_t dev;
        int flag, fmt;
        struct proc *p;
{
        /* everything will be done when the queueing scheme is attached. */
        return 0;
}

int
rioclose(dev, flag, fmt, p)
        dev_t dev;
        int flag, fmt;
        struct proc *p;
{
        rio_queue_t *rqp;
        int err, error = 0;

        while ((rqp = rio_list) != NULL) {
                /* destroy all */
                err = rio_detach(rqp);
                if (err != 0 && error == 0)
                        error = err;
        }

        return error;
}

int
rioioctl(dev, cmd, addr, flag, p)
        dev_t dev;
        ioctlcmd_t cmd;
        caddr_t addr;
        int flag;
        struct proc *p;
{
        rio_queue_t *rqp;
        struct rio_interface *ifacep;
        struct ifnet *ifp;
        int error = 0;

        /* check super-user privilege */
        switch (cmd) {
        case RIO_GETSTATS:
                break;
        default:
#if (__FreeBSD_version > 400000)
                if ((error = suser(p)) != 0)
                        return (error);
#else
                if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
                        return (error);
#endif
                break;
        }

        switch (cmd) {

        case RIO_ENABLE:
                ifacep = (struct rio_interface *)addr;
                if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
                        error = EBADF;
                        break;
                }
                error = altq_enable(rqp->rq_ifq);
                break;

        case RIO_DISABLE:
                ifacep = (struct rio_interface *)addr;
                if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
                        error = EBADF;
                        break;
                }
                error = altq_disable(rqp->rq_ifq);
                break;

        case RIO_IF_ATTACH:
                ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
                if (ifp == NULL) {
                        error = ENXIO;
                        break;
                }

                /* allocate and initialize rio_queue_t */
                MALLOC(rqp, rio_queue_t *, sizeof(rio_queue_t), M_DEVBUF, M_WAITOK);
                if (rqp == NULL) {
                        error = ENOMEM;
                        break;
                }
                bzero(rqp, sizeof(rio_queue_t));

                MALLOC(rqp->rq_q, class_queue_t *, sizeof(class_queue_t),
                       M_DEVBUF, M_WAITOK);
                if (rqp->rq_q == NULL) {
                        FREE(rqp, M_DEVBUF);
                        error = ENOMEM;
                        break;
                }
                bzero(rqp->rq_q, sizeof(class_queue_t));

                rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
                if (rqp->rq_rio == NULL) {
                        FREE(rqp->rq_q, M_DEVBUF);
                        FREE(rqp, M_DEVBUF);
                        error = ENOMEM;
                        break;
                }

                rqp->rq_ifq = &ifp->if_snd;
                qtail(rqp->rq_q) = NULL;
                qlen(rqp->rq_q) = 0;
                qlimit(rqp->rq_q) = RIO_LIMIT;
                qtype(rqp->rq_q) = Q_RIO;

                /*
                 * set RIO to this ifnet structure.
                 */
                error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
                                    rio_enqueue, rio_dequeue, rio_request,
                                    NULL, NULL);
                if (error) {
                        rio_destroy(rqp->rq_rio);
                        FREE(rqp->rq_q, M_DEVBUF);
                        FREE(rqp, M_DEVBUF);
                        break;
                }

                /* add this state to the rio list */
                rqp->rq_next = rio_list;
                rio_list = rqp;
                break;

        case RIO_IF_DETACH:
                ifacep = (struct rio_interface *)addr;
                if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
                        error = EBADF;
                        break;
                }
                error = rio_detach(rqp);
                break;

        case RIO_GETSTATS:
                do {
                        struct rio_stats *q_stats;
                        rio_t *rp;
                        int i;

                        q_stats = (struct rio_stats *)addr;
                        if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
                                               ALTQT_RIO)) == NULL) {
                                error = EBADF;
                                break;
                        }

                        rp = rqp->rq_rio;

                        q_stats->q_limit = qlimit(rqp->rq_q);
                        q_stats->weight = rp->rio_weight;
                        q_stats->flags = rp->rio_flags;

                        for (i = 0; i < RIO_NDROPPREC; i++) {
                                q_stats->q_len[i] = rp->rio_precstate[i].qlen;
                                bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
                                      sizeof(struct redstats));
                                q_stats->q_stats[i].q_avg =
                                        rp->rio_precstate[i].avg >> rp->rio_wshift;

                                q_stats->q_params[i].inv_pmax
                                        = rp->rio_precstate[i].inv_pmax;
                                q_stats->q_params[i].th_min
                                        = rp->rio_precstate[i].th_min;
                                q_stats->q_params[i].th_max
                                        = rp->rio_precstate[i].th_max;
                        }
                } while (0);
                break;

        case RIO_CONFIG:
                do {
                        struct rio_conf *fc;
                        rio_t *new;
                        int s, limit, i;

                        fc = (struct rio_conf *)addr;
                        if ((rqp = altq_lookup(fc->iface.rio_ifname,
                                               ALTQT_RIO)) == NULL) {
                                error = EBADF;
                                break;
                        }

                        new = rio_alloc(fc->rio_weight, &fc->q_params[0],
                                        fc->rio_flags, fc->rio_pkttime);
                        if (new == NULL) {
                                error = ENOMEM;
                                break;
                        }

                        s = splnet();
                        _flushq(rqp->rq_q);
                        limit = fc->rio_limit;
                        if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
                                limit = fc->q_params[RIO_NDROPPREC-1].th_max;
                        qlimit(rqp->rq_q) = limit;

                        rio_destroy(rqp->rq_rio);
                        rqp->rq_rio = new;

                        splx(s);

                        /* write back new values */
                        fc->rio_limit = limit;
                        for (i = 0; i < RIO_NDROPPREC; i++) {
                                fc->q_params[i].inv_pmax =
                                        rqp->rq_rio->rio_precstate[i].inv_pmax;
                                fc->q_params[i].th_min =
                                        rqp->rq_rio->rio_precstate[i].th_min;
                                fc->q_params[i].th_max =
                                        rqp->rq_rio->rio_precstate[i].th_max;
                        }
                } while (0);
                break;

        case RIO_SETDEFAULTS:
                do {
                        struct redparams *rp;
                        int i;

                        rp = (struct redparams *)addr;
                        for (i = 0; i < RIO_NDROPPREC; i++)
                                default_rio_params[i] = rp[i];
                } while (0);
                break;

        default:
                error = EINVAL;
                break;
        }

        return error;
}

static int
rio_detach(rqp)
        rio_queue_t *rqp;
{
        rio_queue_t *tmp;
        int error = 0;

        if (ALTQ_IS_ENABLED(rqp->rq_ifq))
                altq_disable(rqp->rq_ifq);

        if ((error = altq_detach(rqp->rq_ifq)))
                return (error);

        if (rio_list == rqp)
                rio_list = rqp->rq_next;
        else {
                for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
                        if (tmp->rq_next == rqp) {
                                tmp->rq_next = rqp->rq_next;
                                break;
                        }
                if (tmp == NULL)
                        printf("rio_detach: no state found in rio_list!\n");
        }

        rio_destroy(rqp->rq_rio);
        FREE(rqp->rq_q, M_DEVBUF);
        FREE(rqp, M_DEVBUF);
        return (error);
}

/*
 * rio support routines
 */
static int
rio_request(ifq, req, arg)
        struct ifaltq *ifq;
        int req;
        void *arg;
{
        rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

        switch (req) {
        case ALTRQ_PURGE:
                _flushq(rqp->rq_q);
                if (ALTQ_IS_ENABLED(ifq))
                        ifq->ifq_len = 0;
                break;
        }
        return (0);
}


rio_t *
rio_alloc(weight, params, flags, pkttime)
        int weight;
        struct redparams *params;
        int flags, pkttime;
{
        rio_t *rp;
        int w, i;
        int npkts_per_sec;

        MALLOC(rp, rio_t *, sizeof(rio_t), M_DEVBUF, M_WAITOK);
        if (rp == NULL)
                return (NULL);
        bzero(rp, sizeof(rio_t));

        rp->rio_flags = flags;
        if (pkttime == 0)
                /* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
                rp->rio_pkttime = 800;
        else
                rp->rio_pkttime = pkttime;

        if (weight != 0)
                rp->rio_weight = weight;
        else {
                /* use default */
                rp->rio_weight = W_WEIGHT;

                /* when the link is very slow, adjust red parameters */
                npkts_per_sec = 1000000 / rp->rio_pkttime;
                if (npkts_per_sec < 50) {
                        /* up to about 400Kbps */
                        rp->rio_weight = W_WEIGHT_2;
                } else if (npkts_per_sec < 300) {
                        /* up to about 2.4Mbps */
                        rp->rio_weight = W_WEIGHT_1;
                }
        }

        /* calculate wshift. weight must be power of 2 */
        w = rp->rio_weight;
        for (i = 0; w > 1; i++)
                w = w >> 1;
        rp->rio_wshift = i;
        w = 1 << rp->rio_wshift;
        if (w != rp->rio_weight) {
                printf("invalid weight value %d for red! use %d\n",
                       rp->rio_weight, w);
                rp->rio_weight = w;
        }
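        /*
         * Added note (not in the original sources): the loop above computes
         * wshift = log2(weight) and rounds a non-power-of-2 weight down to a
         * power of 2.  For example, the default W_WEIGHT of 512 gives
         * rio_wshift = 9, while a requested weight of 100 would be reduced
         * to 64 (wshift = 6).
         */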

        /* allocate weight table */
        rp->rio_wtab = wtab_alloc(rp->rio_weight);

        for (i = 0; i < RIO_NDROPPREC; i++) {
                struct dropprec_state *prec = &rp->rio_precstate[i];

                prec->avg = 0;
                prec->idle = 1;

                if (params == NULL || params[i].inv_pmax == 0)
                        prec->inv_pmax = default_rio_params[i].inv_pmax;
                else
                        prec->inv_pmax = params[i].inv_pmax;
                if (params == NULL || params[i].th_min == 0)
                        prec->th_min = default_rio_params[i].th_min;
                else
                        prec->th_min = params[i].th_min;
                if (params == NULL || params[i].th_max == 0)
                        prec->th_max = default_rio_params[i].th_max;
                else
                        prec->th_max = params[i].th_max;

                /*
                 * th_min_s and th_max_s are scaled versions of th_min
                 * and th_max to be compared with avg.
                 */
                prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
                prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);

                /*
                 * precompute probability denominator
                 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
                 */
                prec->probd = (2 * (prec->th_max - prec->th_min)
                               * prec->inv_pmax) << FP_SHIFT;
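                /*
                 * Added note (not in the original sources): with the defaults
                 * for the high drop precedence (th_min = 5, th_max = 15,
                 * inv_pmax = 10) this gives probd = (2 * 10 * 10) << 12
                 * = 819200.
                 */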

                microtime(&prec->last);
        }

        return (rp);
}

void
rio_destroy(rp)
        rio_t *rp;
{
        wtab_destroy(rp->rio_wtab);
        FREE(rp, M_DEVBUF);
}

void
rio_getstats(rp, sp)
        rio_t *rp;
        struct redstats *sp;
{
        int i;

        for (i = 0; i < RIO_NDROPPREC; i++) {
                bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
                sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
                sp++;
        }
}

/*
 * enqueue routine:
 *
 *	returns: 0 when successfully queued.
 *		 ENOBUFS when drop occurs.
 */
static int
rio_enqueue(ifq, m, pktattr)
        struct ifaltq *ifq;
        struct mbuf *m;
        struct altq_pktattr *pktattr;
{
        rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
        int error = 0;

        if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
                ifq->ifq_len++;
        else
                error = ENOBUFS;
        return error;
}

#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0.
 */
static int
dscp2index(u_int8_t dscp)
{
        int dpindex = dscp & AF_DROPPRECMASK;

        if (dpindex == 0)
                return (0);
        return ((dpindex >> 3) - 1);
}
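/*
 * Added note (not in the original sources): with the DropPre bits selected
 * by AF_DROPPRECMASK, low (01), medium (10) and high (11) drop precedence
 * map to indices 0, 1 and 2.  A DS field with no drop precedence bits set
 * (e.g. best effort) also maps to index 0.
 */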
#endif

#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
#define RIOM_SET_PRECINDEX(m, idx) \
        do { (m)->m_pkthdr.rcvif = (struct ifnet *)((long)(idx)); } while (0)
#define RIOM_GET_PRECINDEX(m) \
        ({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
        (m)->m_pkthdr.rcvif = NULL; idx; })
#endif
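/*
 * Added note (not in the original sources): rio_addq() calls
 * RIOM_SET_PRECINDEX(m, dpindex) when a packet is queued and rio_getq()
 * retrieves the index with RIOM_GET_PRECINDEX(m), which also clears
 * m_pkthdr.rcvif again.  RIOM_GET_PRECINDEX relies on the GCC
 * statement-expression extension.
 */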

int
rio_addq(rp, q, m, pktattr)
        rio_t *rp;
        class_queue_t *q;
        struct mbuf *m;
        struct altq_pktattr *pktattr;
{
        int avg, droptype;
        u_int8_t dsfield, odsfield;
        int dpindex, i, n, t;
        struct timeval now;
        struct dropprec_state *prec;

        dsfield = odsfield = read_dsfield(m, pktattr);
        dpindex = dscp2index(dsfield);

        /*
         * update avg of the precedence states whose drop precedence
         * is larger than or equal to the drop precedence of the packet
         */
        now.tv_sec = 0;
        for (i = dpindex; i < RIO_NDROPPREC; i++) {
                prec = &rp->rio_precstate[i];
                avg = prec->avg;
                if (prec->idle) {
                        prec->idle = 0;
                        if (now.tv_sec == 0)
                                microtime(&now);
                        t = (now.tv_sec - prec->last.tv_sec);
                        if (t > 60)
                                avg = 0;
                        else {
                                t = t * 1000000 +
                                        (now.tv_usec - prec->last.tv_usec);
                                n = t / rp->rio_pkttime;
                                /* calculate (avg = (1 - Wq)^n * avg) */
                                if (n > 0)
                                        avg = (avg >> FP_SHIFT) *
                                                pow_w(rp->rio_wtab, n);
                        }
                }

                /* run estimator. (avg is scaled by WEIGHT in fixed-point) */
                avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
                prec->avg = avg;        /* save the new value */
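                /*
                 * Added note (not in the original sources): avg is kept
                 * scaled by (weight << FP_SHIFT), so the line above is the
                 * fixed-point form of
                 *	avg = (1 - 1/weight) * avg + (1/weight) * qlen.
                 */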
                /*
                 * count keeps a tally of arriving traffic that has not
                 * been dropped.
                 */
                prec->count++;
        }

        prec = &rp->rio_precstate[dpindex];
        avg = prec->avg;

        /* see if we drop early */
        droptype = DTYPE_NODROP;
        if (avg >= prec->th_min_s && prec->qlen > 1) {
                if (avg >= prec->th_max_s) {
                        /* avg >= th_max: forced drop */
                        droptype = DTYPE_FORCED;
                } else if (prec->old == 0) {
                        /* first exceeds th_min */
                        prec->count = 1;
                        prec->old = 1;
                } else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
                                      prec->probd, prec->count)) {
                        /* unforced drop by red */
                        droptype = DTYPE_EARLY;
                }
        } else {
                /* avg < th_min */
                prec->old = 0;
        }

        /*
         * if the queue length hits the hard limit, it's a forced drop.
         */
        if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
                droptype = DTYPE_FORCED;

        if (droptype != DTYPE_NODROP) {
                /* always drop incoming packet (as opposed to randomdrop) */
                for (i = dpindex; i < RIO_NDROPPREC; i++)
                        rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
                if (droptype == DTYPE_EARLY)
                        rp->q_stats[dpindex].drop_unforced++;
                else
                        rp->q_stats[dpindex].drop_forced++;
                PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
                m_freem(m);
                return (-1);
        }

        for (i = dpindex; i < RIO_NDROPPREC; i++)
                rp->rio_precstate[i].qlen++;

        /* save drop precedence index in mbuf hdr */
        RIOM_SET_PRECINDEX(m, dpindex);

        if (rp->rio_flags & RIOF_CLEARDSCP)
                dsfield &= ~DSCP_MASK;

        if (dsfield != odsfield)
                write_dsfield(m, pktattr, dsfield);

        _addq(q, m);

#ifdef RIO_STATS
        PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
        return (0);
}

/*
 * dequeue routine:
 *	must be called in splnet.
 *
 *	returns: mbuf dequeued.
 *		 NULL when no packet is available in the queue.
 */
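/*
 * Added note (not in the original sources): the op argument distinguishes a
 * poll from a real dequeue.  ALTDQ_POLL returns the packet at the head of
 * the queue without removing it; any other op removes the packet and
 * decrements ifq_len.
 */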

static struct mbuf *
rio_dequeue(ifq, op)
        struct ifaltq *ifq;
        int op;
{
        rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
        struct mbuf *m = NULL;

        if (op == ALTDQ_POLL)
                return qhead(rqp->rq_q);

        m = rio_getq(rqp->rq_rio, rqp->rq_q);
        if (m != NULL)
                ifq->ifq_len--;
        return m;
}

struct mbuf *
rio_getq(rp, q)
        rio_t *rp;
        class_queue_t *q;
{
        struct mbuf *m;
        int dpindex, i;

        if ((m = _getq(q)) == NULL)
                return NULL;

        dpindex = RIOM_GET_PRECINDEX(m);
        for (i = dpindex; i < RIO_NDROPPREC; i++) {
                if (--rp->rio_precstate[i].qlen == 0) {
                        if (rp->rio_precstate[i].idle == 0) {
                                rp->rio_precstate[i].idle = 1;
                                microtime(&rp->rio_precstate[i].last);
                        }
                }
        }
        return (m);
}

#ifdef KLD_MODULE

static struct altqsw rio_sw =
        {"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_RIO */