/*	$NetBSD: altq_hfsc.c,v 1.4 2001/10/26 04:59:18 itojun Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.9 2001/10/26 04:56:11 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation, and that credit
 * is given to Carnegie Mellon University in all publications reporting
 * on direct or indirect use of this code or its derivatives.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 */
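
/*
 * overview of the implementation below: each class may carry two
 * service curves.  the real-time service curve (cl_rsc) determines
 * the eligible time and deadline (cl_e, cl_d) kept in the
 * per-interface eligible list; the link-sharing service curve
 * (cl_fsc) determines the virtual time (cl_vt) kept in each parent's
 * active children list.  hfsc_dequeue() serves the eligible class
 * with the smallest deadline if one exists, and otherwise walks down
 * the hierarchy following the smallest virtual time.
 */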

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC	/* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_hfsc.h>

/*
 * function prototypes
 */
static struct hfsc_if *hfsc_attach __P((struct ifaltq *, u_int));
static int hfsc_detach __P((struct hfsc_if *));
static int hfsc_clear_interface __P((struct hfsc_if *));
static int hfsc_request __P((struct ifaltq *, int, void *));
static void hfsc_purge __P((struct hfsc_if *));
static struct hfsc_class *hfsc_class_create __P((struct hfsc_if *,
	struct service_curve *, struct hfsc_class *, int, int));
static int hfsc_class_destroy __P((struct hfsc_class *));
static int hfsc_class_modify __P((struct hfsc_class *,
	struct service_curve *, struct service_curve *));
static struct hfsc_class *hfsc_nextclass __P((struct hfsc_class *));

static int hfsc_enqueue __P((struct ifaltq *, struct mbuf *,
	struct altq_pktattr *));
static struct mbuf *hfsc_dequeue __P((struct ifaltq *, int));

static int hfsc_addq __P((struct hfsc_class *, struct mbuf *));
static struct mbuf *hfsc_getq __P((struct hfsc_class *));
static struct mbuf *hfsc_pollq __P((struct hfsc_class *));
static void hfsc_purgeq __P((struct hfsc_class *));

static void set_active __P((struct hfsc_class *, int));
static void set_passive __P((struct hfsc_class *));

static void init_ed __P((struct hfsc_class *, int));
static void update_ed __P((struct hfsc_class *, int));
static void update_d __P((struct hfsc_class *, int));
static void init_v __P((struct hfsc_class *, int));
static void update_v __P((struct hfsc_class *, int));
static ellist_t *ellist_alloc __P((void));
static void ellist_destroy __P((ellist_t *));
static void ellist_insert __P((struct hfsc_class *));
static void ellist_remove __P((struct hfsc_class *));
static void ellist_update __P((struct hfsc_class *));
struct hfsc_class *ellist_get_mindl __P((ellist_t *));
static actlist_t *actlist_alloc __P((void));
static void actlist_destroy __P((actlist_t *));
static void actlist_insert __P((struct hfsc_class *));
static void actlist_remove __P((struct hfsc_class *));
static void actlist_update __P((struct hfsc_class *));

static __inline u_int64_t seg_x2y __P((u_int64_t, u_int64_t));
static __inline u_int64_t seg_y2x __P((u_int64_t, u_int64_t));
static __inline u_int64_t m2sm __P((u_int));
static __inline u_int64_t m2ism __P((u_int));
static __inline u_int64_t d2dx __P((u_int));
static u_int sm2m __P((u_int64_t));
static u_int dx2d __P((u_int64_t));

static void sc2isc __P((struct service_curve *, struct internal_sc *));
static void rtsc_init __P((struct runtime_sc *, struct internal_sc *,
	u_int64_t, u_int64_t));
static u_int64_t rtsc_y2x __P((struct runtime_sc *, u_int64_t));
static u_int64_t rtsc_x2y __P((struct runtime_sc *, u_int64_t));
static void rtsc_min __P((struct runtime_sc *, struct internal_sc *,
	u_int64_t, u_int64_t));

int hfscopen __P((dev_t, int, int, struct proc *));
int hfscclose __P((dev_t, int, int, struct proc *));
int hfscioctl __P((dev_t, ioctlcmd_t, caddr_t, int, struct proc *));
static int hfsccmd_if_attach __P((struct hfsc_attach *));
static int hfsccmd_if_detach __P((struct hfsc_interface *));
static int hfsccmd_add_class __P((struct hfsc_add_class *));
static int hfsccmd_delete_class __P((struct hfsc_delete_class *));
static int hfsccmd_modify_class __P((struct hfsc_modify_class *));
static int hfsccmd_add_filter __P((struct hfsc_add_filter *));
static int hfsccmd_delete_filter __P((struct hfsc_delete_filter *));
static int hfsccmd_class_stats __P((struct hfsc_class_stats *));
static void get_class_stats __P((struct class_stats *, struct hfsc_class *));
static struct hfsc_class *clh_to_clp __P((struct hfsc_if *, u_long));
static u_long clp_to_clh __P((struct hfsc_class *));

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;
	struct service_curve root_sc;

	MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
	       M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		FREE(hif, M_DEVBUF);
		return (NULL);
	}

	hif->hif_ifq = ifq;

	/*
	 * create root class
	 */
	root_sc.m1 = bandwidth;
	root_sc.d = 0;
	root_sc.m2 = bandwidth;
	if ((hif->hif_rootclass =
	     hfsc_class_create(hif, &root_sc, NULL, 0, 0)) == NULL) {
		/* also free the eligible list to avoid leaking it */
		ellist_destroy(hif->hif_eligible);
		FREE(hif, M_DEVBUF);
		return (NULL);
	}

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	FREE(hif, M_DEVBUF);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(ifq, req, arg)
	struct ifaltq *ifq;
	int req;
	void *arg;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

static struct hfsc_class *
hfsc_class_create(hif, sc, parent, qlimit, flags)
	struct hfsc_if *hif;
	struct service_curve *sc;
	struct hfsc_class *parent;
	int qlimit, flags;
{
	struct hfsc_class *cl, *p;
	int s;

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
		printf("hfsc_class_create: RED not configured for HFSC!\n");
		return (NULL);
	}
#endif

	MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
	       M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct hfsc_class));

	MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
	       M_DEVBUF, M_WAITOK);
	if (cl->cl_q == NULL)
		goto err_ret;
	bzero(cl->cl_q, sizeof(class_queue_t));

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (sc->m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (sc->m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0, 0, 0,
					       red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
							red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (sc != NULL && (sc->m1 != 0 || sc->m2 != 0)) {
		MALLOC(cl->cl_rsc, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		bzero(cl->cl_rsc, sizeof(struct internal_sc));
		sc2isc(sc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);

		MALLOC(cl->cl_fsc, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		bzero(cl->cl_fsc, sizeof(struct internal_sc));
		sc2isc(sc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = (u_long)cl;	/* XXX: just a pointer to this class */
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;
	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	/* add this class to the children list of the parent */
	if (parent == NULL) {
		/* this is root class */
	} else if ((p = parent->cl_children) == NULL)
		parent->cl_children = cl;
	else {
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_q != NULL)
		FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);
	return (NULL);
}
static int
hfsc_class_destroy(cl)
	struct hfsc_class *cl;
{
	int s;

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}
	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc;
{
	struct internal_sc *tmp;
	int s;

	s = splnet();
	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				FREE(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL) {
				MALLOC(tmp, struct internal_sc *,
				       sizeof(struct internal_sc),
				       M_DEVBUF, M_WAITOK);
				if (tmp == NULL) {
					splx(s);
					return (ENOMEM);
				}
				cl->cl_rsc = tmp;
			}
			bzero(cl->cl_rsc, sizeof(struct internal_sc));
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
			rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				FREE(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL) {
				MALLOC(tmp, struct internal_sc *,
				       sizeof(struct internal_sc),
				       M_DEVBUF, M_WAITOK);
				if (tmp == NULL) {
					splx(s);
					return (ENOMEM);
				}
				cl->cl_fsc = tmp;
			}
			bzero(cl->cl_fsc, sizeof(struct internal_sc));
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
		}
	}
	splx(s);

	return (0);
}
/*
 * hfsc_nextclass returns the next class in the tree, visiting classes
 * in preorder (children first, then siblings, then the nearest
 * ancestor's sibling).
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = hif->hif_defaultclass;
	cl->cl_pktattr = pktattr;	/* save proto hdr used by ECN */

	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		u_int64_t cur_time;

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL) {
			cur_time = read_machclk();
			realtime = (cl->cl_e <= cur_time);
		}
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible)) != NULL) {
			realtime = 1;
		} else {
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_first(cl->cl_actc);
				if (cl == NULL)
					return (NULL);
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_v(cl, len);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif

	return (m);
}

static int
hfsc_addq(cl, m)
	struct hfsc_class *cl;
	struct mbuf *m;
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(cl)
	struct hfsc_class *cl;
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(cl)
	struct hfsc_class *cl;
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(cl)
	struct hfsc_class *cl;
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	set_passive(cl);
}

static void
set_active(cl, len)
	struct hfsc_class *cl;
	int len;
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_v(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	if (cl->cl_fsc != NULL) {
		while (cl->cl_parent != NULL) {
			if (--cl->cl_nactive == 0) {
				/* remove this class from the vt list */
				actlist_remove(cl);
			} else
				/* still has active children */
				break;

			/* go up to the parent class */
			cl = cl->cl_parent;
		}
	}
}

static void
init_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	struct hfsc_class *min_cl, *max_cl;

	while (cl->cl_parent != NULL) {

		if (cl->cl_nactive++ > 0)
			/* already active */
			break;

		min_cl = actlist_first(cl->cl_parent->cl_actc);
		if (min_cl != NULL) {
			u_int64_t vt;

			/*
			 * set vt to the average of the min and max classes.
			 * if the parent's period didn't change,
			 * don't decrease vt of the class.
			 */
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			vt = (min_cl->cl_vt + max_cl->cl_vt) / 2;
			if (cl->cl_parent->cl_vtperiod == cl->cl_parentperiod)
				vt = max(cl->cl_vt, vt);
			cl->cl_vt = vt;
		} else {
			/* no packet is backlogged.  set vt to 0 */
			cl->cl_vt = 0;
		}

		/* update the virtual curve */
		rtsc_min(&cl->cl_virtual, cl->cl_fsc,
			 cl->cl_vt, cl->cl_total);

		cl->cl_vtperiod++;	/* increment vt period */
		cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
		if (cl->cl_parent->cl_nactive == 0)
			cl->cl_parentperiod++;

		actlist_insert(cl);

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

static void
update_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	while (cl->cl_parent != NULL) {

		cl->cl_total += len;

		if (cl->cl_fsc != NULL) {
			cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total);

			/* update the vt list */
			actlist_update(cl);
		}

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc()
{
	ellist_t *head;

	MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(head)
	ellist_t *head;
{
	FREE(head, M_DEVBUF);
}

static void
ellist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(head)
	ellist_t *head;
{
	struct hfsc_class *p, *cl = NULL;
	u_int64_t cur_time;

	cur_time = read_machclk();

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc()
{
	actlist_t *head;

	MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(head)
	actlist_t *head;
{
	FREE(head, M_DEVBUF);
}

static void
actlist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(cl)
	struct hfsc_class *cl;
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt <= p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6     125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6     250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6     625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000       8000       800        80         8
 *  ism(500MHz) 40000       4000       400        40         4
 *  ism(200MHz) 16000       1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10
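
/*
 * a worked instance of the scaling above (matching the 10Mbps column
 * of the table): with machclk_freq = 500MHz,
 *	sm = m2sm(10000000) = (10^7 << 24) / 8 / (5 * 10^8) = 41943
 * i.e. 2500e-6 bytes/tick scaled by 2^24, since 41943 / 2^24 ~= 0.0025.
 */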

#define	SC_LARGEVAL	(1LL << 32)
#define	SC_INFINITY	0xffffffffffffffffLL

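/*
 * note on seg_x2y() and seg_y2x() below: multiplying before shifting
 * preserves the scaled fraction bits, but the 64-bit product can
 * overflow for large inputs; once the input reaches SC_LARGEVAL
 * (2^32), the code shifts first instead, trading the low-order bits
 * of precision for overflow safety.
 */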
static __inline u_int64_t
seg_x2y(x, sm)
	u_int64_t x;
	u_int64_t sm;
{
	u_int64_t y;

	if (x < SC_LARGEVAL)
		y = x * sm >> SM_SHIFT;
	else
		y = (x >> SM_SHIFT) * sm;
	return (y);
}

static __inline u_int64_t
seg_y2x(y, ism)
	u_int64_t y;
	u_int64_t ism;
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == SC_INFINITY)
		x = SC_INFINITY;
	else if (y < SC_LARGEVAL)
		x = y * ism >> ISM_SHIFT;
	else
		x = (y >> ISM_SHIFT) * ism;
	return (x);
}

static __inline u_int64_t
m2sm(m)
	u_int m;
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(m)
	u_int m;
{
	u_int64_t ism;

	if (m == 0)
		ism = SC_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(d)
	u_int d;
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(sm)
	u_int64_t sm;
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(dx)
	u_int64_t dx;
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

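/*
 * convert a service curve from the external form (m1, d, m2) into the
 * internal two-segment form; (dx, dy) is the projection of the point
 * where the slope changes from sm1 to sm2.
 */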
static void
sc2isc(sc, isc)
	struct service_curve *sc;
	struct internal_sc *isc;
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
static u_int64_t
rtsc_y2x(rtsc, y)
	struct runtime_sc *rtsc;
	u_int64_t y;
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(rtsc, x)
	struct runtime_sc *rtsc;
	u_int64_t x;
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * i.e. solve dx * sm1 = dx * sm2 + ((y1 - y) << SM_SHIFT)
	 * for dx, which gives the quotient below.
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return (0);
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return (error);
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
				       ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#if 1
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
				 &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL) {
		if (ap->parent_handle == HFSC_ROOTCLASS_HANDLE)
			parent = hif->hif_rootclass;
		else
			return (EINVAL);
	}

	if ((cl = hfsc_class_create(hif, &ap->service_curve, parent,
				    ap->qlimit, ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#if 1
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
			      cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
				 ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct class_stats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
				     sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

static void
get_class_stats(sp, cl)
	struct class_stats *sp;
	struct hfsc_class *cl;
{
	sp->class_id = cl->cl_id;
	sp->class_handle = clp_to_clh(cl);

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;

	sp->qlength = qlen(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(hif, chandle)
	struct hfsc_if *hif;
	u_long chandle;
{
	struct hfsc_class *cl;

	cl = (struct hfsc_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_clp: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_hif != hif)
		return (NULL);

	return (cl);
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_parent == NULL)
		return (HFSC_ROOTCLASS_HANDLE);	/* XXX */
	return (cl->cl_handle);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_HFSC */