/*	$NetBSD: altq_hfsc.c,v 1.17 2006/07/21 16:48:45 ad Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.9 2001/10/26 04:56:11 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation, and that credit
 * is given to Carnegie Mellon University in all publications reporting
 * on direct or indirect use of this code or its derivatives.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.17 2006/07/21 16:48:45 ad Exp $");

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_hfsc.h>

/*
 * function prototypes
 */
static struct hfsc_if *hfsc_attach __P((struct ifaltq *, u_int));
static int hfsc_detach __P((struct hfsc_if *));
static int hfsc_clear_interface __P((struct hfsc_if *));
static int hfsc_request __P((struct ifaltq *, int, void *));
static void hfsc_purge __P((struct hfsc_if *));
static struct hfsc_class *hfsc_class_create __P((struct hfsc_if *,
	    struct service_curve *, struct hfsc_class *, int, int));
static int hfsc_class_destroy __P((struct hfsc_class *));
static int hfsc_class_modify __P((struct hfsc_class *,
	    struct service_curve *, struct service_curve *));
static struct hfsc_class *hfsc_nextclass __P((struct hfsc_class *));

static int hfsc_enqueue __P((struct ifaltq *, struct mbuf *,
	    struct altq_pktattr *));
static struct mbuf *hfsc_dequeue __P((struct ifaltq *, int));

static int hfsc_addq __P((struct hfsc_class *, struct mbuf *));
static struct mbuf *hfsc_getq __P((struct hfsc_class *));
static struct mbuf *hfsc_pollq __P((struct hfsc_class *));
static void hfsc_purgeq __P((struct hfsc_class *));

static void set_active __P((struct hfsc_class *, int));
static void set_passive __P((struct hfsc_class *));

static void init_ed __P((struct hfsc_class *, int));
static void update_ed __P((struct hfsc_class *, int));
static void update_d __P((struct hfsc_class *, int));
static void init_v __P((struct hfsc_class *, int));
static void update_v __P((struct hfsc_class *, int));
static ellist_t *ellist_alloc __P((void));
static void ellist_destroy __P((ellist_t *));
static void ellist_insert __P((struct hfsc_class *));
static void ellist_remove __P((struct hfsc_class *));
static void ellist_update __P((struct hfsc_class *));
struct hfsc_class *ellist_get_mindl __P((ellist_t *));
static actlist_t *actlist_alloc __P((void));
static void actlist_destroy __P((actlist_t *));
static void actlist_insert __P((struct hfsc_class *));
static void actlist_remove __P((struct hfsc_class *));
static void actlist_update __P((struct hfsc_class *));

static inline u_int64_t seg_x2y __P((u_int64_t, u_int64_t));
static inline u_int64_t seg_y2x __P((u_int64_t, u_int64_t));
static inline u_int64_t m2sm __P((u_int));
static inline u_int64_t m2ism __P((u_int));
static inline u_int64_t d2dx __P((u_int));
static u_int sm2m __P((u_int64_t));
static u_int dx2d __P((u_int64_t));

static void sc2isc __P((struct service_curve *, struct internal_sc *));
static void rtsc_init __P((struct runtime_sc *, struct internal_sc *,
	    u_int64_t, u_int64_t));
static u_int64_t rtsc_y2x __P((struct runtime_sc *, u_int64_t));
static u_int64_t rtsc_x2y __P((struct runtime_sc *, u_int64_t));
static void rtsc_min __P((struct runtime_sc *, struct internal_sc *,
	    u_int64_t, u_int64_t));

int hfscopen __P((dev_t, int, int, struct lwp *));
int hfscclose __P((dev_t, int, int, struct lwp *));
int hfscioctl __P((dev_t, ioctlcmd_t, caddr_t, int, struct lwp *));
static int hfsccmd_if_attach __P((struct hfsc_attach *));
static int hfsccmd_if_detach __P((struct hfsc_interface *));
static int hfsccmd_add_class __P((struct hfsc_add_class *));
static int hfsccmd_delete_class __P((struct hfsc_delete_class *));
static int hfsccmd_modify_class __P((struct hfsc_modify_class *));
static int hfsccmd_add_filter __P((struct hfsc_add_filter *));
static int hfsccmd_delete_filter __P((struct hfsc_delete_filter *));
static int hfsccmd_class_stats __P((struct hfsc_class_stats *));
static void get_class_stats __P((struct hfsc_basic_class_stats *,
	    struct hfsc_class *));
static struct hfsc_class *clh_to_clp __P((struct hfsc_if *, u_long));
static u_long clp_to_clh __P((struct hfsc_class *));

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;

static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;
	struct service_curve root_sc;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (NULL);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return NULL;
	}

	hif->hif_ifq = ifq;

	/*
	 * create root class
	 */
	root_sc.m1 = bandwidth;
	root_sc.d = 0;
	root_sc.m2 = bandwidth;
	if ((hif->hif_rootclass =
	    hfsc_class_create(hif, &root_sc, NULL, 0, 0)) == NULL) {
		/* also release the eligible list allocated above */
		ellist_destroy(hif->hif_eligible);
		free(hif, M_DEVBUF);
		return (NULL);
	}

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(ifq, req, arg)
	struct ifaltq *ifq;
	int req;
	void *arg;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(hif, sc, parent, qlimit, flags)
	struct hfsc_if *hif;
	struct service_curve *sc;
	struct hfsc_class *parent;
	int qlimit, flags;
{
	struct hfsc_class *cl, *p;
	int s;

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
		printf("hfsc_class_create: RED not configured for HFSC!\n");
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (sc->m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (sc->m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0, 0, 0,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (sc != NULL && (sc->m1 != 0 || sc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(sc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);

		cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(sc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = (u_long)cl;  /* XXX: just a pointer to this class */
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;
	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	/* add this class to the children list of the parent */
	if (parent == NULL) {
		/* this is root class */
	}
	else if ((p = parent->cl_children) == NULL)
		parent->cl_children = cl;
	else {
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(cl)
	struct hfsc_class *cl;
{
	int s;

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}
	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc;
{
	struct internal_sc *rsc_tmp, *fsc_tmp;
	int s;

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	} else
		rsc_tmp = NULL;
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (fsc_tmp == NULL) {
			/* don't leak the rsc allocated just above */
			if (rsc_tmp != NULL)
				free(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	} else
		fsc_tmp = NULL;

	s = splnet();
	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
			rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
		}
	}
	splx(s);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = hif->hif_defaultclass;
	cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */

	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
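/*
 * a caller-side sketch of the poll/remove contract above (hypothetical
 * driver-style code, not part of this file; can_transmit() is an assumed
 * helper standing in for a real transmit-readiness check):
 *
 *	m = hfsc_dequeue(ifq, ALTDQ_POLL);		(peek only)
 *	if (m != NULL && can_transmit(m))
 *		m = hfsc_dequeue(ifq, ALTDQ_REMOVE);	(same mbuf)
 */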
static struct mbuf *
hfsc_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		u_int64_t cur_time;

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL) {
			cur_time = read_machclk();
			realtime = (cl->cl_e <= cur_time);
		}
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible)) != NULL) {
			realtime = 1;
		} else {
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_first(cl->cl_actc);
				if (cl == NULL)
					return (NULL);
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_v(cl, len);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif

	return (m);
}

static int
hfsc_addq(cl, m)
	struct hfsc_class *cl;
	struct mbuf *m;
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(cl)
	struct hfsc_class *cl;
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(cl)
	struct hfsc_class *cl;
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(cl)
	struct hfsc_class *cl;
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	set_passive(cl);
}

static void
set_active(cl, len)
	struct hfsc_class *cl;
	int len;
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_v(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	if (cl->cl_fsc != NULL) {
		while (cl->cl_parent != NULL) {
			if (--cl->cl_nactive == 0) {
				/* remove this class from the vt list */
				actlist_remove(cl);
			} else
				/* still has active children */
				break;

			/* go up to the parent class */
			cl = cl->cl_parent;
		}
	}
}

static void
init_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	struct hfsc_class *min_cl, *max_cl;

	while (cl->cl_parent != NULL) {

		if (cl->cl_nactive++ > 0)
			/* already active */
			break;

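	/*
	 * (sm1 <= sm2 is the convex case: zeroing dx/dy drops the first
	 * segment, leaving the single slope-m2 line mentioned above.)
	 */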
		/*
		 * if the parent became idle while this class was idle,
		 * reset vt and the runtime service curve.
		 */
		if (cl->cl_parent->cl_nactive == 0 ||
		    cl->cl_parent->cl_vtperiod != cl->cl_parentperiod) {
			cl->cl_vt = 0;
			rtsc_init(&cl->cl_virtual, cl->cl_fsc,
			    0, cl->cl_total);
		}
		min_cl = actlist_first(cl->cl_parent->cl_actc);
		if (min_cl != NULL) {
			u_int64_t vt;

			/*
			 * set vt to the average of the min and max classes.
			 * if the parent's period didn't change,
			 * don't decrease vt of the class.
			 */
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			vt = (min_cl->cl_vt + max_cl->cl_vt) / 2;
			if (cl->cl_parent->cl_vtperiod != cl->cl_parentperiod
			    || vt > cl->cl_vt)
				cl->cl_vt = vt;
		}

		/* update the virtual curve */
		rtsc_min(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt, cl->cl_total);

		cl->cl_vtperiod++;  /* increment vt period */
		cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
		if (cl->cl_parent->cl_nactive == 0)
			cl->cl_parentperiod++;

		actlist_insert(cl);

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

static void
update_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	while (cl->cl_parent != NULL) {

		cl->cl_total += len;

		if (cl->cl_fsc != NULL) {
			cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total);

			/* update the vt list */
			actlist_update(cl);
		}

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * the eligible list holds backlogged classes sorted by their
 * eligible times.  there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc()
{
	ellist_t *head;

	head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	if (head != NULL)
		TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(head)
	ellist_t *head;
{
	free(head, M_DEVBUF);
}

static void
ellist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(head)
	ellist_t *head;
{
	struct hfsc_class *p, *cl = NULL;
	u_int64_t cur_time;

	cur_time = read_machclk();

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * the active children list holds backlogged child classes sorted
 * by their virtual times.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc()
{
	actlist_t *head;

	head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	if (head != NULL)
		TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(head)
	actlist_t *head;
{
	free(head, M_DEVBUF);
}

static void
actlist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(cl)
	struct hfsc_class *cl;
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt <= p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec     100Kbps    1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+------------------------------------------------------
 *  bytes/nsec   12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)   25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)   62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte    80000      8000       800        80         8
 *  ism(500MHz)  40000      4000       400        40         4
 *  ism(200MHz)  16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SC_LARGEVAL	(1LL << 32)
#define	SC_INFINITY	0xffffffffffffffffLL
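
/*
 * a worked example of the conversions below (a sketch only; it assumes
 * machclk_freq = 1GHz, a value not fixed by this file):
 *
 *	m2sm(10Mbps):	(10000000ULL << 24) / 8 / 1000000000 = 20971
 *	m2ism(10Mbps):	(1000000000ULL << 10) * 8 / 10000000 = 819200
 *	d2dx(10msec):	10 * 1000000000 / 1000 = 10000000 ticks
 *
 * then seg_x2y(d2dx(10), m2sm(10Mbps)) = (10000000 * 20971) >> 24,
 * which is about 12500 bytes, i.e. 10msec worth of 10Mbps service.
 */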

static inline u_int64_t
seg_x2y(x, sm)
	u_int64_t x;
	u_int64_t sm;
{
	u_int64_t y;

	if (x < SC_LARGEVAL)
		y = x * sm >> SM_SHIFT;
	else
		y = (x >> SM_SHIFT) * sm;
	return (y);
}

static inline u_int64_t
seg_y2x(y, ism)
	u_int64_t y;
	u_int64_t ism;
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == SC_INFINITY)
		x = SC_INFINITY;
	else if (y < SC_LARGEVAL)
		x = y * ism >> ISM_SHIFT;
	else
		x = (y >> ISM_SHIFT) * ism;
	return (x);
}

static inline u_int64_t
m2sm(m)
	u_int m;
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static inline u_int64_t
m2ism(m)
	u_int m;
{
	u_int64_t ism;

	if (m == 0)
		ism = SC_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static inline u_int64_t
d2dx(d)
	u_int d;
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(sm)
	u_int64_t sm;
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(dx)
	u_int64_t dx;
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(sc, isc)
	struct service_curve *sc;
	struct internal_sc *isc;
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection (amount of service) value
 */
static u_int64_t
rtsc_y2x(rtsc, y)
	struct runtime_sc *rtsc;
	u_int64_t y;
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(rtsc, x)
	struct runtime_sc *rtsc;
	u_int64_t x;
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
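	/*
	 * (a sketch of that algebra, ignoring the SC_LARGEVAL branch of
	 * seg_x2y(): dx*sm1 >> SM_SHIFT == (dx*sm2 >> SM_SHIFT) + (y1 - y),
	 * hence dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2), as computed
	 * below.)
	 */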
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, l)
	dev_t dev;
	int flag, fmt;
	struct lwp *l;
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no CPU clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, l)
	dev_t dev;
	int flag, fmt;
	struct lwp *l;
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, l)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
	struct lwp *l;
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
				       ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#if 1
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * attach the HFSC discipline to this interface's send queue.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
				 &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL) {
		if (ap->parent_handle == HFSC_ROOTCLASS_HANDLE)
			parent = hif->hif_rootclass;
		else
			return (EINVAL);
	}

	if ((cl = hfsc_class_create(hif, &ap->service_curve, parent,
				    ap->qlimit, ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#if 1
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
			      cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
				 ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_basic_class_stats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
				     sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

static void
get_class_stats(sp, cl)
	struct hfsc_basic_class_stats *sp;
	struct hfsc_class *cl;
{
	sp->class_id = cl->cl_id;
	sp->class_handle = clp_to_clh(cl);

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;

	sp->qlength = qlen(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(hif, chandle)
	struct hfsc_if *hif;
	u_long chandle;
{
	struct hfsc_class *cl;

	cl = (struct hfsc_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
1786 printf("clh_to_cl: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_hif != hif)
		return (NULL);

	return (cl);
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_parent == NULL)
		return (HFSC_ROOTCLASS_HANDLE);  /* XXX */
	return (cl->cl_handle);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_HFSC */