/*	$KAME: altq_hfsc.c,v 1.8 2000/12/14 08:12:46 thorpej Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation, and that credit
 * is given to Carnegie Mellon University in all publications reporting
 * on direct or indirect use of this code or its derivatives.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 */

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC	/* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_hfsc.h>

/*
 * function prototypes
 */
static struct hfsc_if *hfsc_attach __P((struct ifaltq *, u_int));
static int hfsc_detach __P((struct hfsc_if *));
static int hfsc_clear_interface __P((struct hfsc_if *));
static int hfsc_request __P((struct ifaltq *, int, void *));
static void hfsc_purge __P((struct hfsc_if *));
static struct hfsc_class *hfsc_class_create __P((struct hfsc_if *,
		struct service_curve *, struct hfsc_class *, int, int));
static int hfsc_class_destroy __P((struct hfsc_class *));
static int hfsc_class_modify __P((struct hfsc_class *,
		struct service_curve *, struct service_curve *));
static struct hfsc_class *hfsc_nextclass __P((struct hfsc_class *));

static int hfsc_enqueue __P((struct ifaltq *, struct mbuf *,
			     struct altq_pktattr *));
static struct mbuf *hfsc_dequeue __P((struct ifaltq *, int));

static int hfsc_addq __P((struct hfsc_class *, struct mbuf *));
static struct mbuf *hfsc_getq __P((struct hfsc_class *));
static struct mbuf *hfsc_pollq __P((struct hfsc_class *));
static void hfsc_purgeq __P((struct hfsc_class *));

static void set_active __P((struct hfsc_class *, int));
static void set_passive __P((struct hfsc_class *));

static void init_ed __P((struct hfsc_class *, int));
static void update_ed __P((struct hfsc_class *, int));
static void update_d __P((struct hfsc_class *, int));
static void init_v __P((struct hfsc_class *, int));
static void update_v __P((struct hfsc_class *, int));
static ellist_t *ellist_alloc __P((void));
static void ellist_destroy __P((ellist_t *));
static void ellist_insert __P((struct hfsc_class *));
static void ellist_remove __P((struct hfsc_class *));
static void ellist_update __P((struct hfsc_class *));
struct hfsc_class *ellist_get_mindl __P((ellist_t *));
static actlist_t *actlist_alloc __P((void));
static void actlist_destroy __P((actlist_t *));
static void actlist_insert __P((struct hfsc_class *));
static void actlist_remove __P((struct hfsc_class *));
static void actlist_update __P((struct hfsc_class *));

static __inline u_int64_t seg_x2y __P((u_int64_t, u_int64_t));
static __inline u_int64_t seg_y2x __P((u_int64_t, u_int64_t));
static __inline u_int64_t m2sm __P((u_int));
static __inline u_int64_t m2ism __P((u_int));
static __inline u_int64_t d2dx __P((u_int));
static u_int sm2m __P((u_int64_t));
static u_int dx2d __P((u_int64_t));

static void sc2isc __P((struct service_curve *, struct internal_sc *));
static void rtsc_init __P((struct runtime_sc *, struct internal_sc *,
			   u_int64_t, u_int64_t));
static u_int64_t rtsc_y2x __P((struct runtime_sc *, u_int64_t));
static u_int64_t rtsc_x2y __P((struct runtime_sc *, u_int64_t));
static void rtsc_min __P((struct runtime_sc *, struct internal_sc *,
			  u_int64_t, u_int64_t));

int hfscopen __P((dev_t, int, int, struct proc *));
int hfscclose __P((dev_t, int, int, struct proc *));
int hfscioctl __P((dev_t, ioctlcmd_t, caddr_t, int, struct proc *));
static int hfsccmd_if_attach __P((struct hfsc_attach *));
static int hfsccmd_if_detach __P((struct hfsc_interface *));
static int hfsccmd_add_class __P((struct hfsc_add_class *));
static int hfsccmd_delete_class __P((struct hfsc_delete_class *));
static int hfsccmd_modify_class __P((struct hfsc_modify_class *));
static int hfsccmd_add_filter __P((struct hfsc_add_filter *));
static int hfsccmd_delete_filter __P((struct hfsc_delete_filter *));
static int hfsccmd_class_stats __P((struct hfsc_class_stats *));
static void get_class_stats __P((struct class_stats *, struct hfsc_class *));
static struct hfsc_class *clh_to_clp __P((struct hfsc_if *, u_long));
static u_long clp_to_clh __P((struct hfsc_class *));

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;

static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;
	struct service_curve root_sc;

	MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
	       M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		FREE(hif, M_DEVBUF);
		return (NULL);
	}

	hif->hif_ifq = ifq;

	/*
	 * create the root class: a linear service curve
	 * (m1 = m2 = link bandwidth, d = 0) covering the whole link.
	 */
	root_sc.m1 = bandwidth;
	root_sc.d = 0;
	root_sc.m2 = bandwidth;
	if ((hif->hif_rootclass =
	     hfsc_class_create(hif, &root_sc, NULL, 0, 0)) == NULL) {
		ellist_destroy(hif->hif_eligible);
		FREE(hif, M_DEVBUF);
		return (NULL);
	}

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	FREE(hif, M_DEVBUF);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(ifq, req, arg)
	struct ifaltq *ifq;
	int req;
	void *arg;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

static struct hfsc_class *
hfsc_class_create(hif, sc, parent, qlimit, flags)
	struct hfsc_if *hif;
	struct service_curve *sc;
	struct hfsc_class *parent;
	int qlimit, flags;
{
	struct hfsc_class *cl, *p;
	int s;

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
		printf("hfsc_class_create: RED not configured for HFSC!\n");
		return (NULL);
	}
#endif

	MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
	       M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct hfsc_class));

	MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
	       M_DEVBUF, M_WAITOK);
	if (cl->cl_q == NULL)
		goto err_ret;
	bzero(cl->cl_q, sizeof(class_queue_t));

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (sc->m2 == 0)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (sc->m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0, 0, 0,
					       red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
						red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (sc != NULL && (sc->m1 != 0 || sc->m2 != 0)) {
		MALLOC(cl->cl_rsc, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		bzero(cl->cl_rsc, sizeof(struct internal_sc));
		sc2isc(sc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);

		MALLOC(cl->cl_fsc, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		bzero(cl->cl_fsc, sizeof(struct internal_sc));
		sc2isc(sc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = (u_long)cl;	/* XXX: just a pointer to this class */
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splimp();
	hif->hif_classes++;
	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	/* add this class to the children list of the parent */
	if (parent == NULL) {
		/* this is root class */
	} else if ((p = parent->cl_children) == NULL)
		parent->cl_children = cl;
	else {
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_q != NULL)
		FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(cl)
	struct hfsc_class *cl;
{
	int s;

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splimp();

	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}
	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc;
{
	struct internal_sc *tmp;
	int s;

	s = splimp();
	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				FREE(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL) {
				MALLOC(tmp, struct internal_sc *,
				       sizeof(struct internal_sc),
				       M_DEVBUF, M_WAITOK);
				if (tmp == NULL) {
					splx(s);
					return (ENOMEM);
				}
				cl->cl_rsc = tmp;
			}
			bzero(cl->cl_rsc, sizeof(struct internal_sc));
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
			rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				FREE(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL) {
				MALLOC(tmp, struct internal_sc *,
				       sizeof(struct internal_sc),
				       M_DEVBUF, M_WAITOK);
				if (tmp == NULL) {
					splx(s);
					return (ENOMEM);
				}
				cl->cl_fsc = tmp;
			}
			bzero(cl->cl_fsc, sizeof(struct internal_sc));
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
		}
	}
	splx(s);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}
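
/*
 * editorial note (not in the original source): the traversal above is
 * depth-first preorder.  for the hierarchy
 *
 *	root
 *	  +- A
 *	  |   +- A1
 *	  |   +- A2
 *	  +- B
 *
 * successive calls yield root, A, A1, A2, B and finally NULL, so the
 * idiom shown in the comment visits every class exactly once.
 */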

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = hif->hif_defaultclass;
	cl->cl_pktattr = pktattr;	/* save proto hdr used by ECN */

	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
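/*
 * illustrative call sequence (editorial sketch, not from the original
 * source): a driver typically peeks at the head packet before
 * committing to transmit it:
 *
 *	m = hfsc_dequeue(ifq, ALTDQ_POLL);	(inspect; stays queued)
 *	m = hfsc_dequeue(ifq, ALTDQ_REMOVE);	(must be the same mbuf)
 *
 * hif_pollcache records the class chosen at POLL time so that the
 * subsequent REMOVE dequeues from that same class.
 */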
static struct mbuf *
hfsc_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		u_int64_t cur_time;

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL) {
			cur_time = read_machclk();
			realtime = (cl->cl_e <= cur_time);
		}
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible)) != NULL) {
			realtime = 1;
		} else {
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_first(cl->cl_actc);
				if (cl == NULL)
					return (NULL);
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	ASSERT(m != NULL);	/* an active class must hold a packet */
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_v(cl, len);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif

	return (m);
}

static int
hfsc_addq(cl, m)
	struct hfsc_class *cl;
	struct mbuf *m;
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(cl)
	struct hfsc_class *cl;
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(cl)
	struct hfsc_class *cl;
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(cl)
	struct hfsc_class *cl;
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	set_passive(cl);
}

static void
set_active(cl, len)
	struct hfsc_class *cl;
	int len;
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_v(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	if (cl->cl_fsc != NULL) {
		while (cl->cl_parent != NULL) {
			if (--cl->cl_nactive == 0) {
				/* remove this class from the vt list */
				actlist_remove(cl);
			} else
				/* still has active children */
				break;

			/* go up to the parent class */
			cl = cl->cl_parent;
		}
	}
}

static void
init_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	struct hfsc_class *min_cl, *max_cl;

	while (cl->cl_parent != NULL) {

		if (cl->cl_nactive++ > 0)
			/* already active */
			break;

		min_cl = actlist_first(cl->cl_parent->cl_actc);
		if (min_cl != NULL) {
			u_int64_t vt;

			/*
			 * set vt to the average of the min and max classes.
			 * if the parent's period didn't change,
			 * don't decrease vt of the class.
			 */
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			vt = (min_cl->cl_vt + max_cl->cl_vt) / 2;
			if (cl->cl_parent->cl_vtperiod == cl->cl_parentperiod)
				vt = max(cl->cl_vt, vt);
			cl->cl_vt = vt;
		} else {
			/* no packet is backlogged.  set vt to 0 */
			cl->cl_vt = 0;
		}

		/* update the virtual curve */
		rtsc_min(&cl->cl_virtual, cl->cl_fsc,
			 cl->cl_vt, cl->cl_total);

		cl->cl_vtperiod++;	/* increment vt period */
		cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
		if (cl->cl_parent->cl_nactive == 0)
			cl->cl_parentperiod++;

		actlist_insert(cl);

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

static void
update_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	while (cl->cl_parent != NULL) {

		cl->cl_total += len;

		if (cl->cl_fsc != NULL) {
			cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total);

			/* update the vt list */
			actlist_update(cl);
		}

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * the eligible list holds backlogged classes sorted by their eligible
 * times.  there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc()
{
	ellist_t *head;

	MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(head)
	ellist_t *head;
{
	FREE(head, M_DEVBUF);
}

static void
ellist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/*
 * find the class with the minimum deadline among the eligible classes,
 * i.e. among the classes whose eligible times have already been reached.
 */
struct hfsc_class *
ellist_get_mindl(head)
	ellist_t *head;
{
	struct hfsc_class *p, *cl = NULL;
	u_int64_t cur_time;

	cur_time = read_machclk();

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * the active children list holds backlogged child classes sorted by
 * their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc()
{
	actlist_t *head;

	MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(head)
	actlist_t *head;
{
	FREE(head, M_DEVBUF);
}

static void
actlist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(cl)
	struct hfsc_class *cl;
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt <= p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec     100Kbps    1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec   12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)   25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)   62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte    80000      8000       800        80         8
 *  ism(500MHz)  40000      4000       400        40         4
 *  ism(200MHz)  16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SC_LARGEVAL	(1LL << 32)
#define	SC_INFINITY	0xffffffffffffffffLL
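
/*
 * worked example (editorial; illustrative numbers assuming a 500MHz
 * machine clock): for m = 10Mbps,
 *	sm  = (10^7 << 24) / 8 / (5 * 10^8) = 41943
 *	      (2500e-6 bytes/tick scaled by 2^24, matching the table)
 *	ism = ((5 * 10^8) << 10) * 8 / 10^7 = 409600
 *	      (400 ticks/byte scaled by 2^10)
 * and a delay of d = 20msec maps to
 *	dx  = 20 * (5 * 10^8) / 1000 = 10^7 ticks.
 */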

static __inline u_int64_t
seg_x2y(x, sm)
	u_int64_t x;
	u_int64_t sm;
{
	u_int64_t y;

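	/*
	 * editorial note: multiplying before shifting keeps full
	 * precision, but x * sm must then fit in 64 bits; for
	 * x >= SC_LARGEVAL (2^32) the shift is applied first at the
	 * cost of the low-order bits of x.  seg_y2x() below uses the
	 * same trick.
	 */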
	if (x < SC_LARGEVAL)
		y = x * sm >> SM_SHIFT;
	else
		y = (x >> SM_SHIFT) * sm;
	return (y);
}

static __inline u_int64_t
seg_y2x(y, ism)
	u_int64_t y;
	u_int64_t ism;
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == SC_INFINITY)
		x = SC_INFINITY;
	else if (y < SC_LARGEVAL)
		x = y * ism >> ISM_SHIFT;
	else
		x = (y >> ISM_SHIFT) * ism;
	return (x);
}

static __inline u_int64_t
m2sm(m)
	u_int m;
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(m)
	u_int m;
{
	u_int64_t ism;

	if (m == 0)
		ism = SC_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(d)
	u_int d;
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(sm)
	u_int64_t sm;
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(dx)
	u_int64_t dx;
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(sc, isc)
	struct service_curve *sc;
	struct internal_sc *isc;
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
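
/*
 * editorial example (illustrative, under the same 500MHz clock
 * assumption as above): converting sc = {m1 = 10Mbps, d = 20msec,
 * m2 = 1Mbps} yields sm1 = 41943, ism1 = 409600, dx = 10^7 ticks,
 * dy = seg_x2y(dx, sm1) ~= 25000 bytes (the amount served at m1
 * during the initial d msec), sm2 = 4194 and ism2 = 4096000.
 */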

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y value (the inverse of rtsc_x2y() below)
 */
static u_int64_t
rtsc_y2x(rtsc, y)
	struct runtime_sc *rtsc;
	u_int64_t y;
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(rtsc, x)
	struct runtime_sc *rtsc;
	u_int64_t x;
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * solving for dx gives
	 *	dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
				       ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#if 1
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * attach the HFSC state to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
				 &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL) {
		if (ap->parent_handle == HFSC_ROOTCLASS_HANDLE)
			parent = hif->hif_rootclass;
		else
			return (EINVAL);
	}

	if ((cl = hfsc_class_create(hif, &ap->service_curve, parent,
				    ap->qlimit, ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#if 1
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
			      cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
				 ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct class_stats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
				     sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

static void
get_class_stats(sp, cl)
	struct class_stats *sp;
	struct hfsc_class *cl;
{
	sp->class_id = cl->cl_id;
	sp->class_handle = clp_to_clh(cl);

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;

	sp->qlength = qlen(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(hif, chandle)
	struct hfsc_if *hif;
	u_long chandle;
{
	struct hfsc_class *cl;

	cl = (struct hfsc_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
1780 printf("clh_to_cl: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_hif != hif)
		return (NULL);

	return (cl);
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_parent == NULL)
		return (HFSC_ROOTCLASS_HANDLE);	/* XXX */
	return (cl->cl_handle);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_HFSC */