/*	$NetBSD: altq_hfsc.c,v 1.19 2006/10/12 19:59:08 peter Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.26 2005/04/13 03:44:24 suz Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
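
/*
 * per-class state at a glance (terminology follows the H-FSC paper):
 *	cl_rsc	real-time service curve; bounds delay via the eligible
 *		time (cl_e) and the deadline (cl_d).
 *	cl_fsc	link-sharing (fair service) curve; drives the virtual
 *		time (cl_vt) used to select classes hierarchically.
 *	cl_usc	upper-limit service curve; yields the fit-time (cl_f)
 *		beyond which the link-sharing scheduler holds a class back.
 */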

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.19 2006/10/12 19:59:08 peter Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */
#include <sys/kauth.h>

#include <net/if.h>
#include <netinet/in.h>

#include <net/pfvar.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int hfsc_clear_interface(struct hfsc_if *);
static int hfsc_request(struct ifaltq *, int, void *);
static void hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void hfsc_purgeq(struct hfsc_class *);

static void update_cfmin(struct hfsc_class *);
static void set_active(struct hfsc_class *, int);
static void set_passive(struct hfsc_class *);

static void init_ed(struct hfsc_class *, int);
static void update_ed(struct hfsc_class *, int);
static void update_d(struct hfsc_class *, int);
static void init_vf(struct hfsc_class *, int);
static void update_vf(struct hfsc_class *, int, u_int64_t);
static ellist_t *ellist_alloc(void);
static void ellist_destroy(ellist_t *);
static void ellist_insert(struct hfsc_class *);
static void ellist_remove(struct hfsc_class *);
static void ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
static actlist_t *actlist_alloc(void);
static void actlist_destroy(actlist_t *);
static void actlist_insert(struct hfsc_class *);
static void actlist_remove(struct hfsc_class *);
static void actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
static inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
static inline u_int64_t m2sm(u_int);
static inline u_int64_t m2ism(u_int);
static inline u_int64_t d2dx(u_int);
static u_int sm2m(u_int64_t);
static u_int dx2d(u_int64_t);

static void sc2isc(struct service_curve *, struct internal_sc *);
static void rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
static void rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);


#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return (ENOMEM);
	}

	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg __unused)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table. if the slot matching
	 * the lower bits of qid is free, use this slot. otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *	usage:
 *		for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *			do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}
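
/*
 * e.g. for a root class with children A and B, where A has children
 * A1 and A2, the traversal above visits: root, A, A1, A2, B
 * (depth-first, children before siblings).
 */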

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct m_tag *t;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = m_tag_find(m, PACKET_TAG_PF_QID, NULL)) != NULL)
		cl = clh_to_clp(hif, ((struct altq_tag *)(t+1))->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred. mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue. ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
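/*
 * the poll cache (hif_pollcache) implements that contract: ALTDQ_POLL
 * remembers the class it selected, and the following ALTDQ_REMOVE
 * dequeues from the cached class instead of repeating the selection.
 */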
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

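/*
 * init_vf: called when a class becomes active (its first packet is
 * queued). walks up the hierarchy, updating the virtual time and,
 * when an upper-limit curve is present, the fit-time of each newly
 * activated ancestor.
 */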
static void
init_vf(struct hfsc_class *cl, int len __unused)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

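/*
 * update_vf: called after a packet is dequeued (and from hfsc_purgeq
 * with len 0) to charge len bytes against the link-sharing curves of
 * the class and its ancestors; deactivates classes whose queues have
 * drained.
 */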
static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

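/*
 * update_cfmin: recompute the cached minimum fit-time (cl_cfmin) of a
 * parent class over its active children. a child with cl_f == 0 has
 * no upper-limit constraint, so the minimum is 0 in that case.
 */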
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * the eligible list holds the backlogged classes, sorted by their
 * eligible times. there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	free(head, M_DEVBUF);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * the active children list holds the backlogged child classes, sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	free(head, M_DEVBUF);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

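/*
 * actlist_firstfit: among the active children (sorted by virtual
 * time), return the first one whose fit-time allows service now
 * (cl_f <= cur_time), or NULL if none fits.
 */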
static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec     100Kbps    1Mbps     10Mbps    100Mbps    1Gbps
 *  ----------+--------------------------------------------------------
 *  bytes/nsec   12.5e-6    125e-6    1250e-6   12500e-6   125000e-6
 *  sm(500MHz)   25.0e-6    250e-6    2500e-6   25000e-6   250000e-6
 *  sm(200MHz)   62.5e-6    625e-6    6250e-6   62500e-6   625000e-6
 *
 *  nsec/byte    80000      8000      800       80         8
 *  ism(500MHz)  40000      4000      400       40         4
 *  ism(200MHz)  16000      1600      160       16         1.6
 */
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)
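
/*
 * e.g. for m = 10Mbps with machclk_freq = 500MHz:
 *	bytes/clock = 10^7 / 8 / (5 * 10^8) = 2500e-6
 *	sm  = m2sm(10^7)  = (10^7 << 24) / 8 / (5 * 10^8) = 41943
 *	ism = m2ism(10^7) = ((5 * 10^8) << 10) * 8 / 10^7 = 409600
 * i.e. the 2500e-6 and 400 entries from the table above, scaled by
 * 2^24 and 2^10 respectively.
 */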

static inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split x into its upper and lower bits to keep the
	 * intermediate products from overflowing 64 bits.
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
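
/*
 * note: (dx, dy) describe the first segment: dy = seg_x2y(dx, sm1) is
 * the amount of service accumulated by the time the curve switches
 * from slope m1 to slope m2 at offset d.
 */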

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * i.e. (dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y),
	 * which solves to dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2).
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle. if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(struct ifaltq *ifq, u_int bandwidth __unused)
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (NULL);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return NULL;
	}

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(struct hfsc_if *hif)
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(struct hfsc_class *cl, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc)
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;
	int s;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (fsc_tmp == NULL)
			return (ENOMEM);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (usc_tmp == NULL)
			return (ENOMEM);
	}

	cur_time = read_machclk();
	s = splnet();

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
				cl->cl_myf = 0;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	splx(s);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no CPU clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev_t dev __unused, ioctlcmd_t cmd, caddr_t addr, int flag __unused,
    struct lwp *l)
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(struct hfsc_attach *ap)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(struct hfsc_interface *ap)
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(struct hfsc_add_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(struct hfsc_delete_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(struct hfsc_modify_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(struct hfsc_add_filter *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(struct hfsc_delete_filter *ap)
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(struct hfsc_class_stats *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */