/*	$NetBSD: altq_hfsc.c,v 1.24.62.1 2017/08/12 04:44:32 snj Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.26 2005/04/13 03:44:24 suz Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi (at) aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
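/*
 * each class may be assigned up to three service curves: a real-time
 * service curve (rsc) for delay/bandwidth guarantees, a link-sharing
 * curve (fsc) for distributing excess bandwidth, and an upperlimit
 * curve (usc) for rate-limiting.  each curve is two-piece linear:
 * slope m1 for the first d msec, then slope m2 (see struct
 * service_curve and sc2isc() below).
 */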

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.24.62.1 2017/08/12 04:44:32 snj Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#include "pf.h"
#endif

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */
#include <sys/kauth.h>

#include <net/if.h>
#include <netinet/in.h>

#if NPF > 0
#include <net/pfvar.h>
#endif
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int hfsc_clear_interface(struct hfsc_if *);
static int hfsc_request(struct ifaltq *, int, void *);
static void hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void hfsc_purgeq(struct hfsc_class *);

static void update_cfmin(struct hfsc_class *);
static void set_active(struct hfsc_class *, int);
static void set_passive(struct hfsc_class *);

static void init_ed(struct hfsc_class *, int);
static void update_ed(struct hfsc_class *, int);
static void update_d(struct hfsc_class *, int);
static void init_vf(struct hfsc_class *, int);
static void update_vf(struct hfsc_class *, int, u_int64_t);
static ellist_t *ellist_alloc(void);
static void ellist_destroy(ellist_t *);
static void ellist_insert(struct hfsc_class *);
static void ellist_remove(struct hfsc_class *);
static void ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
static actlist_t *actlist_alloc(void);
static void actlist_destroy(actlist_t *);
static void actlist_insert(struct hfsc_class *);
static void actlist_remove(struct hfsc_class *);
static void actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
static inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
static inline u_int64_t m2sm(u_int);
static inline u_int64_t m2ism(u_int);
static inline u_int64_t d2dx(u_int);
static u_int sm2m(u_int64_t);
static u_int dx2d(u_int64_t);

static void sc2isc(struct service_curve *, struct internal_sc *);
static void rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
static void rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);


#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

#if NPF > 0
int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return (ENOMEM);
	}

	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d  = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d  = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d  = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	memset(&stats, 0, sizeof(stats));
	get_class_stats(&stats, cl);

	if ((error = copyout((void *)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
#endif /* NPF > 0 */

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct m_tag *t;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = m_tag_find(m, PACKET_TAG_ALTQ_QID, NULL)) != NULL)
		cl = clh_to_clp(hif, ((struct altq_tag *)(t+1))->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

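/*
 * a leaf class becomes active when its queue goes from empty to
 * non-empty (hfsc_enqueue() calls set_active() on the first packet)
 * and passive again once the queue drains (hfsc_dequeue() and
 * hfsc_purgeq() call set_passive()).
 */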
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave (sm1 > sm2), it is equal to the deadline curve.
	 * for convex (sm1 <= sm2), drop the first segment (dx = dy = 0),
	 * leaving a one-piece linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * the eligible list holds backlogged classes sorted by their eligible
 * times.  there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	free(head, M_DEVBUF);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * the active children list holds backlogged child classes sorted by
 * their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	free(head, M_DEVBUF);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 * external service curve parameters
 *	m: bits/sec
 *	d: msec
 * internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec     100Kbps    1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec   12.5e-6    125e-6    1250e-6    12500e-6   125000e-6
 *  sm(500MHz)   25.0e-6    250e-6    2500e-6    25000e-6   250000e-6
 *  sm(200MHz)   62.5e-6    625e-6    6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte    80000      8000      800        80         8
 *  ism(500MHz)  40000      4000      400        40         4
 *  ism(200MHz)  16000      1600      160        16         1.6
 */
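/*
 * worked example (assuming machclk_freq = 1GHz): for m = 100Mbps,
 * the rate is 100e6 / 8 / 1e9 = 0.0125 bytes/tick, so m2sm() yields
 * 0.0125 << 24 ~= 209715 and m2ism() yields 80 ticks/byte << 10 =
 * 81920.
 */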
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)

static inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split x into its upper and lower SM_SHIFT bits to avoid
	 * overflow of the intermediate product
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}
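
/*
 * note: a single multiply "x * sm" would overflow 64 bits once
 * x > 2^64 / sm; with SM_SHIFT = 24 fractional bits in sm, this can
 * happen within hours of uptime at GHz clock rates.  splitting x
 * into its upper and lower SM_SHIFT bits keeps both partial products
 * in range.
 */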

static inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}
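
/*
 * note: m = 0 maps to an infinite inverse slope (HT_INFINITY), which
 * seg_y2x() checks for explicitly and maps to an infinite time value.
 */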

static inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
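
/*
 * after conversion the internal curve's knee sits at offset (dx, dy)
 * from its origin: dx machine clock ticks and dy = seg_x2y(dx, sm1)
 * bytes, i.e. the service accumulated at slope m1 over the first d msec.
 */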

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve for
 * the given y-projection (amount of service), i.e. the inverse of
 * rtsc_x2y() below
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * expanding seg_x2y() and solving for dx gives
	 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(struct ifaltq *ifq, u_int bandwidth)
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (NULL);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return NULL;
	}

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(struct hfsc_if *hif)
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(struct hfsc_class *cl, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc)
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;
	int s;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (fsc_tmp == NULL)
			return (ENOMEM);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (usc_tmp == NULL)
			return (ENOMEM);
	}

	cur_time = read_machclk();
	s = splnet();

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
				cl->cl_myf = 0;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	splx(s);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no CPU clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_HFSC, NULL,
		    NULL, NULL)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(struct hfsc_attach *ap)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(struct hfsc_interface *ap)
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(struct hfsc_add_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(struct hfsc_delete_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(struct hfsc_modify_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(struct hfsc_add_filter *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(struct hfsc_delete_filter *ap)
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(struct hfsc_class_stats *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((void *)&stats, (void *)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */