/*	$NetBSD: altq_hfsc.c,v 1.18 2006/10/12 01:30:42 christos Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.9 2001/10/26 04:56:11 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation, and that credit
 * is given to Carnegie Mellon University in all publications reporting
 * on direct or indirect use of this code or its derivatives.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 */
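/*
 * implementation outline (a reader's sketch of the code below, not
 * part of the original comments):
 *  - each interface (struct hfsc_if) keeps one eligible list of the
 *    backlogged classes that have a real-time service curve, sorted
 *    by eligible time.
 *  - each intermediate class keeps an active-children list sorted by
 *    virtual time, used by the link-sharing criterion.
 *  - hfsc_dequeue() first serves, among the eligible classes, the one
 *    with the smallest deadline (real-time criterion); when no class
 *    is eligible, it walks down the hierarchy from the root, at each
 *    level picking the active child with the smallest virtual time
 *    (link-sharing criterion).
 */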

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.18 2006/10/12 01:30:42 christos Exp $");

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_hfsc.h>

/*
 * function prototypes
 */
static struct hfsc_if *hfsc_attach __P((struct ifaltq *, u_int));
static int hfsc_detach __P((struct hfsc_if *));
static int hfsc_clear_interface __P((struct hfsc_if *));
static int hfsc_request __P((struct ifaltq *, int, void *));
static void hfsc_purge __P((struct hfsc_if *));
static struct hfsc_class *hfsc_class_create __P((struct hfsc_if *,
	struct service_curve *, struct hfsc_class *, int, int));
static int hfsc_class_destroy __P((struct hfsc_class *));
static int hfsc_class_modify __P((struct hfsc_class *,
	struct service_curve *, struct service_curve *));
static struct hfsc_class *hfsc_nextclass __P((struct hfsc_class *));

static int hfsc_enqueue __P((struct ifaltq *, struct mbuf *,
	struct altq_pktattr *));
static struct mbuf *hfsc_dequeue __P((struct ifaltq *, int));

static int hfsc_addq __P((struct hfsc_class *, struct mbuf *));
static struct mbuf *hfsc_getq __P((struct hfsc_class *));
static struct mbuf *hfsc_pollq __P((struct hfsc_class *));
static void hfsc_purgeq __P((struct hfsc_class *));

static void set_active __P((struct hfsc_class *, int));
static void set_passive __P((struct hfsc_class *));

static void init_ed __P((struct hfsc_class *, int));
static void update_ed __P((struct hfsc_class *, int));
static void update_d __P((struct hfsc_class *, int));
static void init_v __P((struct hfsc_class *, int));
static void update_v __P((struct hfsc_class *, int));
static ellist_t *ellist_alloc __P((void));
static void ellist_destroy __P((ellist_t *));
static void ellist_insert __P((struct hfsc_class *));
static void ellist_remove __P((struct hfsc_class *));
static void ellist_update __P((struct hfsc_class *));
struct hfsc_class *ellist_get_mindl __P((ellist_t *));
static actlist_t *actlist_alloc __P((void));
static void actlist_destroy __P((actlist_t *));
static void actlist_insert __P((struct hfsc_class *));
static void actlist_remove __P((struct hfsc_class *));
static void actlist_update __P((struct hfsc_class *));

static inline u_int64_t seg_x2y __P((u_int64_t, u_int64_t));
static inline u_int64_t seg_y2x __P((u_int64_t, u_int64_t));
static inline u_int64_t m2sm __P((u_int));
static inline u_int64_t m2ism __P((u_int));
static inline u_int64_t d2dx __P((u_int));
static u_int sm2m __P((u_int64_t));
static u_int dx2d __P((u_int64_t));

static void sc2isc __P((struct service_curve *, struct internal_sc *));
static void rtsc_init __P((struct runtime_sc *, struct internal_sc *,
	u_int64_t, u_int64_t));
static u_int64_t rtsc_y2x __P((struct runtime_sc *, u_int64_t));
static u_int64_t rtsc_x2y __P((struct runtime_sc *, u_int64_t));
static void rtsc_min __P((struct runtime_sc *, struct internal_sc *,
	u_int64_t, u_int64_t));

int hfscopen __P((dev_t, int, int, struct lwp *));
int hfscclose __P((dev_t, int, int, struct lwp *));
int hfscioctl __P((dev_t, ioctlcmd_t, caddr_t, int, struct lwp *));
static int hfsccmd_if_attach __P((struct hfsc_attach *));
static int hfsccmd_if_detach __P((struct hfsc_interface *));
static int hfsccmd_add_class __P((struct hfsc_add_class *));
static int hfsccmd_delete_class __P((struct hfsc_delete_class *));
static int hfsccmd_modify_class __P((struct hfsc_modify_class *));
static int hfsccmd_add_filter __P((struct hfsc_add_filter *));
static int hfsccmd_delete_filter __P((struct hfsc_delete_filter *));
static int hfsccmd_class_stats __P((struct hfsc_class_stats *));
static void get_class_stats __P((struct hfsc_basic_class_stats *,
	struct hfsc_class *));
static struct hfsc_class *clh_to_clp __P((struct hfsc_if *, u_long));
static u_long clp_to_clh __P((struct hfsc_class *));

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;

static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;
	struct service_curve root_sc;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (NULL);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return NULL;
	}

	hif->hif_ifq = ifq;

	/*
	 * create root class
	 */
	root_sc.m1 = bandwidth;
	root_sc.d = 0;
	root_sc.m2 = bandwidth;
	if ((hif->hif_rootclass =
	     hfsc_class_create(hif, &root_sc, NULL, 0, 0)) == NULL) {
		/* don't leak the eligible list on this error path */
		ellist_destroy(hif->hif_eligible);
		free(hif, M_DEVBUF);
		return (NULL);
	}

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg __unused)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(hif, sc, parent, qlimit, flags)
	struct hfsc_if *hif;
	struct service_curve *sc;
	struct hfsc_class *parent;
	int qlimit, flags;
{
	struct hfsc_class *cl, *p;
	int s;

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
		printf("hfsc_class_create: RED not configured for HFSC!\n");
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (sc->m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (sc->m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0, 0, 0,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (sc != NULL && (sc->m1 != 0 || sc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(sc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);

		cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(sc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = (u_long)cl;  /* XXX: just a pointer to this class */
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;
	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	/* add this class to the children list of the parent */
	if (parent == NULL) {
		/* this is root class */
	}
	else if ((p = parent->cl_children) == NULL)
		parent->cl_children = cl;
	else {
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(cl)
	struct hfsc_class *cl;
{
	int s;

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}
	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc;
{
	struct internal_sc *rsc_tmp, *fsc_tmp;
	int s;

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	} else
		rsc_tmp = NULL;
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (fsc_tmp == NULL) {
			/* don't leak rsc_tmp on this error path */
			if (rsc_tmp != NULL)
				free(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	} else
		fsc_tmp = NULL;

	s = splnet();
	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
			rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
		}
	}
	splx(s);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 * usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = hif->hif_defaultclass;
	cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */

	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		u_int64_t cur_time;

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL) {
			cur_time = read_machclk();
			realtime = (cl->cl_e <= cur_time);
		}
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible)) != NULL) {
			realtime = 1;
		} else {
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_first(cl->cl_actc);
				if (cl == NULL)
					return (NULL);
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_v(cl, len);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif

	return (m);
}

static int
hfsc_addq(cl, m)
	struct hfsc_class *cl;
	struct mbuf *m;
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(cl)
	struct hfsc_class *cl;
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(cl)
	struct hfsc_class *cl;
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(cl)
	struct hfsc_class *cl;
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	set_passive(cl);
}

static void
set_active(cl, len)
	struct hfsc_class *cl;
	int len;
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_v(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	if (cl->cl_fsc != NULL) {
		while (cl->cl_parent != NULL) {
			if (--cl->cl_nactive == 0) {
				/* remove this class from the vt list */
				actlist_remove(cl);
			} else
				/* still has active children */
				break;

			/* go up to the parent class */
			cl = cl->cl_parent;
		}
	}
}

static void
init_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_v(struct hfsc_class *cl, int len __unused)
{
	struct hfsc_class *min_cl, *max_cl;

	while (cl->cl_parent != NULL) {

		if (cl->cl_nactive++ > 0)
			/* already active */
			break;

		/*
		 * if the parent became idle while this class was idle,
		 * reset vt and the runtime service curve.
		 */
		if (cl->cl_parent->cl_nactive == 0 ||
		    cl->cl_parent->cl_vtperiod != cl->cl_parentperiod) {
			cl->cl_vt = 0;
			rtsc_init(&cl->cl_virtual, cl->cl_fsc,
			    0, cl->cl_total);
		}
		min_cl = actlist_first(cl->cl_parent->cl_actc);
		if (min_cl != NULL) {
			u_int64_t vt;

			/*
			 * set vt to the average of the min and max classes.
			 * if the parent's period didn't change,
			 * don't decrease vt of the class.
			 */
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			vt = (min_cl->cl_vt + max_cl->cl_vt) / 2;
			if (cl->cl_parent->cl_vtperiod != cl->cl_parentperiod
			    || vt > cl->cl_vt)
				cl->cl_vt = vt;
		}

		/* update the virtual curve */
		rtsc_min(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt, cl->cl_total);

		cl->cl_vtperiod++;  /* increment vt period */
		cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
		if (cl->cl_parent->cl_nactive == 0)
			cl->cl_parentperiod++;

		actlist_insert(cl);

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

static void
update_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	while (cl->cl_parent != NULL) {

		cl->cl_total += len;

		if (cl->cl_fsc != NULL) {
			cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total);

			/* update the vt list */
			actlist_update(cl);
		}

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes sorted by their eligible times.
 * there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc()
{
	ellist_t *head;

	head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	if (head != NULL)
		TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(head)
	ellist_t *head;
{
	free(head, M_DEVBUF);
}

static void
ellist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(head)
	ellist_t *head;
{
	struct hfsc_class *p, *cl = NULL;
	u_int64_t cur_time;

	cur_time = read_machclk();

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc()
{
	actlist_t *head;

	head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	if (head != NULL)
		TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(head)
	actlist_t *head;
{
	free(head, M_DEVBUF);
}

static void
actlist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(cl)
	struct hfsc_class *cl;
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt <= p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/*
 * service curve support functions
 *
 * external service curve parameters
 *	m: bits/sec
 *	d: msec
 * internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SC_LARGEVAL	(1LL << 32)
#define	SC_INFINITY	0xffffffffffffffffLL
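
/*
 * a worked example of the scaling (not from the original comments; the
 * numbers just instantiate the table above): with machclk_freq = 500MHz
 * and m = 100Mbps,
 *	m2sm(m)  = (100000000 << SM_SHIFT) / 8 / 500000000 = 419430
 *		   (0.025 bytes/tick scaled by 2^24)
 *	m2ism(m) = (500000000 << ISM_SHIFT) * 8 / 100000000 = 40960
 *		   (40 ticks/byte scaled by 2^10)
 * seg_x2y() and seg_y2x() below shift the argument down before
 * multiplying once it reaches SC_LARGEVAL (2^32), trading the low
 * bits of precision for range so the 64-bit product cannot overflow.
 */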

static inline u_int64_t
seg_x2y(x, sm)
	u_int64_t x;
	u_int64_t sm;
{
	u_int64_t y;

	if (x < SC_LARGEVAL)
		y = x * sm >> SM_SHIFT;
	else
		y = (x >> SM_SHIFT) * sm;
	return (y);
}

static inline u_int64_t
seg_y2x(y, ism)
	u_int64_t y;
	u_int64_t ism;
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == SC_INFINITY)
		x = SC_INFINITY;
	else if (y < SC_LARGEVAL)
		x = y * ism >> ISM_SHIFT;
	else
		x = (y >> ISM_SHIFT) * ism;
	return (x);
}

static inline u_int64_t
m2sm(m)
	u_int m;
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static inline u_int64_t
m2ism(m)
	u_int m;
{
	u_int64_t ism;

	if (m == 0)
		ism = SC_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static inline u_int64_t
d2dx(d)
	u_int d;
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(sm)
	u_int64_t sm;
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(dx)
	u_int64_t dx;
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(sc, isc)
	struct service_curve *sc;
	struct internal_sc *isc;
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y value (i.e. the inverse of rtsc_x2y below)
 */
static u_int64_t
rtsc_y2x(rtsc, y)
	struct runtime_sc *rtsc;
	u_int64_t y;
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(rtsc, x)
	struct runtime_sc *rtsc;
	u_int64_t x;
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
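	/*
	 * a sketch of the algebra behind the next line (not in the
	 * original comments): expanding the equation above,
	 *	(dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y)
	 *	dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT
	 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 * sm1 > sm2 holds here since this branch handles the concave case.
	 */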
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

/*
 * hfsc device interface
 */
int
hfscopen(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no CPU clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev_t dev __unused, int flag __unused, int fmt __unused,
    struct lwp *l __unused)
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev_t dev __unused, ioctlcmd_t cmd, caddr_t addr, int flag __unused,
    struct lwp *l)
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#if 1
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}
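
/*
 * a typical configuration sequence from userland (a hedged sketch;
 * the command names and argument structs are the ones handled above,
 * but the device path is the conventional ALTQ one and may differ):
 *
 *	fd = open("/dev/altq/hfsc", O_RDWR);
 *	ioctl(fd, HFSC_IF_ATTACH, &attach);	// hfsc_attach: iface, bandwidth
 *	ioctl(fd, HFSC_ADD_CLASS, &add);	// hfsc_add_class: service curve
 *	ioctl(fd, HFSC_ADD_FILTER, &fltr);	// bind flows to a leaf class
 *	ioctl(fd, HFSC_ENABLE, &iface);		// start scheduling
 */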

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL) {
		if (ap->parent_handle == HFSC_ROOTCLASS_HANDLE)
			parent = hif->hif_rootclass;
		else
			return (EINVAL);
	}

	if ((cl = hfsc_class_create(hif, &ap->service_curve, parent,
	    ap->qlimit, ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#if 1
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_basic_class_stats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

static void
get_class_stats(sp, cl)
	struct hfsc_basic_class_stats *sp;
	struct hfsc_class *cl;
{
	sp->class_id = cl->cl_id;
	sp->class_handle = clp_to_clh(cl);

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;

	sp->qlength = qlen(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(hif, chandle)
	struct hfsc_if *hif;
	u_long chandle;
{
	struct hfsc_class *cl;

	cl = (struct hfsc_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_clp: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_hif != hif)
		return (NULL);

	return (cl);
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_parent == NULL)
		return (HFSC_ROOTCLASS_HANDLE);  /* XXX */
	return (cl->cl_handle);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_HFSC */