Home | History | Annotate | Line # | Download | only in altq
altq_jobs.c revision 1.3
      1 /*	$NetBSD: altq_jobs.c,v 1.3 2006/10/20 22:04:13 elad Exp $	*/
      2 /*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/
      3 /*
      4  * Copyright (c) 2001, the Rector and Board of Visitors of the
      5  * University of Virginia.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms,
      9  * with or without modification, are permitted provided
     10  * that the following conditions are met:
     11  *
     12  * Redistributions of source code must retain the above
     13  * copyright notice, this list of conditions and the following
     14  * disclaimer.
     15  *
     16  * Redistributions in binary form must reproduce the above
     17  * copyright notice, this list of conditions and the following
     18  * disclaimer in the documentation and/or other materials provided
     19  * with the distribution.
     20  *
     21  * Neither the name of the University of Virginia nor the names
     22  * of its contributors may be used to endorse or promote products
     23  * derived from this software without specific prior written
     24  * permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
     27  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
     28  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     29  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     30  * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
     31  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     32  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
     35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     38  * THE POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 /*
     41  * JoBS - altq prototype implementation
     42  *
     43  * Author: Nicolas Christin <nicolas (at) cs.virginia.edu>
     44  *
     45  * JoBS algorithms originally devised and proposed by
     46  * Nicolas Christin and Jorg Liebeherr.
     47  * Grateful acknowledgments to Tarek Abdelzaher for his help and
     48  * comments, and to Kenjiro Cho for some helpful advice.
     49  * Contributed by the Multimedia Networks Group at the University
     50  * of Virginia.
     51  *
     52  * Papers and additional info can be found at
     53  * http://qosbox.cs.virginia.edu
     54  *
     55  */
     56 
     57 /*
     58  * JoBS queue
     59  */
     60 
     61 #include <sys/cdefs.h>
     62 __KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.3 2006/10/20 22:04:13 elad Exp $");
     63 
     64 #ifdef _KERNEL_OPT
     65 #include "opt_altq.h"
     66 #include "opt_inet.h"
     67 #endif
     68 
     69 #ifdef ALTQ_JOBS  /* jobs is enabled by ALTQ_JOBS option in opt_altq.h */
     70 
     71 #include <sys/param.h>
     72 #include <sys/malloc.h>
     73 #include <sys/mbuf.h>
     74 #include <sys/socket.h>
     75 #include <sys/sockio.h>
     76 #include <sys/systm.h>
     77 #include <sys/proc.h>
     78 #include <sys/errno.h>
     79 #include <sys/kernel.h>
     80 #include <sys/queue.h>
     81 #include <sys/kauth.h>
     82 
     83 #ifdef __FreeBSD__
     84 #include <sys/limits.h>
     85 #endif
     86 
     87 #include <net/if.h>
     88 #include <net/if_types.h>
     89 
     90 #include <altq/altq.h>
     91 #include <altq/altq_conf.h>
     92 #include <altq/altq_jobs.h>
     93 
     94 #ifdef ALTQ3_COMPAT
     95 /*
     96  * function prototypes
     97  */
     98 static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
     99 static int jobs_detach(struct jobs_if *);
    100 static int jobs_clear_interface(struct jobs_if *);
    101 static int jobs_request(struct ifaltq *, int, void *);
    102 static void jobs_purge(struct jobs_if *);
    103 static struct jobs_class *jobs_class_create(struct jobs_if *,
    104     int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
    105 static int jobs_class_destroy(struct jobs_class *);
    106 static int jobs_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
    107 static struct mbuf *jobs_dequeue(struct ifaltq *, int);
    108 
    109 static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if*);
    110 static struct mbuf *jobs_getq(struct jobs_class *);
    111 static struct mbuf *jobs_pollq(struct jobs_class *);
    112 static void jobs_purgeq(struct jobs_class *);
    113 
    114 static int jobscmd_if_attach(struct jobs_attach *);
    115 static int jobscmd_if_detach(struct jobs_interface *);
    116 static int jobscmd_add_class(struct jobs_add_class *);
    117 static int jobscmd_delete_class(struct jobs_delete_class *);
    118 static int jobscmd_modify_class(struct jobs_modify_class *);
    119 static int jobscmd_add_filter(struct jobs_add_filter *);
    120 static int jobscmd_delete_filter(struct jobs_delete_filter *);
    121 static int jobscmd_class_stats(struct jobs_class_stats *);
    122 static void get_class_stats(struct class_stats *, struct jobs_class *);
    123 static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
    124 static u_long clp_to_clh(struct jobs_class *);
    125 
    126 static TSLIST *tslist_alloc(void);
    127 static void tslist_destroy(struct jobs_class *);
    128 static int tslist_enqueue(struct jobs_class *, u_int64_t);
    129 static void tslist_dequeue(struct jobs_class *);
    130 static void tslist_drop(struct jobs_class *);
    131 
    132 static int enforce_wc(struct jobs_if *);
    133 static int64_t* adjust_rates_rdc(struct jobs_if *);
    134 static int64_t* assign_rate_drops_adc(struct jobs_if *);
    135 static int64_t* update_error(struct jobs_if *);
    136 static int min_rates_adc(struct jobs_if *);
    137 static int64_t proj_delay(struct jobs_if *, int);
    138 static int pick_dropped_rlc(struct jobs_if *);
    139 
    140 altqdev_decl(jobs);
    141 
    142 /* jif_list keeps all jobs_if's allocated. */
    143 static struct jobs_if *jif_list = NULL;
    144 
    145 typedef unsigned long long ull;
    146 
    147 /* setup functions */
    148 
    149 static struct jobs_if *
    150 jobs_attach(struct ifaltq *ifq, u_int bandwidth, u_int qlimit, u_int separate)
    151 {
    152 	struct jobs_if *jif;
    153 
    154 	jif = malloc(sizeof(struct jobs_if), M_DEVBUF, M_WAITOK|M_ZERO);
    155 	if (jif == NULL)
    156 	        return (NULL);
    157 
    158 	jif->jif_bandwidth = bandwidth;
    159 	jif->jif_qlimit = qlimit;
    160 	jif->jif_separate = separate;
    161 #ifdef ALTQ_DEBUG
    162 	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
    163 	printf("JoBS buffer size = %d pkts [%s]\n",
    164 	       (int)qlimit, separate?"separate buffers":"shared buffer");
    165 #endif
    166 	jif->jif_maxpri = -1;
    167 	jif->jif_ifq = ifq;
    168 
    169 	jif->wc_cycles_enqueue = 0;
    170 	jif->avg_cycles_enqueue = 0;
    171 	jif->avg_cycles2_enqueue = 0;
    172 	jif->bc_cycles_enqueue = INFINITY;
    173 	jif->wc_cycles_dequeue = 0;
    174 	jif->avg_cycles_dequeue = 0;
    175 	jif->avg_cycles2_dequeue = 0;
    176 	jif->bc_cycles_dequeue = INFINITY;
    177 	jif->total_enqueued = 0;
    178 	jif->total_dequeued = 0;
    179 
    180 	/* add this state to the jobs list */
    181 	jif->jif_next = jif_list;
    182 	jif_list = jif;
    183 
    184 	return (jif);
    185 }
    186 
    187 static int
    188 jobs_detach(struct jobs_if *jif)
    189 {
    190 	(void)jobs_clear_interface(jif);
    191 
    192 	/* remove this interface from the jif list */
    193 	if (jif_list == jif)
    194 		jif_list = jif->jif_next;
    195 	else {
    196 		struct jobs_if *p;
    197 
    198 		for (p = jif_list; p != NULL; p = p->jif_next)
    199 			if (p->jif_next == jif) {
    200 				p->jif_next = jif->jif_next;
    201 				break;
    202 			}
    203 		ASSERT(p != NULL);
    204 	}
    205 	free(jif, M_DEVBUF);
    206 	return (0);
    207 }
    208 
    209 /*
    210  * bring the interface back to the initial state by discarding
    211  * all the filters and classes.
    212  */
    213 static int
    214 jobs_clear_interface(struct jobs_if *jif)
    215 {
    216 	struct jobs_class	*cl;
    217 	int	pri;
    218 
    219 	/* free the filters for this interface */
    220 	acc_discard_filters(&jif->jif_classifier, NULL, 1);
    221 
    222 	/* clear out the classes */
    223 	for (pri = 0; pri <= jif->jif_maxpri; pri++)
    224 		if ((cl = jif->jif_classes[pri]) != NULL)
    225 			jobs_class_destroy(cl);
    226 
    227 	return (0);
    228 }
    229 
    230 static int
    231 jobs_request(struct ifaltq *ifq, int req, void *arg __unused)
    232 {
    233 	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
    234 
    235 	switch (req) {
    236 	case ALTRQ_PURGE:
    237 		jobs_purge(jif);
    238 		break;
    239 	}
    240 	return (0);
    241 }
    242 
    243 /* discard all the queued packets on the interface */
    244 static void
    245 jobs_purge(struct jobs_if *jif)
    246 {
    247 	struct jobs_class *cl;
    248 	int pri;
    249 
    250 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
    251 		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
    252 			jobs_purgeq(cl);
    253 	}
    254 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
    255 		jif->jif_ifq->ifq_len = 0;
    256 }
    257 
/*
 * jobs_class_create: create (or re-initialize) the class at priority
 * `pri' on interface state `jif'.
 *
 * adc: absolute delay constraint, given in microseconds (-1 = none)
 * rdc: relative delay constraint (-1 = none)
 * alc: absolute loss constraint (-1 = none)
 * rlc: relative loss constraint (-1 = none)
 * arc: absolute rate constraint, given in bps (-1 = none)
 *
 * if a class already exists at this priority it is flushed and reused
 * rather than reallocated.  returns the class, or NULL on allocation
 * failure.
 */
static struct jobs_class *
jobs_class_create(struct jobs_if *jif, int pri, int64_t adc, int64_t rdc,
    int64_t alc, int64_t rlc, int64_t arc, int flags)
{
	struct jobs_class *cl, *scan1, *scan2;
	int s;
	int class_exists1, class_exists2;
	int i, j;
	int64_t tmp[JOBS_MAXPRI];
	u_int64_t now;

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
		splx(s);
	} else {
		cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;

		/* per-class list of packet arrival timestamps */
		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	/* reset the queue and all per-class counters and statistics */
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	cl->cl_period = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

	/*
	 * for each constraint, -1 means "unconcerned"; record the
	 * concern flag and substitute a neutral value for the bound.
	 */
	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	cl->cl_rdc=rdc;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rlc=rlc;
	cl->cl_alc=alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;  /* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */

	if (cl->concerned_rdc) {
		/*
		 * first pass: tmp[i] = product of the rdc targets of all
		 * concerned classes with priority strictly lower than i
		 * (0 if class i does not exist)
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		/*
		 * second pass: each class's delay_prod_others is the
		 * product of tmp[j] over every other concerned class j
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	if (cl->concerned_rlc) {
		/* same two-pass computation, for the loss targets */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return (cl);

 err_ret:
	/* only reached for a freshly malloc'ed (zeroed) class */
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		free(cl->arv_tm, M_DEVBUF);

	free(cl, M_DEVBUF);
	return (NULL);
}
    455 
/*
 * jobs_class_destroy: remove a class from its interface, flushing any
 * queued packets and discarding the filters that reference it, then
 * free all of its resources.  always returns 0.
 */
static int
jobs_class_destroy(struct jobs_class *cl)
{
	struct jobs_if *jif;
	int s, pri;

	s = splnet();

	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		jobs_purgeq(cl);

	jif = cl->cl_jif;
	jif->jif_classes[cl->cl_pri] = NULL;
	/* if this was the highest priority, scan down for the new maximum */
	if (jif->jif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (jif->jif_classes[pri] != NULL) {
				jif->jif_maxpri = pri;
				break;
			}
		if (pri < 0)
			jif->jif_maxpri = -1;	/* no classes left */
	}
	splx(s);

	/* release the timestamp list, the queue, and the class itself */
	tslist_destroy(cl);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (0);
}
    488 
/*
 * jobs_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 *
 * classifies the packet, updates the per-class arrival counters and
 * the theoretical output curves, queues the packet (possibly dropping
 * it or a victim packet instead, see jobs_addq), and finally re-runs
 * the rate allocation.  returns 0, or ENOBUFS if a packet was dropped.
 */
static int
jobs_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	int len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t* delta_rate;
	u_int64_t tstamp1, tstamp2, cycles; /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	if (IFQ_IS_EMPTY(ifq)) {
		/* coming out of an idle period: restart all classes */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = jif->jif_default;

	len = m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals w/ overflow: reset everything and re-count this pkt */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	/* current loss = fraction of arrived bytes not admitted (scaled) */
	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;

	/* for MDRR: update theoretical value of the output curve */

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now; /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS; /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	enforce_wc(jif);

	/* note: cl is reused as a scan variable from here on */
	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
			  if ((cl = jif->jif_classes[pri]) != NULL &&
			      !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	}

	delta_rate = adjust_rates_rdc(jif);

	if (delta_rate != NULL) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++)
			if ((cl = jif->jif_classes[pri]) != NULL &&
			    !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
		free(delta_rate, M_DEVBUF);
	}

	/* update the enqueue cycle-count statistics */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue=cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue=cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return (return_flag);
}
    644 
/*
 * jobs_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 *
 * the served class is the backlogged class whose actual transmissions
 * lag furthest behind its promised (theoretical) transmissions.
 */

static struct mbuf *
jobs_dequeue(struct ifaltq *ifq, int op)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue: mark all classes idle */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
		  cl = jif->jif_classes[pri];
		  if (cl != NULL)
		    cl->idletime = now;
		}

		/* update the dequeue cycle-count statistics */
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (NULL);
	}

	/*
	 * select the class whose actual tranmissions are the furthest
	 * from the promised transmissions
	 */

	max_error = -1;
	svc_class = -1;

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			/* error = promised output - actual output (scaled) */
			error = (int64_t)cl->cl_rout_th.bytes
			    -(int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	/* since the interface queue is non-empty, svc_class should be >= 0 */
	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	if (op == ALTDQ_POLL) {
		/* peek only: account the cycles, leave the queue untouched */
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		/* delay = now - arrival timestamp of the dequeued packet */
		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel  += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
	}
	if (cl != NULL)
		tslist_dequeue(cl);		/* dequeue the timestamp */

	/* update the dequeue cycle-count statistics */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return (m);
}
    771 
    772 static int
    773 jobs_addq(struct jobs_class *cl, struct mbuf *m, struct jobs_if *jif)
    774 {
    775 	int victim;
    776 	u_int64_t len;
    777 	u_int64_t now;
    778 	struct jobs_class* victim_class;
    779 
    780 	victim = -1;
    781 	victim_class = NULL;
    782 	len = 0;
    783 
    784 	now = read_machclk();
    785 
    786 	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
    787 		/*
    788 		 * separate buffers: no guarantees on packet drops
    789 		 * can be offered
    790 		 * thus we drop the incoming packet
    791 		 */
    792 		len = (u_int64_t)m_pktlen(m);
    793 		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
    794 		PKTCNTR_SUB(&cl->cl_rin, (int)len);
    795 		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
    796 		PKTCNTR_SUB(&cl->st_rin, (int)len);
    797 		cl->current_loss += (len << SCALE_LOSS)
    798 		    /cl->cl_arrival.bytes;
    799 		m_freem(m);
    800 		return (-1);
    801 
    802 	} else if (!jif->jif_separate
    803 		   && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
    804 		/* shared buffer: supports guarantees on losses */
    805 		if (!cl->concerned_rlc) {
    806 			if (!cl->concerned_alc) {
    807 				/*
    808 				 * no ALC, no RLC on this class:
    809 				 * drop the incoming packet
    810 				 */
    811 				len = (u_int64_t)m_pktlen(m);
    812 				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
    813 				PKTCNTR_SUB(&cl->cl_rin, (int)len);
    814 				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
    815 				PKTCNTR_SUB(&cl->st_rin, (int)len);
    816 				cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
    817 				m_freem(m);
    818 				return (-1);
    819 			} else {
    820 				/*
    821 				 * no RLC, but an ALC:
    822 				 * drop the incoming packet if possible
    823 				 */
    824 				len = (u_int64_t)m_pktlen(m);
    825 				if (cl->current_loss + (len << SCALE_LOSS)
    826 				    / cl->cl_arrival.bytes <= cl->cl_alc) {
    827 					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
    828 					PKTCNTR_SUB(&cl->cl_rin, (int)len);
    829 					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
    830 					PKTCNTR_SUB(&cl->st_rin, (int)len);
    831 					cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
    832 					m_freem(m);
    833 					return (-1);
    834 				} else {
    835 					/*
    836 					 * the ALC would be violated:
    837 					 * pick another class
    838 					 */
    839 					_addq(cl->cl_q, m);
    840 					tslist_enqueue(cl, now);
    841 
    842 					victim = pick_dropped_rlc(jif);
    843 
    844 					if (victim == -1) {
    845 						/*
    846 						 * something went wrong
    847 						 * let us discard
    848 						 * the incoming packet,
    849 						 * regardless of what
    850 						 * may happen...
    851 						 */
    852 						victim_class = cl;
    853 					} else
    854 						victim_class = jif->jif_classes[victim];
    855 
    856 					if (victim_class != NULL) {
    857 						/*
    858 						 * test for safety
    859 						 * purposes...
    860 						 * it must be true
    861 						 */
    862 						m = _getq_tail(victim_class->cl_q);
    863 						len = (u_int64_t)m_pktlen(m);
    864 						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
    865 						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
    866 						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
    867 						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
    868 						victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
    869 						m_freem(m); /* the packet is trashed here */
    870 						tslist_drop(victim_class); /* and its timestamp as well */
    871 					}
    872 					return (-1);
    873 				}
    874 			}
    875 		} else {
    876 			/*
    877 			 * RLC on that class:
    878 			 * pick class according to RLCs
    879 			 */
    880 			_addq(cl->cl_q, m);
    881 			tslist_enqueue(cl, now);
    882 
    883 			victim = pick_dropped_rlc(jif);
    884 			if (victim == -1) {
    885 				/*
    886 				 * something went wrong
    887 				 * let us discard the incoming packet,
    888 				 * regardless of what may happen...
    889 				 */
    890 				victim_class = cl;
    891 			} else
    892 				victim_class = jif->jif_classes[victim];
    893 
    894 			if (victim_class != NULL) {
    895 				/*
    896 				 * test for safety purposes...
    897 				 * it must be true
    898 				 */
    899 				m = _getq_tail(victim_class->cl_q);
    900 				len = (u_int64_t)m_pktlen(m);
    901 				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
    902 				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
    903 				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
    904 				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
    905 				victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
    906 				m_freem(m); /* the packet is trashed here */
    907 				tslist_drop(victim_class); /* and its timestamp as well */
    908 			}
    909 			return (-1);
    910 		}
    911 	}
    912 	/* else: no drop */
    913 
    914 	_addq(cl->cl_q, m);
    915 	tslist_enqueue(cl, now);
    916 
    917 	return (0);
    918 }
    919 
    920 static struct mbuf *
    921 jobs_getq(struct jobs_class *cl)
    922 {
    923 	return _getq(cl->cl_q);
    924 }
    925 
    926 static struct mbuf *
    927 jobs_pollq(struct jobs_class *cl)
    928 {
    929 	return qhead(cl->cl_q);
    930 }
    931 
    932 static void
    933 jobs_purgeq(struct jobs_class *cl)
    934 {
    935 	struct mbuf *m;
    936 
    937 	if (qempty(cl->cl_q))
    938 		return;
    939 
    940 	while ((m = _getq(cl->cl_q)) != NULL) {
    941 		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
    942 		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
    943 		m_freem(m);
    944 		tslist_drop(cl);
    945 	}
    946 	ASSERT(qlen(cl->cl_q) == 0);
    947 }
    948 
    949 /*
    950  * timestamp list support routines
    951  *
    952  * this implementation has been revamped and
    953  * now uses a TAILQ structure.
    954  * timestamp list holds class timestamps
    955  * there is one timestamp list per class.
    956  */
    957 static TSLIST *
    958 tslist_alloc(void)
    959 {
    960 	TSLIST *list_init;
    961 
    962 	list_init = malloc(sizeof(TSLIST), M_DEVBUF, M_WAITOK);
    963 	TAILQ_INIT(list_init);
    964 	return (list_init);
    965 }
    966 
    967 static void
    968 tslist_destroy(struct jobs_class *cl)
    969 {
    970 	while (tslist_first(cl->arv_tm) != NULL)
    971 		tslist_dequeue(cl);
    972 
    973 	free(cl->arv_tm, M_DEVBUF);
    974 }
    975 
    976 static int
    977 tslist_enqueue(struct jobs_class *cl, u_int64_t arv)
    978 {
    979 	TSENTRY *pushed;
    980 	pushed = malloc(sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
    981 	if (pushed == NULL)
    982 		return (0);
    983 
    984 	pushed->timestamp = arv;
    985 	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
    986 	return (1);
    987 }
    988 
    989 static void
    990 tslist_dequeue(struct jobs_class *cl)
    991 {
    992 	TSENTRY *popped;
    993 	popped = tslist_first(cl->arv_tm);
    994 	if (popped != NULL) {
    995 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
    996 		  free(popped, M_DEVBUF);
    997 	}
    998 	return;
    999 }
   1000 
   1001 static void
   1002 tslist_drop(struct jobs_class *cl)
   1003 {
   1004 	TSENTRY *popped;
   1005 	popped = tslist_last(cl->arv_tm);
   1006 	if (popped != NULL) {
   1007 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
   1008 		  free(popped, M_DEVBUF);
   1009 	}
   1010 	return;
   1011 }
   1012 
   1013 /*
   1014  * rate allocation support routines
   1015  */
   1016 /*
   1017  * enforce_wc: enforce that backlogged classes have non-zero
   1018  * service rate, and that non-backlogged classes have zero
   1019  * service rate.
   1020  */
   1021 
   1022 static int
   1023 enforce_wc(struct jobs_if *jif)
   1024 {
   1025 	struct jobs_class *cl;
   1026 
   1027 	int64_t active_classes;
   1028 	int pri;
   1029 	int is_backlogged, class_exists, updated;
   1030 
   1031 	updated = 0;
   1032 	active_classes = 0;
   1033 
   1034 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1035 		cl = jif->jif_classes[pri];
   1036 		class_exists = (cl != NULL);
   1037 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1038 
   1039 		if (is_backlogged)
   1040 			active_classes++;
   1041 		if ((is_backlogged && cl->service_rate <= 0)
   1042 		    ||(class_exists
   1043 		       && !is_backlogged && cl->service_rate > 0))
   1044 			updated = 1;
   1045 	}
   1046 
   1047 	if (updated) {
   1048 		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1049 			cl = jif->jif_classes[pri];
   1050 			class_exists = (cl != NULL);
   1051 			is_backlogged = (class_exists && !qempty(cl->cl_q));
   1052 
   1053 			if (class_exists && !is_backlogged)
   1054 				cl->service_rate = 0;
   1055 			else if (is_backlogged)
   1056 				cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
   1057 		}
   1058 	}
   1059 
   1060 	return (updated);
   1061 }
   1062 
   1063 /*
   1064  * adjust_rates_rdc: compute the service rates adjustments
   1065  * needed to realize the desired proportional delay differentiation.
   1066  * essentially, the rate adjustement delta_rate = prop_control*error,
   1067  * where error is the difference between the measured "weighted"
   1068  * delay and the mean of the weighted delays. see paper for more
   1069  * information.
   1070  * prop_control has slightly changed since the INFOCOM paper,
   1071  * this condition seems to provide better results.
   1072  */
   1073 
   1074 static int64_t *
   1075 adjust_rates_rdc(struct jobs_if *jif)
   1076 {
   1077 	int64_t *result;
   1078 	int64_t credit, available, lower_bound, upper_bound;
   1079 	int64_t bk;
   1080 	int i, j;
   1081 	int rdc_classes, active_classes;
   1082 	int class_exists, is_backlogged;
   1083 	struct jobs_class *cl;
   1084 	int64_t *error;
   1085 	int64_t prop_control;
   1086 	u_int64_t max_prod;
   1087 	u_int64_t min_share;
   1088 	u_int64_t max_avg_pkt_size;
   1089 
   1090 	/*
   1091 	 * min_share is scaled
   1092 	 * to avoid dealing with doubles
   1093 	 */
   1094 	active_classes = 0;
   1095 	rdc_classes = 0;
   1096 	max_prod = 0;
   1097 	max_avg_pkt_size = 0;
   1098 
   1099 	upper_bound = (int64_t)jif->jif_bandwidth;
   1100 
   1101 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1102 		cl = jif->jif_classes[i];
   1103 		class_exists = (cl != NULL);
   1104 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1105 		if (is_backlogged) {
   1106 			active_classes++;
   1107 			if (cl->concerned_rdc)
   1108 				rdc_classes++;
   1109 			else
   1110 				upper_bound -=
   1111 				    internal_to_bps(cl->service_rate);
   1112 		}
   1113 	}
   1114 
   1115 	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t),
   1116 	    M_DEVBUF, M_WAITOK);
   1117 
   1118 	if (result == NULL)
   1119 		return NULL;
   1120 
   1121 	for (i = 0; i <= jif->jif_maxpri; i++)
   1122 		result[i] = 0;
   1123 
   1124 	if (upper_bound <= 0 || rdc_classes == 0)
   1125 		return result;
   1126 
   1127 	credit = 0;
   1128 	lower_bound = 0;
   1129 	min_share = ((u_int64_t)1 << SCALE_SHARE);
   1130 	bk = 0;
   1131 
   1132 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1133 		cl = jif->jif_classes[i];
   1134 		class_exists = (cl != NULL);
   1135 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1136 		if (is_backlogged && cl->concerned_rdc)
   1137 			bk += cl->cl_rin.bytes;
   1138 	}
   1139 
   1140 	if (bk == 0)
   1141 		return (result);
   1142 
   1143 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1144 		cl = jif->jif_classes[i];
   1145 		class_exists = (cl != NULL);
   1146 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1147 		if (is_backlogged
   1148 		    && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
   1149 			min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
   1150 		if (is_backlogged && cl->concerned_rdc
   1151 		    && cl->delay_prod_others > max_prod)
   1152 			max_prod = cl->delay_prod_others;
   1153 
   1154 		if (is_backlogged && cl->concerned_rdc
   1155 		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
   1156 			max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
   1157 	}
   1158 
   1159 	error = update_error(jif);
   1160 	if (!error)
   1161 		return (NULL);
   1162 
   1163 	prop_control = (upper_bound*upper_bound*min_share)
   1164 	    /(max_prod*(max_avg_pkt_size << 2));
   1165 
   1166 	prop_control = bps_to_internal(ticks_to_secs(prop_control)); /* in BT-1 */
   1167 
   1168 	credit = 0;
   1169 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1170 		cl = jif->jif_classes[i];
   1171 		class_exists = (cl != NULL);
   1172 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1173 		if (is_backlogged && cl->concerned_rdc) {
   1174 			result[i] = -prop_control*error[i]; /* in BT-1 */
   1175 			result[i] >>= (SCALE_SHARE);
   1176 		}
   1177 	}
   1178 
   1179 	free(error, M_DEVBUF); /* we don't need these anymore */
   1180 
   1181 	/* saturation */
   1182 
   1183 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1184 		cl = jif->jif_classes[i];
   1185 		class_exists = (cl != NULL);
   1186 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1187 
   1188 		if (is_backlogged && cl->concerned_rdc)
   1189 			lower_bound += cl->min_rate_adc;
   1190 		/*
   1191 		 * note: if there's no ADC or ARC on cl,
   1192 		 * this is equal to zero, which is fine
   1193 		 */
   1194 	}
   1195 
   1196 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1197 		cl = jif->jif_classes[i];
   1198 		class_exists = (cl != NULL);
   1199 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1200 
   1201 		if (is_backlogged && cl->concerned_rdc
   1202 		    && result[i] + cl->service_rate > upper_bound) {
   1203 			for (j = 0; j <= jif->jif_maxpri; j++) {
   1204 				cl = jif->jif_classes[j];
   1205 				class_exists = (cl != NULL);
   1206 				is_backlogged = (class_exists
   1207 						 && !qempty(cl->cl_q));
   1208 				if (is_backlogged && cl->concerned_rdc) {
   1209 					if (j == i)
   1210 						result[j] = upper_bound
   1211 						    -cl->service_rate
   1212 						    + cl->min_rate_adc
   1213 						    - lower_bound;
   1214 					else
   1215 						result[j] =
   1216 						    -cl->service_rate
   1217 						    +cl->min_rate_adc;
   1218 				}
   1219 			}
   1220 			return result;
   1221 		}
   1222 
   1223 		cl = jif->jif_classes[i];
   1224 		/* do this again since it may have been modified */
   1225 		class_exists = (cl != NULL);
   1226 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1227 
   1228 		if (is_backlogged && cl->concerned_rdc
   1229 		    && result[i] + cl->service_rate < cl->min_rate_adc) {
   1230 			credit += cl->service_rate+result[i]
   1231 			    -cl->min_rate_adc;
   1232 			/* "credit" is in fact a negative number */
   1233 			result[i] = -cl->service_rate+cl->min_rate_adc;
   1234 		}
   1235 	}
   1236 
   1237 	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
   1238 		cl = jif->jif_classes[i];
   1239 		class_exists = (cl != NULL);
   1240 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1241 
   1242 		if (is_backlogged && cl->concerned_rdc) {
   1243 			available = result[i]
   1244 			    + cl->service_rate-cl->min_rate_adc;
   1245 			if (available >= -credit) {
   1246 				result[i] += credit;
   1247 				credit = 0;
   1248 			} else {
   1249 				result[i] -= available;
   1250 				credit += available;
   1251 			}
   1252 		}
   1253 	}
   1254 	return result;
   1255 }
   1256 
   1257 /*
   1258  * assign_rate_drops_adc: returns the adjustment needed to
   1259  * the service rates to meet the absolute delay/rate constraints
   1260  * (delay/throughput bounds) and drops traffic if need be.
   1261  * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
   1262  */
   1263 
   1264 static int64_t *
   1265 assign_rate_drops_adc(struct jobs_if *jif)
   1266 {
   1267 	int64_t *result;
   1268 	int class_exists, is_backlogged;
   1269 	struct jobs_class *cl;
   1270 
   1271 	int64_t *c, *n, *k;
   1272 	int64_t *available;
   1273 
   1274 	int lowest, highest;
   1275 	int keep_going;
   1276 	int i;
   1277 	u_int64_t now, oldest_arv;
   1278 	int64_t	remaining_time;
   1279 	struct mbuf* pkt;
   1280 	u_int64_t len;
   1281 
   1282 	now = read_machclk();
   1283 	oldest_arv = now;
   1284 
   1285 	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
   1286 	if (result == NULL)
   1287 		return NULL;
   1288 	c = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1289 	if (c == NULL)
   1290 		return NULL;
   1291 	n = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1292 	if (n == NULL)
   1293 		return NULL;
   1294 	k = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1295 	if (k == NULL)
   1296 		return NULL;
   1297 	available = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
   1298 	if (available == NULL)
   1299 		return NULL;
   1300 
   1301 	for (i = 0; i <= jif->jif_maxpri; i++)
   1302 		result[i] = 0;
   1303 
   1304 	keep_going = 1;
   1305 
   1306 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1307 		cl = jif->jif_classes[i];
   1308 		class_exists = (cl != NULL);
   1309 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1310 
   1311 		if (is_backlogged) {
   1312 			if (cl->concerned_adc) {
   1313 				/*
   1314 				 * get the arrival time of the oldest
   1315 				 * class-i packet
   1316 				 */
   1317 				if (tslist_first(cl->arv_tm) == NULL)
   1318 					oldest_arv = now; /* NOTREACHED */
   1319 				else
   1320 					oldest_arv = (tslist_first(cl->arv_tm))->timestamp;
   1321 
   1322 				n[i] = cl->service_rate;
   1323 				k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));
   1324 
   1325 				remaining_time = cl->cl_adc
   1326 				    - (int64_t)delay_diff(now, oldest_arv);
   1327 				if (remaining_time > 0) {
   1328 					c[i] = remaining_time;
   1329 					/*
   1330 					 * c is the remaining time before
   1331 					 * the deadline is violated
   1332 					 * (in ticks)
   1333 					 */
   1334 					available[i] = n[i]-k[i]/c[i];
   1335 				} else {
   1336 					/*
   1337 					 * deadline has passed...
   1338 					 * we allocate the whole link
   1339 					 * capacity to hopefully
   1340 					 * solve the problem
   1341 					 */
   1342 					c[i] = 0;
   1343 					available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth));
   1344 				}
   1345 				if (cl->concerned_arc) {
   1346 					/*
   1347 					 * there's an ARC in addition
   1348 					 * to the ADC
   1349 					 */
   1350 					if (n[i] - cl->cl_arc < available[i])
   1351 						available[i] = n[i]
   1352 						    - cl->cl_arc;
   1353 				}
   1354 			} else if (cl->concerned_arc) {
   1355 				/*
   1356 				 * backlogged, concerned by ARC
   1357 				 * but not by ADC
   1358 				 */
   1359 				n[i] = cl->service_rate;
   1360 				available[i] = n[i] - cl->cl_arc;
   1361 			} else {
   1362 				/*
   1363 				 * backlogged but not concerned by ADC
   1364 				 * or ARC -> can give everything
   1365 				 */
   1366 				n[i] = cl->service_rate;
   1367 				available[i] = n[i];
   1368 			}
   1369 		} else {
   1370 			/* not backlogged */
   1371 			n[i] = 0;
   1372 			k[i] = 0;
   1373 			c[i] = 0;
   1374 			if (class_exists)
   1375 				available[i] = cl->service_rate;
   1376 			else
   1377 				available[i] = 0;
   1378 		}
   1379 	}
   1380 
   1381 	/* step 1: adjust rates (greedy algorithm) */
   1382 
   1383 	highest = 0;
   1384 	lowest  = jif->jif_maxpri;
   1385 
   1386 	while (highest < jif->jif_maxpri+1 && available[highest] >= 0)
   1387 		highest++; /* which is the highest class that needs more service? */
   1388 	while (lowest > 0 && available[lowest] <= 0)
   1389 		lowest--;  /* which is the lowest class that needs less service? */
   1390 
   1391 	while (highest != jif->jif_maxpri+1 && lowest != -1) {
   1392 		/* give the excess service from lowest to highest */
   1393 		if (available[lowest]+available[highest] > 0) {
   1394 			/*
   1395 			 * still some "credit" left
   1396 			 * give all that is needed by "highest"
   1397 			 */
   1398 			n[lowest]  += available[highest];
   1399 			n[highest] -= available[highest];
   1400 			available[lowest]  += available[highest];
   1401 			available[highest] = 0;
   1402 
   1403 			while (highest < jif->jif_maxpri+1
   1404 			       && available[highest] >= 0)
   1405 				highest++;  /* which is the highest class that needs more service now? */
   1406 
   1407 		} else if (available[lowest]+available[highest] == 0) {
   1408 			/* no more credit left but it's fine */
   1409 			n[lowest]  += available[highest];
   1410 			n[highest] -= available[highest];
   1411 			available[highest] = 0;
   1412 			available[lowest]  = 0;
   1413 
   1414 			while (highest < jif->jif_maxpri+1
   1415 			       && available[highest] >= 0)
   1416 				highest++;  /* which is the highest class that needs more service? */
   1417 			while (lowest >= 0 && available[lowest] <= 0)
   1418 				lowest--;   /* which is the lowest class that needs less service? */
   1419 
   1420 		} else if (available[lowest]+available[highest] < 0) {
   1421 			/*
   1422 			 * no more credit left and we need to switch
   1423 			 * to another class
   1424 			 */
   1425 			n[lowest]  -= available[lowest];
   1426 			n[highest] += available[lowest];
   1427 			available[highest] += available[lowest];
   1428 			available[lowest]  = 0;
   1429 
   1430 			while ((lowest >= 0)&&(available[lowest] <= 0))
   1431 				lowest--;  /* which is the lowest class that needs less service? */
   1432 		}
   1433 	}
   1434 
   1435 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1436 		cl = jif->jif_classes[i];
   1437 		class_exists = (cl != NULL);
   1438 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1439 		if (is_backlogged) {
   1440 			result[i] = n[i] - cl->service_rate;
   1441 		} else {
   1442 			if (class_exists)
   1443 				result[i] = - cl->service_rate;
   1444 			else
   1445 				result[i] = 0;
   1446 		}
   1447 	}
   1448 
   1449 	/* step 2: adjust drops (for ADC) */
   1450 
   1451 	if (highest != jif->jif_maxpri+1) {
   1452 		/* some class(es) still need(s) additional service */
   1453 		for (i = 0; i <= jif->jif_maxpri; i++) {
   1454 			cl = jif->jif_classes[i];
   1455 			class_exists = (cl != NULL);
   1456 			is_backlogged = (class_exists
   1457 					 && !qempty(cl->cl_q));
   1458 			if (is_backlogged && available[i] < 0) {
   1459 				if (cl->concerned_adc) {
   1460 					k[i] = c[i]*n[i];
   1461 					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
   1462 						pkt = qtail(cl->cl_q);
   1463 						if (pkt != NULL) {
   1464 							/* "safeguard" test (a packet SHOULD be in there) */
   1465 							len = (u_int64_t)m_pktlen(pkt);
   1466 							/* access packet at the tail */
   1467 							if (cl->concerned_alc
   1468 							    && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
   1469 								keep_going = 0; /* relax ADC in favor of ALC */
   1470 							} else {
   1471 								/* drop packet at the tail of the class-i queue, update values */
   1472 								pkt = _getq_tail(cl->cl_q);
   1473 								len = (u_int64_t)m_pktlen(pkt);
   1474 								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
   1475 								PKTCNTR_SUB(&cl->cl_rin, (int)len);
   1476 								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
   1477 								PKTCNTR_SUB(&cl->st_rin, (int)len);
   1478 								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
   1479 								m_freem(pkt); /* the packet is trashed here */
   1480 								tslist_drop(cl);
   1481 								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
   1482 							}
   1483 						} else
   1484 							keep_going = 0; /* NOTREACHED */
   1485 					}
   1486 					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
   1487 				}
   1488 				/*
   1489 				 * n[i] is the max rate we can give.
   1490 				 * the above drops as much as possible
   1491 				 * to respect a delay bound.
   1492 				 * for throughput bounds,
   1493 				 * there's nothing that can be done
   1494 				 * after the greedy reallocation.
   1495 				 */
   1496 			}
   1497 		}
   1498 	}
   1499 
   1500 	/* update the values of min_rate_adc */
   1501 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1502 		cl = jif->jif_classes[i];
   1503 		class_exists = (cl != NULL);
   1504 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1505 		if (is_backlogged && cl->concerned_adc) {
   1506 			if (c[i] != 0) {
   1507 				if (cl->concerned_adc
   1508 				    && !cl->concerned_arc)
   1509 					cl->min_rate_adc = k[i]/c[i];
   1510 				else
   1511 					cl->min_rate_adc = n[i];
   1512 			} else
   1513 				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
   1514 		} else if (is_backlogged && cl->concerned_arc)
   1515 			cl->min_rate_adc = n[i]; /* the best we can give */
   1516 		else {
   1517 			if (class_exists)
   1518 				cl->min_rate_adc = 0;
   1519 		}
   1520 	}
   1521 
   1522 	free(c, M_DEVBUF);
   1523 	free(n, M_DEVBUF);
   1524 	free(k, M_DEVBUF);
   1525 	free(available, M_DEVBUF);
   1526 
   1527 	return (result);
   1528 }
   1529 
   1530 /*
   1531  * update_error: returns the difference between the mean weighted
   1532  * delay and the weighted delay for each class. if proportional
   1533  * delay differentiation is perfectly achieved, it should return
   1534  * zero for each class.
   1535  */
static int64_t *
update_error(struct jobs_if *jif)
{
	int i;
	int active_classes, backlogged_classes;
	u_int64_t mean_weighted_delay;
	/*
	 * NOTE(review): delays[] has JOBS_MAXPRI slots but is indexed
	 * by 0..jif_maxpri below — assumes jif_maxpri < JOBS_MAXPRI;
	 * confirm against the attach/add-class validation code.
	 */
	u_int64_t delays[JOBS_MAXPRI];
	int64_t* error;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	/* one slot per priority; the caller owns (and frees) the array */
	error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	if (error == NULL)
		return NULL;

	mean_weighted_delay = 0;
	active_classes = 0;
	backlogged_classes = 0;

	/*
	 * pass 1: for each backlogged RDC class, record its projected
	 * delay and accumulate the weighted-delay sum for the mean.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			backlogged_classes++;
			if (cl->concerned_rdc) {
				delays[i] = proj_delay(jif, i);
				mean_weighted_delay += cl->delay_prod_others*delays[i];
				active_classes ++;
			}
		}
	}

	/* no RDC class backlogged: return the all-zero error vector */
	if (active_classes == 0)
		return error;
	else
		mean_weighted_delay /= active_classes;

	/*
	 * pass 2: the error is the deviation of each class' weighted
	 * delay from the mean (zero when differentiation is perfect).
	 * delays[i] was filled in pass 1 under the same guard, so it
	 * is initialized for every class read here.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
		else
			error[i] = 0; /*
				       * either the class isn't concerned,
				       * or it's not backlogged.
				       * in any case, the rate shouldn't
				       * be adjusted.
				       */
	}
	return error;
}
   1594 
   1595 /*
   1596  * min_rates_adc: computes the minimum service rates needed in
   1597  * each class to meet the absolute delay bounds. if, for any
   1598  * class i, the current service rate of class i is less than
   1599  * the computed minimum service rate, this function returns
   1600  * false, true otherwise.
   1601  */
   1602 static int
   1603 min_rates_adc(struct jobs_if *jif)
   1604 {
   1605 	int result;
   1606 	int i;
   1607 	int class_exists, is_backlogged;
   1608 	int64_t remaining_time;
   1609 	struct jobs_class *cl;
   1610 	result = 1;
   1611 
   1612 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1613 		cl = jif->jif_classes[i];
   1614 		class_exists = (cl != NULL);
   1615 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1616 		if (is_backlogged && cl->concerned_adc) {
   1617 			remaining_time = cl->cl_adc - proj_delay(jif, i);
   1618 			if (remaining_time > 0 ) {
   1619 				/* min rate needed for ADC */
   1620 				cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
   1621 				if (cl->concerned_arc
   1622 				    && cl->cl_arc > cl->min_rate_adc) {
   1623 					/* min rate needed for ADC + ARC */
   1624 					cl->min_rate_adc = cl->cl_arc;
   1625 				}
   1626 			} else {
   1627 				/* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */
   1628 				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
   1629 			}
   1630       		} else if (is_backlogged && cl->concerned_arc)
   1631 			cl->min_rate_adc = cl->cl_arc; 			/* no ADC, an ARC */
   1632 		else if (class_exists)
   1633 			cl->min_rate_adc = 0;	/*
   1634 						 * either the class is not
   1635 						 * backlogged
   1636 						 * or there is no ADC and
   1637 						 * no ARC
   1638 						 */
   1639 		if (is_backlogged && cl->min_rate_adc > cl->service_rate)
   1640 			result = 0;
   1641 	}
   1642 
   1643 	return result;
   1644 }
   1645 
   1646 /*
   1647  * proj_delay: computes the difference between the current time
   1648  * and the time the oldest class-i packet still in the class-i
   1649  * queue i arrived in the system.
   1650  */
   1651 static int64_t
   1652 proj_delay(struct jobs_if *jif, int i)
   1653 {
   1654 	u_int64_t now;
   1655 	int class_exists, is_backlogged;
   1656 	struct jobs_class *cl;
   1657 
   1658 	now = read_machclk();
   1659 	cl = jif->jif_classes[i];
   1660 	class_exists = (cl != NULL);
   1661 	is_backlogged = (class_exists && !qempty(cl->cl_q));
   1662 
   1663 	if (is_backlogged)
   1664 		return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));
   1665 
   1666 	return (0); /* NOTREACHED */
   1667 }
   1668 
   1669 /*
   1670  * pick_dropped_rlc: returns the class index of the class to be
   1671  * dropped for meeting the relative loss constraints.
   1672  */
static int
pick_dropped_rlc(struct jobs_if *jif)
{
	int64_t mean;
	int64_t* loss_error;
	int i, active_classes, backlogged_classes;
	int class_exists, is_backlogged;
	int class_dropped;
	int64_t max_error;
	int64_t max_alc;
	struct mbuf* pkt;
	struct jobs_class *cl;
	u_int64_t len;

	/* per-priority deviation from the mean weighted loss */
	loss_error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1),
	    M_DEVBUF, M_WAITOK);

	if (loss_error == NULL)
		return -1;

	class_dropped = -1;
	max_error = 0;
	mean = 0;
	active_classes = 0;
	backlogged_classes = 0;

	/*
	 * pass 1: compute the mean weighted loss over the backlogged
	 * classes concerned by relative loss constraints (RLC).
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			backlogged_classes ++;
			if (cl->concerned_rlc) {
				mean += cl->loss_prod_others
				    * cl->current_loss;
				active_classes++;
			}
		}
	}

	if (active_classes > 0)
		mean /= active_classes;

	if (active_classes == 0)
		class_dropped = JOBS_MAXPRI+1; /*
						* no classes are concerned
						* by RLCs (JOBS_MAXPRI+1
						* means "ignore RLC" here)
						*/
	else {
		/*
		 * pass 2: per-class deviation from the mean; classes
		 * not eligible get INFINITY so they are never picked.
		 * NOTE(review): this tests cl->cl_rlc where pass 1
		 * tested cl->concerned_rlc — presumably equivalent
		 * (cl_rlc nonzero iff concerned); confirm.
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));

			if ((is_backlogged)&&(cl->cl_rlc))
				loss_error[i]=cl->loss_prod_others
				    *cl->current_loss-mean;
			else
				loss_error[i] = INFINITY;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && loss_error[i] <= max_error) {
				/*
				 * find out which class is the most
				 * below the mean.
				 * it's the one that needs to be dropped
				 * ties are broken in favor of the higher
				 * priority classes (i.e., if two classes
				 * present the same deviation, the lower
				 * priority class will get dropped).
				 */
				max_error = loss_error[i];
				class_dropped = i;
			}
		}

		if (class_dropped != -1) {
			cl = jif->jif_classes[class_dropped];
			pkt = qtail(cl->cl_q);
			if (pkt != NULL) {
				/*
				 * "safeguard" test (a packet SHOULD be
				 * in there)
				 */
				len = (u_int64_t)m_pktlen(pkt);
				/* access packet at the tail */
				if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
					/*
					 * the class to drop for meeting
					 * the RLC will defeat the ALC:
					 * ignore RLC.
					 */
					class_dropped = JOBS_MAXPRI+1;
				}
			} else
				class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */
		} else
			class_dropped = JOBS_MAXPRI+1;
	}

	/*
	 * RLC was ignored (or inapplicable): fall back to the class
	 * with the most slack with respect to its absolute loss
	 * constraint (or to full loss, if it has no ALC).
	 */
	if (class_dropped == JOBS_MAXPRI+1) {
		max_alc = -((int64_t)1 << SCALE_LOSS);
		for (i = jif->jif_maxpri; i >= 0; i--) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged) {
				if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
					max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
					class_dropped = i;
				} else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) {
					max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss;
					class_dropped = i;
				}
			}
		}
	}

	free(loss_error, M_DEVBUF);
	return (class_dropped);
}
   1802 
   1803 /*
   1804  * ALTQ binding/setup functions
   1805  */
   1806 /*
   1807  * jobs device interface
   1808  */
   1809 int
   1810 jobsopen(dev_t dev __unused, int flag __unused, int fmt __unused,
   1811     struct lwp *l __unused)
   1812 {
   1813 	if (machclk_freq == 0)
   1814 		init_machclk();
   1815 
   1816 	if (machclk_freq == 0) {
   1817 		printf("jobs: no CPU clock available!\n");
   1818 		return (ENXIO);
   1819 	}
   1820 	/* everything will be done when the queueing scheme is attached. */
   1821 	return 0;
   1822 }
   1823 
   1824 int
   1825 jobsclose(dev_t dev __unused, int flag __unused, int fmt __unused,
   1826     struct lwp *l __unused)
   1827 {
   1828 	struct jobs_if *jif;
   1829 	int err, error = 0;
   1830 
   1831 	while ((jif = jif_list) != NULL) {
   1832 		/* destroy all */
   1833 		if (ALTQ_IS_ENABLED(jif->jif_ifq))
   1834 			altq_disable(jif->jif_ifq);
   1835 
   1836 		err = altq_detach(jif->jif_ifq);
   1837 		if (err == 0)
   1838 			err = jobs_detach(jif);
   1839 		if (err != 0 && error == 0)
   1840 			error = err;
   1841 	}
   1842 
   1843 	return error;
   1844 }
   1845 
   1846 int
   1847 jobsioctl(dev_t dev __unused, ioctlcmd_t cmd, caddr_t addr, int flag __unused,
   1848     struct lwp *l)
   1849 {
   1850 	struct jobs_if *jif;
   1851 	struct jobs_interface *ifacep;
   1852 	struct proc *p = l->l_proc;
   1853 	int	error = 0;
   1854 
   1855 	/* check super-user privilege */
   1856 	switch (cmd) {
   1857 	case JOBS_GETSTATS:
   1858 		break;
   1859 	default:
   1860 #if (__FreeBSD_version > 400000)
   1861 		if ((error = suser(p)) != 0)
   1862 			return (error);
   1863 #else
   1864 		if ((error = kauth_authorize_network(p->p_cred,
   1865 		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_JOBS, NULL,
   1866 		    NULL, NULL)) != 0)
   1867 			return (error);
   1868 #endif
   1869 		break;
   1870 	}
   1871 
   1872 	switch (cmd) {
   1873 
   1874 	case JOBS_IF_ATTACH:
   1875 		error = jobscmd_if_attach((struct jobs_attach *)addr);
   1876 		break;
   1877 
   1878 	case JOBS_IF_DETACH:
   1879 		error = jobscmd_if_detach((struct jobs_interface *)addr);
   1880 		break;
   1881 
   1882 	case JOBS_ENABLE:
   1883 	case JOBS_DISABLE:
   1884 	case JOBS_CLEAR:
   1885 		ifacep = (struct jobs_interface *)addr;
   1886 		if ((jif = altq_lookup(ifacep->jobs_ifname,
   1887 				       ALTQT_JOBS)) == NULL) {
   1888 			error = EBADF;
   1889 			break;
   1890 		}
   1891 
   1892 		switch (cmd) {
   1893 		case JOBS_ENABLE:
   1894 			if (jif->jif_default == NULL) {
   1895 #if 1
   1896 				printf("jobs: no default class\n");
   1897 #endif
   1898 				error = EINVAL;
   1899 				break;
   1900 			}
   1901 			error = altq_enable(jif->jif_ifq);
   1902 			break;
   1903 
   1904 		case JOBS_DISABLE:
   1905 			error = altq_disable(jif->jif_ifq);
   1906 			break;
   1907 
   1908 		case JOBS_CLEAR:
   1909 			jobs_clear_interface(jif);
   1910 			break;
   1911 		}
   1912 		break;
   1913 
   1914 		case JOBS_ADD_CLASS:
   1915 			error = jobscmd_add_class((struct jobs_add_class *)addr);
   1916 			break;
   1917 
   1918 	case JOBS_DEL_CLASS:
   1919 		error = jobscmd_delete_class((struct jobs_delete_class *)addr);
   1920 		break;
   1921 
   1922 	case JOBS_MOD_CLASS:
   1923 		error = jobscmd_modify_class((struct jobs_modify_class *)addr);
   1924 		break;
   1925 
   1926 	case JOBS_ADD_FILTER:
   1927 		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
   1928 		break;
   1929 
   1930 	case JOBS_DEL_FILTER:
   1931 		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
   1932 		break;
   1933 
   1934 	case JOBS_GETSTATS:
   1935 		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
   1936 		break;
   1937 
   1938 	default:
   1939 		error = EINVAL;
   1940 		break;
   1941 	}
   1942 	return error;
   1943 }
   1944 
   1945 static int
   1946 jobscmd_if_attach(struct jobs_attach *ap)
   1947 {
   1948 	struct jobs_if *jif;
   1949 	struct ifnet *ifp;
   1950 	int error;
   1951 
   1952 	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
   1953 		return (ENXIO);
   1954 	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth, ap->qlimit, ap->separate)) == NULL)
   1955 		return (ENOMEM);
   1956 
   1957 	/*
   1958 	 * set JOBS to this ifnet structure.
   1959 	 */
   1960 	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
   1961 				 jobs_enqueue, jobs_dequeue, jobs_request,
   1962 				 &jif->jif_classifier, acc_classify)) != 0)
   1963 		(void)jobs_detach(jif);
   1964 
   1965 	return (error);
   1966 }
   1967 
   1968 static int
   1969 jobscmd_if_detach(struct jobs_interface *ap)
   1970 {
   1971 	struct jobs_if *jif;
   1972 	int error;
   1973 
   1974 	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
   1975 		return (EBADF);
   1976 
   1977 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
   1978 		altq_disable(jif->jif_ifq);
   1979 
   1980 	if ((error = altq_detach(jif->jif_ifq)))
   1981 		return (error);
   1982 
   1983 	return jobs_detach(jif);
   1984 }
   1985 
   1986 static int
   1987 jobscmd_add_class(struct jobs_add_class *ap)
   1988 {
   1989 	struct jobs_if *jif;
   1990 	struct jobs_class *cl;
   1991 
   1992 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   1993 		return (EBADF);
   1994 
   1995 	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
   1996 		return (EINVAL);
   1997 
   1998 	if ((cl = jobs_class_create(jif, ap->pri,
   1999 				    ap->cl_adc, ap->cl_rdc,
   2000 				    ap->cl_alc, ap->cl_rlc, ap-> cl_arc,
   2001 				    ap->flags)) == NULL)
   2002 		return (ENOMEM);
   2003 
   2004 	/* return a class handle to the user */
   2005 	ap->class_handle = clp_to_clh(cl);
   2006 	return (0);
   2007 }
   2008 
   2009 static int
   2010 jobscmd_delete_class(struct jobs_delete_class *ap)
   2011 {
   2012 	struct jobs_if *jif;
   2013 	struct jobs_class *cl;
   2014 
   2015 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2016 		return (EBADF);
   2017 
   2018 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2019 		return (EINVAL);
   2020 
   2021 	return jobs_class_destroy(cl);
   2022 }
   2023 
/*
 * Modify the parameters (and possibly the priority) of the existing
 * class identified by ap->class_handle on the named interface.
 * Returns EBADF for an unknown interface, EINVAL for a bad priority or
 * handle, EEXIST if the target priority slot is occupied by another
 * class, ENOMEM if re-creation of the class fails.
 */
static int
jobscmd_modify_class(struct jobs_modify_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		if (jif->jif_classes[ap->pri] != NULL)
			return (EEXIST);
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	/*
	 * NOTE(review): jobs_class_create is presumably expected to
	 * re-initialize the class already installed at ap->pri rather
	 * than allocate a fresh one; if it fails here the class has
	 * already been moved to its new priority slot above — confirm
	 * callers tolerate that partial update.
	 */
	if ((cl = jobs_class_create(jif, ap->pri,
				    ap->cl_adc, ap->cl_rdc,
				    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
				    ap->flags)) == NULL)
		return (ENOMEM);
	return 0;
}
   2058 
   2059 static int
   2060 jobscmd_add_filter(struct jobs_add_filter *ap)
   2061 {
   2062 	struct jobs_if *jif;
   2063 	struct jobs_class *cl;
   2064 
   2065 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2066 		return (EBADF);
   2067 
   2068 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2069 		return (EINVAL);
   2070 
   2071 	return acc_add_filter(&jif->jif_classifier, &ap->filter,
   2072 			      cl, &ap->filter_handle);
   2073 }
   2074 
   2075 static int
   2076 jobscmd_delete_filter(struct jobs_delete_filter *ap)
   2077 {
   2078 	struct jobs_if *jif;
   2079 
   2080 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2081 		return (EBADF);
   2082 
   2083 	return acc_delete_filter(&jif->jif_classifier, ap->filter_handle);
   2084 }
   2085 
   2086 static int
   2087 jobscmd_class_stats(struct jobs_class_stats *ap)
   2088 {
   2089 	struct jobs_if *jif;
   2090 	struct jobs_class *cl;
   2091 	struct class_stats stats, *usp;
   2092 	int pri, error;
   2093 
   2094 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2095 		return (EBADF);
   2096 
   2097 	ap->maxpri = jif->jif_maxpri;
   2098 
   2099 	/* then, read the next N classes */
   2100 	usp = ap->stats;
   2101 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   2102 		cl = jif->jif_classes[pri];
   2103 		if (cl != NULL)
   2104 			get_class_stats(&stats, cl);
   2105 		else
   2106 			(void)memset(&stats, 0, sizeof(stats));
   2107 		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
   2108 				     sizeof(stats))) != 0)
   2109 			return (error);
   2110 	}
   2111 	return (0);
   2112 }
   2113 
/*
 * Fill *sp with a snapshot of the statistics for class cl.  Note that
 * this is not a pure read: the per-interval counters st_rin/st_rout
 * and the running delay average cl_avgdel are reset as a side effect,
 * so each call reports activity since the previous call.
 */
static void
get_class_stats(struct class_stats *sp, struct jobs_class *cl)
{
	u_int64_t now;
	now = read_machclk();

	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/* st_arrival is deliberately NOT reset (kept cumulative) */
	/*  PKTCNTR_RESET(&cl->st_arrival);*/
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	/* total queue length across the whole interface, not just cl */
	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	cl->cl_avgdel = 0;

	/* time since the class last went idle; presumably in the same
	 * units as ticks_to_secs expects after the 1000x scaling —
	 * TODO(review): confirm against delay_diff()/GRANULARITY */
	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	/* per-interface enqueue/dequeue cycle-count instrumentation,
	 * shared by all classes on the interface */
	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}
   2154 
   2155 /* convert a class handle to the corresponding class pointer */
   2156 static struct jobs_class *
   2157 clh_to_clp(struct jobs_if *jif, u_long chandle)
   2158 {
   2159 	struct jobs_class *cl;
   2160 
   2161 	cl = (struct jobs_class *)chandle;
   2162 	if (chandle != ALIGN(cl)) {
   2163 #if 1
   2164 		printf("clh_to_cl: unaligned pointer %p\n", cl);
   2165 #endif
   2166 		return (NULL);
   2167 	}
   2168 
   2169 	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
   2170 		return (NULL);
   2171 	return (cl);
   2172 }
   2173 
   2174 /* convert a class pointer to the corresponding class handle */
   2175 static u_long
   2176 clp_to_clh(struct jobs_class *cl)
   2177 {
   2178 	return (cl->cl_handle);
   2179 }
   2180 
   2181 #ifdef KLD_MODULE
   2182 
   2183 static struct altqsw jobs_sw =
   2184 	{"jobs", jobsopen, jobsclose, jobsioctl};
   2185 
   2186 ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);
   2187 
   2188 #endif /* KLD_MODULE */
   2189 
   2190 #endif /* ALTQ3_COMPAT */
   2191 #endif /* ALTQ_JOBS */
   2192