altq_jobs.c revision 1.1.2.2       1 /*	$NetBSD: altq_jobs.c,v 1.1.2.2 2006/06/09 19:52:35 peter Exp $	*/
      2 /*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/
      3 /*
      4  * Copyright (c) 2001, the Rector and Board of Visitors of the
      5  * University of Virginia.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms,
      9  * with or without modification, are permitted provided
     10  * that the following conditions are met:
     11  *
     12  * Redistributions of source code must retain the above
     13  * copyright notice, this list of conditions and the following
     14  * disclaimer.
     15  *
     16  * Redistributions in binary form must reproduce the above
     17  * copyright notice, this list of conditions and the following
     18  * disclaimer in the documentation and/or other materials provided
     19  * with the distribution.
     20  *
     21  * Neither the name of the University of Virginia nor the names
     22  * of its contributors may be used to endorse or promote products
     23  * derived from this software without specific prior written
     24  * permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
     27  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
     28  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     29  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     30  * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
     31  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     32  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
     35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     38  * THE POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 /*
     41  * JoBS - altq prototype implementation
     42  *
     43  * Author: Nicolas Christin <nicolas (at) cs.virginia.edu>
     44  *
     45  * JoBS algorithms originally devised and proposed by
     46  * Nicolas Christin and Jorg Liebeherr.
     47  * Grateful acknowledgments to Tarek Abdelzaher for his help and
     48  * comments, and to Kenjiro Cho for some helpful advice.
     49  * Contributed by the Multimedia Networks Group at the University
     50  * of Virginia.
     51  *
     52  * Papers and additional info can be found at
     53  * http://qosbox.cs.virginia.edu
     54  *
     55  */
     56 
     57 /*
     58  * JoBS queue
     59  */
     60 
     61 #include <sys/cdefs.h>
     62 __KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.1.2.2 2006/06/09 19:52:35 peter Exp $");
     63 
     64 #ifdef _KERNEL_OPT
     65 #include "opt_altq.h"
     66 #include "opt_inet.h"
     67 #endif
     68 
     69 #ifdef ALTQ_JOBS  /* jobs is enabled by ALTQ_JOBS option in opt_altq.h */
     70 
     71 #include <sys/param.h>
     72 #include <sys/malloc.h>
     73 #include <sys/mbuf.h>
     74 #include <sys/socket.h>
     75 #include <sys/sockio.h>
     76 #include <sys/systm.h>
     77 #include <sys/proc.h>
     78 #include <sys/errno.h>
     79 #include <sys/kernel.h>
     80 #include <sys/queue.h>
     81 #include <sys/kauth.h>
     82 
     83 #ifdef __FreeBSD__
     84 #include <sys/limits.h>
     85 #endif
     86 
     87 #include <net/if.h>
     88 #include <net/if_types.h>
     89 
     90 #include <altq/altq.h>
     91 #include <altq/altq_conf.h>
     92 #include <altq/altq_jobs.h>
     93 
     94 #ifdef ALTQ3_COMPAT
     95 /*
     96  * function prototypes
     97  */
     98 static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
     99 static int jobs_detach(struct jobs_if *);
    100 static int jobs_clear_interface(struct jobs_if *);
    101 static int jobs_request(struct ifaltq *, int, void *);
    102 static void jobs_purge(struct jobs_if *);
    103 static struct jobs_class *jobs_class_create(struct jobs_if *,
    104     int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
    105 static int jobs_class_destroy(struct jobs_class *);
    106 static int jobs_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
    107 static struct mbuf *jobs_dequeue(struct ifaltq *, int);
    108 
    109 static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if*);
    110 static struct mbuf *jobs_getq(struct jobs_class *);
    111 static struct mbuf *jobs_pollq(struct jobs_class *);
    112 static void jobs_purgeq(struct jobs_class *);
    113 
    114 static int jobscmd_if_attach(struct jobs_attach *);
    115 static int jobscmd_if_detach(struct jobs_interface *);
    116 static int jobscmd_add_class(struct jobs_add_class *);
    117 static int jobscmd_delete_class(struct jobs_delete_class *);
    118 static int jobscmd_modify_class(struct jobs_modify_class *);
    119 static int jobscmd_add_filter(struct jobs_add_filter *);
    120 static int jobscmd_delete_filter(struct jobs_delete_filter *);
    121 static int jobscmd_class_stats(struct jobs_class_stats *);
    122 static void get_class_stats(struct class_stats *, struct jobs_class *);
    123 static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
    124 static u_long clp_to_clh(struct jobs_class *);
    125 
    126 static TSLIST *tslist_alloc(void);
    127 static void tslist_destroy(struct jobs_class *);
    128 static int tslist_enqueue(struct jobs_class *, u_int64_t);
    129 static void tslist_dequeue(struct jobs_class *);
    130 static void tslist_drop(struct jobs_class *);
    131 
    132 static int enforce_wc(struct jobs_if *);
    133 static int64_t* adjust_rates_rdc(struct jobs_if *);
    134 static int64_t* assign_rate_drops_adc(struct jobs_if *);
    135 static int64_t* update_error(struct jobs_if *);
    136 static int min_rates_adc(struct jobs_if *);
    137 static int64_t proj_delay(struct jobs_if *, int);
    138 static int pick_dropped_rlc(struct jobs_if *);
    139 
    140 altqdev_decl(jobs);
    141 
    142 /* jif_list keeps all jobs_if's allocated. */
    143 static struct jobs_if *jif_list = NULL;
    144 
    145 typedef unsigned long long ull;
    146 
    147 /* setup functions */
    148 
    149 static struct jobs_if *
    150 jobs_attach(ifq, bandwidth, qlimit, separate)
    151 	struct ifaltq *ifq;
    152 	u_int bandwidth;
    153 	u_int qlimit;
    154 	u_int separate;
    155 {
    156 	struct jobs_if *jif;
    157 
    158 	jif = malloc(sizeof(struct jobs_if), M_DEVBUF, M_WAITOK|M_ZERO);
    159 	if (jif == NULL)
    160 	        return (NULL);
    161 
    162 	jif->jif_bandwidth = bandwidth;
    163 	jif->jif_qlimit = qlimit;
    164 	jif->jif_separate = separate;
    165 #ifdef ALTQ_DEBUG
    166 	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
    167 	printf("JoBS buffer size = %d pkts [%s]\n",
    168 	       (int)qlimit, separate?"separate buffers":"shared buffer");
    169 #endif
    170 	jif->jif_maxpri = -1;
    171 	jif->jif_ifq = ifq;
    172 
    173 	jif->wc_cycles_enqueue = 0;
    174 	jif->avg_cycles_enqueue = 0;
    175 	jif->avg_cycles2_enqueue = 0;
    176 	jif->bc_cycles_enqueue = INFINITY;
    177 	jif->wc_cycles_dequeue = 0;
    178 	jif->avg_cycles_dequeue = 0;
    179 	jif->avg_cycles2_dequeue = 0;
    180 	jif->bc_cycles_dequeue = INFINITY;
    181 	jif->total_enqueued = 0;
    182 	jif->total_dequeued = 0;
    183 
    184 	/* add this state to the jobs list */
    185 	jif->jif_next = jif_list;
    186 	jif_list = jif;
    187 
    188 	return (jif);
    189 }
    190 
    191 static int
    192 jobs_detach(jif)
    193 	struct jobs_if *jif;
    194 {
    195 	(void)jobs_clear_interface(jif);
    196 
    197 	/* remove this interface from the jif list */
    198 	if (jif_list == jif)
    199 		jif_list = jif->jif_next;
    200 	else {
    201 		struct jobs_if *p;
    202 
    203 		for (p = jif_list; p != NULL; p = p->jif_next)
    204 			if (p->jif_next == jif) {
    205 				p->jif_next = jif->jif_next;
    206 				break;
    207 			}
    208 		ASSERT(p != NULL);
    209 	}
    210 	free(jif, M_DEVBUF);
    211 	return (0);
    212 }
    213 
    214 /*
    215  * bring the interface back to the initial state by discarding
    216  * all the filters and classes.
    217  */
    218 static int
    219 jobs_clear_interface(jif)
    220 	struct jobs_if	*jif;
    221 {
    222 	struct jobs_class	*cl;
    223 	int	pri;
    224 
    225 	/* free the filters for this interface */
    226 	acc_discard_filters(&jif->jif_classifier, NULL, 1);
    227 
    228 	/* clear out the classes */
    229 	for (pri = 0; pri <= jif->jif_maxpri; pri++)
    230 		if ((cl = jif->jif_classes[pri]) != NULL)
    231 			jobs_class_destroy(cl);
    232 
    233 	return (0);
    234 }
    235 
    236 static int
    237 jobs_request(ifq, req, arg)
    238 	struct ifaltq *ifq;
    239 	int req;
    240 	void *arg;
    241 {
    242 	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
    243 
    244 	switch (req) {
    245 	case ALTRQ_PURGE:
    246 		jobs_purge(jif);
    247 		break;
    248 	}
    249 	return (0);
    250 }
    251 
    252 /* discard all the queued packets on the interface */
    253 static void
    254 jobs_purge(jif)
    255 	struct jobs_if *jif;
    256 {
    257 	struct jobs_class *cl;
    258 	int pri;
    259 
    260 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
    261 		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
    262 			jobs_purgeq(cl);
    263 	}
    264 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
    265 		jif->jif_ifq->ifq_len = 0;
    266 }
    267 
/*
 * jobs_class_create: create (or re-initialise) the class at priority
 * `pri' on interface `jif'.
 *
 * adc/rdc/alc/rlc/arc are the service guarantees for this class:
 * absolute delay, relative delay, absolute loss, relative loss and
 * arrival rate.  A value of -1 disables the corresponding guarantee.
 * Returns the class, or NULL on allocation failure.
 */
static struct jobs_class *
jobs_class_create(jif, pri, adc, rdc, alc, rlc, arc, flags)
	struct jobs_if *jif;
	int pri;
	int64_t adc, rdc, alc, rlc, arc;
	int flags;
{
	struct jobs_class *cl, *scan1, *scan2;
	int s;
	int class_exists1, class_exists2;
	int i, j;
	int64_t tmp[JOBS_MAXPRI];
	u_int64_t now;

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
		splx(s);
	} else {
		cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;

		/* per-class list of packet arrival timestamps */
		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	/* reset the queue state and all statistics counters */
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	cl->cl_period = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

	/*
	 * for each guarantee: -1 means "not concerned"; replace it with
	 * a neutral value (INFINITY for delay/loss bounds, 0 for rates)
	 * and remember whether the guarantee is active.
	 */
	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	cl->cl_rdc=rdc;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rlc=rlc;
	cl->cl_alc=alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;  /* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */

	if (cl->concerned_rdc) {
		/*
		 * first pass: tmp[i] is the product of the cl_rdc
		 * coefficients of all RDC-concerned classes with a
		 * priority strictly below i (1 if there are none).
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		/*
		 * second pass: each class's delay_prod_others is the
		 * product of tmp[j] over all other RDC-concerned
		 * classes (j != i).
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	if (cl->concerned_rlc) {
		/* same two-pass computation as above, for the RLCs */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return (cl);

	/*
	 * error path: only reachable while building a brand-new class,
	 * so cl and its sub-allocations are not yet visible elsewhere.
	 */
 err_ret:
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		free(cl->arv_tm, M_DEVBUF);

	free(cl, M_DEVBUF);
	return (NULL);
}
    468 
    469 static int
    470 jobs_class_destroy(cl)
    471 	struct jobs_class *cl;
    472 {
    473 	struct jobs_if *jif;
    474 	int s, pri;
    475 
    476 	s = splnet();
    477 
    478 	/* delete filters referencing to this class */
    479 	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);
    480 
    481 	if (!qempty(cl->cl_q))
    482 		jobs_purgeq(cl);
    483 
    484 	jif = cl->cl_jif;
    485 	jif->jif_classes[cl->cl_pri] = NULL;
    486 	if (jif->jif_maxpri == cl->cl_pri) {
    487 		for (pri = cl->cl_pri; pri >= 0; pri--)
    488 			if (jif->jif_classes[pri] != NULL) {
    489 				jif->jif_maxpri = pri;
    490 				break;
    491 			}
    492 		if (pri < 0)
    493 			jif->jif_maxpri = -1;
    494 	}
    495 	splx(s);
    496 
    497 	tslist_destroy(cl);
    498 	free(cl->cl_q, M_DEVBUF);
    499 	free(cl, M_DEVBUF);
    500 	return (0);
    501 }
    502 
    503 /*
    504  * jobs_enqueue is an enqueue function to be registered to
    505  * (*altq_enqueue) in struct ifaltq.
    506  */
/*
 * jobs_enqueue: classify `m', account for its arrival, enqueue it via
 * jobs_addq and re-run the JoBS rate allocation.  Returns 0 on
 * success or ENOBUFS when the packet (or a victim) had to be dropped.
 */
static int
jobs_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	int len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t* delta_rate;
	u_int64_t tstamp1, tstamp2, cycles; /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	if (IFQ_IS_EMPTY(ifq)) {
		/* interface was idle: start a fresh busy period */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = jif->jif_default;

	/* charge the arrival to both per-period and lifetime counters */
	len = m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals w/ overflow */
		/* the byte counter wrapped: restart all per-period state */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
		/* re-account the current packet after the reset */
		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	/* current_loss = dropped fraction of arrivals, scaled by SCALE_LOSS */
	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;

	/* for MDRR: update theoretical value of the output curve */

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now; /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS; /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	enforce_wc(jif);

	/*
	 * if the ADC minimum-rate constraints cannot all be met,
	 * assign_rate_drops_adc() computes per-priority service-rate
	 * corrections (a malloc'ed array indexed by priority).
	 */
	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
			  if ((cl = jif->jif_classes[pri]) != NULL &&
			      !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	}

	/* then apply the RDC (relative delay) rate adjustments */
	delta_rate = adjust_rates_rdc(jif);

	if (delta_rate != NULL) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++)
			if ((cl = jif->jif_classes[pri]) != NULL &&
			    !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
		free(delta_rate, M_DEVBUF);
	}

	/* update the worst/best/average enqueue cycle-count statistics */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue=cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue=cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return (return_flag);
}
    661 
    662 /*
    663  * jobs_dequeue is a dequeue function to be registered to
    664  * (*altq_dequeue) in struct ifaltq.
    665  *
    666  * note: ALTDQ_POLL returns the next packet without removing the packet
    667  *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
    668  *	ALTDQ_REMOVE must return the same packet if called immediately
    669  *	after ALTDQ_POLL.
    670  */
    671 
/*
 * jobs_dequeue: pick the backlogged class whose actual output lags its
 * theoretical output curve the most, and dequeue (or, for ALTDQ_POLL,
 * peek at) its head packet.  Returns NULL when the interface is empty.
 */
static struct mbuf *
jobs_dequeue(ifq, op)
	struct ifaltq	*ifq;
	int		op;
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		/* mark every class idle as of now */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
		  cl = jif->jif_classes[pri];
		  if (cl != NULL)
		    cl->idletime = now;
		}

		/* record dequeue cycle-count stats even on the empty path */
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (NULL);
	}

	/*
	 * select the class whose actual tranmissions are the furthest
	 * from the promised transmissions
	 */

	max_error = -1;
	svc_class = -1;

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			/* error = promised output (scaled) - actual output */
			error = (int64_t)cl->cl_rout_th.bytes
			    -(int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				/* first backlogged class seen */
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	if (op == ALTDQ_POLL) {
		/* peek only: update stats and return the head packet */
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		/*
		 * per-packet delay = now minus the arrival timestamp at
		 * the head of the class's timestamp list; count an ADC
		 * violation if the absolute delay bound was exceeded.
		 */
		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel  += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
	}
	if (cl != NULL)
		tslist_dequeue(cl);		/* dequeue the timestamp */

	/* record dequeue cycle-count stats on the normal path */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return (m);
}
    790 
/*
 * jobs_addq: add `m' to class `cl''s queue, applying the JoBS drop
 * policy when the buffer limit is reached.  Returns 0 when the packet
 * was queued without loss, -1 when a packet (the incoming one or a
 * victim chosen by the loss guarantees) was dropped.
 */
static int
jobs_addq(cl, m, jif)
	struct jobs_class *cl;
	struct mbuf *m;
	struct jobs_if *jif;
{
	int victim;
	u_int64_t len;
	u_int64_t now;
	struct jobs_class* victim_class;

	victim = -1;
	victim_class = NULL;
	len = 0;

	now = read_machclk();

	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
		/*
		 * separate buffers: no guarantees on packet drops
		 * can be offered
		 * thus we drop the incoming packet
		 */
		len = (u_int64_t)m_pktlen(m);
		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->cl_rin, (int)len);
		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->st_rin, (int)len);
		/* fold the drop into the class's scaled loss ratio */
		cl->current_loss += (len << SCALE_LOSS)
		    /cl->cl_arrival.bytes;
		m_freem(m);
		return (-1);

	} else if (!jif->jif_separate
		   && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
		/* shared buffer: supports guarantees on losses */
		if (!cl->concerned_rlc) {
			if (!cl->concerned_alc) {
				/*
				 * no ALC, no RLC on this class:
				 * drop the incoming packet
				 */
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->cl_rin, (int)len);
				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->st_rin, (int)len);
				cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
				m_freem(m);
				return (-1);
			} else {
				/*
				 * no RLC, but an ALC:
				 * drop the incoming packet if possible
				 */
				len = (u_int64_t)m_pktlen(m);
				/* would dropping it keep us within the ALC bound? */
				if (cl->current_loss + (len << SCALE_LOSS)
				    / cl->cl_arrival.bytes <= cl->cl_alc) {
					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->cl_rin, (int)len);
					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->st_rin, (int)len);
					cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
					m_freem(m);
					return (-1);
				} else {
					/*
					 * the ALC would be violated:
					 * pick another class
					 */
					/* queue the new packet first, then evict a victim's tail */
					_addq(cl->cl_q, m);
					tslist_enqueue(cl, now);

					victim = pick_dropped_rlc(jif);

					if (victim == -1) {
						/*
						 * something went wrong
						 * let us discard
						 * the incoming packet,
						 * regardless of what
						 * may happen...
						 */
						victim_class = cl;
					} else
						victim_class = jif->jif_classes[victim];

					if (victim_class != NULL) {
						/*
						 * test for safety
						 * purposes...
						 * it must be true
						 */
						m = _getq_tail(victim_class->cl_q);
						len = (u_int64_t)m_pktlen(m);
						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
						victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
						m_freem(m); /* the packet is trashed here */
						tslist_drop(victim_class); /* and its timestamp as well */
					}
					return (-1);
				}
			}
		} else {
			/*
			 * RLC on that class:
			 * pick class according to RLCs
			 */
			/* queue the new packet, then evict the tail of the RLC victim */
			_addq(cl->cl_q, m);
			tslist_enqueue(cl, now);

			victim = pick_dropped_rlc(jif);
			if (victim == -1) {
				/*
				 * something went wrong
				 * let us discard the incoming packet,
				 * regardless of what may happen...
				 */
				victim_class = cl;
			} else
				victim_class = jif->jif_classes[victim];

			if (victim_class != NULL) {
				/*
				 * test for safety purposes...
				 * it must be true
				 */
				m = _getq_tail(victim_class->cl_q);
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
				victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
				m_freem(m); /* the packet is trashed here */
				tslist_drop(victim_class); /* and its timestamp as well */
			}
			return (-1);
		}
	}
	/* else: no drop */

	_addq(cl->cl_q, m);
	tslist_enqueue(cl, now);

	return (0);
}
    941 
    942 static struct mbuf *
    943 jobs_getq(cl)
    944 	struct jobs_class *cl;
    945 {
    946 	return _getq(cl->cl_q);
    947 }
    948 
    949 static struct mbuf *
    950 jobs_pollq(cl)
    951 	struct jobs_class *cl;
    952 {
    953 	return qhead(cl->cl_q);
    954 }
    955 
    956 static void
    957 jobs_purgeq(cl)
    958 	struct jobs_class *cl;
    959 {
    960 	struct mbuf *m;
    961 
    962 	if (qempty(cl->cl_q))
    963 		return;
    964 
    965 	while ((m = _getq(cl->cl_q)) != NULL) {
    966 		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
    967 		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
    968 		m_freem(m);
    969 		tslist_drop(cl);
    970 	}
    971 	ASSERT(qlen(cl->cl_q) == 0);
    972 }
    973 
    974 /*
    975  * timestamp list support routines
    976  *
    977  * this implementation has been revamped and
    978  * now uses a TAILQ structure.
    979  * timestamp list holds class timestamps
    980  * there is one timestamp list per class.
    981  */
    982 static TSLIST *
    983 tslist_alloc()
    984 {
    985 	TSLIST *list_init;
    986 
    987 	list_init = malloc(sizeof(TSLIST), M_DEVBUF, M_WAITOK);
    988 	TAILQ_INIT(list_init);
    989 	return (list_init);
    990 }
    991 
    992 static void
    993 tslist_destroy(cl)
    994 	struct jobs_class *cl;
    995 {
    996 	while (tslist_first(cl->arv_tm) != NULL)
    997 		tslist_dequeue(cl);
    998 
    999 	free(cl->arv_tm, M_DEVBUF);
   1000 }
   1001 
   1002 static int
   1003 tslist_enqueue(cl, arv)
   1004 	struct jobs_class *cl;
   1005 	u_int64_t arv;
   1006 {
   1007 	TSENTRY *pushed;
   1008 	pushed = malloc(sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
   1009 	if (pushed == NULL)
   1010 		return (0);
   1011 
   1012 	pushed->timestamp = arv;
   1013 	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
   1014 	return (1);
   1015 }
   1016 
   1017 static void
   1018 tslist_dequeue(cl)
   1019      struct jobs_class *cl;
   1020 {
   1021 	TSENTRY *popped;
   1022 	popped = tslist_first(cl->arv_tm);
   1023 	if (popped != NULL) {
   1024 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
   1025 		  free(popped, M_DEVBUF);
   1026 	}
   1027 	return;
   1028 }
   1029 
   1030 static void
   1031 tslist_drop(cl)
   1032 	struct jobs_class *cl;
   1033 {
   1034 	TSENTRY *popped;
   1035 	popped = tslist_last(cl->arv_tm);
   1036 	if (popped != NULL) {
   1037 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
   1038 		  free(popped, M_DEVBUF);
   1039 	}
   1040 	return;
   1041 }
   1042 
   1043 /*
   1044  * rate allocation support routines
   1045  */
   1046 /*
   1047  * enforce_wc: enforce that backlogged classes have non-zero
   1048  * service rate, and that non-backlogged classes have zero
   1049  * service rate.
   1050  */
   1051 
   1052 static int
   1053 enforce_wc(jif)
   1054 	struct jobs_if *jif;
   1055 {
   1056 	struct jobs_class *cl;
   1057 
   1058 	int64_t active_classes;
   1059 	int pri;
   1060 	int is_backlogged, class_exists, updated;
   1061 
   1062 	updated = 0;
   1063 	active_classes = 0;
   1064 
   1065 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1066 		cl = jif->jif_classes[pri];
   1067 		class_exists = (cl != NULL);
   1068 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1069 
   1070 		if (is_backlogged)
   1071 			active_classes++;
   1072 		if ((is_backlogged && cl->service_rate <= 0)
   1073 		    ||(class_exists
   1074 		       && !is_backlogged && cl->service_rate > 0))
   1075 			updated = 1;
   1076 	}
   1077 
   1078 	if (updated) {
   1079 		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1080 			cl = jif->jif_classes[pri];
   1081 			class_exists = (cl != NULL);
   1082 			is_backlogged = (class_exists && !qempty(cl->cl_q));
   1083 
   1084 			if (class_exists && !is_backlogged)
   1085 				cl->service_rate = 0;
   1086 			else if (is_backlogged)
   1087 				cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
   1088 		}
   1089 	}
   1090 
   1091 	return (updated);
   1092 }
   1093 
   1094 /*
   1095  * adjust_rates_rdc: compute the service rates adjustments
   1096  * needed to realize the desired proportional delay differentiation.
   1097  * essentially, the rate adjustement delta_rate = prop_control*error,
   1098  * where error is the difference between the measured "weighted"
   1099  * delay and the mean of the weighted delays. see paper for more
   1100  * information.
   1101  * prop_control has slightly changed since the INFOCOM paper,
   1102  * this condition seems to provide better results.
   1103  */
   1104 
   1105 static int64_t *
   1106 adjust_rates_rdc(jif)
   1107 	struct jobs_if *jif;
   1108 {
   1109 	int64_t* result;
   1110 	int64_t  credit, available, lower_bound, upper_bound;
   1111 	int64_t  bk;
   1112 	int i, j;
   1113 	int rdc_classes, active_classes;
   1114 	int class_exists, is_backlogged;
   1115 	struct jobs_class *cl;
   1116 	int64_t* error;
   1117 	int64_t prop_control;
   1118 	u_int64_t max_prod;
   1119 	u_int64_t min_share;
   1120 	u_int64_t max_avg_pkt_size;
   1121 
   1122 	/*
   1123 	 * min_share is scaled
   1124 	 * to avoid dealing with doubles
   1125 	 */
   1126 	active_classes = 0;
   1127 	rdc_classes = 0;
   1128 	max_prod = 0;
   1129 	max_avg_pkt_size = 0;
   1130 
   1131 	upper_bound = (int64_t)jif->jif_bandwidth;
   1132 
   1133 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1134 		cl = jif->jif_classes[i];
   1135 		class_exists = (cl != NULL);
   1136 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1137 		if (is_backlogged) {
   1138 			active_classes++;
   1139 			if (cl->concerned_rdc)
   1140 				rdc_classes++;
   1141 			else
   1142 				upper_bound -=
   1143 				    internal_to_bps(cl->service_rate);
   1144 		}
   1145 	}
   1146 
   1147 	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t),
   1148 	    M_DEVBUF, M_WAITOK);
   1149 
   1150 	if (result == NULL)
   1151 		return NULL;
   1152 
   1153 	for (i = 0; i <= jif->jif_maxpri; i++)
   1154 		result[i] = 0;
   1155 
   1156 	if (upper_bound <= 0 || rdc_classes == 0)
   1157 		return result;
   1158 
   1159 	credit = 0;
   1160 	lower_bound = 0;
   1161 	min_share = ((u_int64_t)1 << SCALE_SHARE);
   1162 	bk = 0;
   1163 
   1164 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1165 		cl = jif->jif_classes[i];
   1166 		class_exists = (cl != NULL);
   1167 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1168 		if (is_backlogged && cl->concerned_rdc)
   1169 			bk += cl->cl_rin.bytes;
   1170 	}
   1171 
   1172 	if (bk == 0)
   1173 		return (result);
   1174 
   1175 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1176 		cl = jif->jif_classes[i];
   1177 		class_exists = (cl != NULL);
   1178 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1179 		if (is_backlogged
   1180 		    && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
   1181 			min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
   1182 		if (is_backlogged && cl->concerned_rdc
   1183 		    && cl->delay_prod_others > max_prod)
   1184 			max_prod = cl->delay_prod_others;
   1185 
   1186 		if (is_backlogged && cl->concerned_rdc
   1187 		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
   1188 			max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
   1189 	}
   1190 
   1191 	error = update_error(jif);
   1192 	if (!error)
   1193 		return (NULL);
   1194 
   1195 	prop_control = (upper_bound*upper_bound*min_share)
   1196 	    /(max_prod*(max_avg_pkt_size << 2));
   1197 
   1198 	prop_control = bps_to_internal(ticks_to_secs(prop_control)); /* in BT-1 */
   1199 
   1200 	credit = 0;
   1201 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1202 		cl = jif->jif_classes[i];
   1203 		class_exists = (cl != NULL);
   1204 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1205 		if (is_backlogged && cl->concerned_rdc) {
   1206 			result[i] = -prop_control*error[i]; /* in BT-1 */
   1207 			result[i] >>= (SCALE_SHARE);
   1208 		}
   1209 	}
   1210 
   1211 	free(error, M_DEVBUF); /* we don't need these anymore */
   1212 
   1213 	/* saturation */
   1214 
   1215 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1216 		cl = jif->jif_classes[i];
   1217 		class_exists = (cl != NULL);
   1218 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1219 
   1220 		if (is_backlogged && cl->concerned_rdc)
   1221 			lower_bound += cl->min_rate_adc;
   1222 		/*
   1223 		 * note: if there's no ADC or ARC on cl,
   1224 		 * this is equal to zero, which is fine
   1225 		 */
   1226 	}
   1227 
   1228 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1229 		cl = jif->jif_classes[i];
   1230 		class_exists = (cl != NULL);
   1231 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1232 
   1233 		if (is_backlogged && cl->concerned_rdc
   1234 		    && result[i] + cl->service_rate > upper_bound) {
   1235 			for (j = 0; j <= jif->jif_maxpri; j++) {
   1236 				cl = jif->jif_classes[j];
   1237 				class_exists = (cl != NULL);
   1238 				is_backlogged = (class_exists
   1239 						 && !qempty(cl->cl_q));
   1240 				if (is_backlogged && cl->concerned_rdc) {
   1241 					if (j == i)
   1242 						result[j] = upper_bound
   1243 						    -cl->service_rate
   1244 						    + cl->min_rate_adc
   1245 						    - lower_bound;
   1246 					else
   1247 						result[j] =
   1248 						    -cl->service_rate
   1249 						    +cl->min_rate_adc;
   1250 				}
   1251 			}
   1252 			return result;
   1253 		}
   1254 
   1255 		cl = jif->jif_classes[i];
   1256 		/* do this again since it may have been modified */
   1257 		class_exists = (cl != NULL);
   1258 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1259 
   1260 		if (is_backlogged && cl->concerned_rdc
   1261 		    && result[i] + cl->service_rate < cl->min_rate_adc) {
   1262 			credit += cl->service_rate+result[i]
   1263 			    -cl->min_rate_adc;
   1264 			/* "credit" is in fact a negative number */
   1265 			result[i] = -cl->service_rate+cl->min_rate_adc;
   1266 		}
   1267 	}
   1268 
   1269 	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
   1270 		cl = jif->jif_classes[i];
   1271 		class_exists = (cl != NULL);
   1272 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1273 
   1274 		if (is_backlogged && cl->concerned_rdc) {
   1275 			available = result[i]
   1276 			    + cl->service_rate-cl->min_rate_adc;
   1277 			if (available >= -credit) {
   1278 				result[i] += credit;
   1279 				credit = 0;
   1280 			} else {
   1281 				result[i] -= available;
   1282 				credit += available;
   1283 			}
   1284 		}
   1285 	}
   1286 	return result;
   1287 }
   1288 
   1289 /*
   1290  * assign_rate_drops_adc: returns the adjustment needed to
   1291  * the service rates to meet the absolute delay/rate constraints
   1292  * (delay/throughput bounds) and drops traffic if need be.
   1293  * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
   1294  */
   1295 
   1296 static int64_t *
   1297 assign_rate_drops_adc(jif)
   1298 	struct jobs_if* jif;
   1299 {
   1300 	int64_t* result;
   1301 	int class_exists, is_backlogged;
   1302 	struct jobs_class *cl;
   1303 
   1304 	int64_t *c, *n, *k;
   1305 	int64_t *available;
   1306 
   1307 	int lowest, highest;
   1308 	int keep_going;
   1309 	int i;
   1310 	u_int64_t now, oldest_arv;
   1311 	int64_t	remaining_time;
   1312 	struct mbuf* pkt;
   1313 	u_int64_t len;
   1314 
   1315 	now = read_machclk();
   1316 	oldest_arv = now;
   1317 
   1318 	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
   1319 	if (result == NULL)
   1320 		return NULL;
   1321 	c = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1322 	if (c == NULL)
   1323 		return NULL;
   1324 	n = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1325 	if (n == NULL)
   1326 		return NULL;
   1327 	k = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1328 	if (k == NULL)
   1329 		return NULL;
   1330 	available = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
   1331 	if (available == NULL)
   1332 		return NULL;
   1333 
   1334 	for (i = 0; i <= jif->jif_maxpri; i++)
   1335 		result[i] = 0;
   1336 
   1337 	keep_going = 1;
   1338 
   1339 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1340 		cl = jif->jif_classes[i];
   1341 		class_exists = (cl != NULL);
   1342 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1343 
   1344 		if (is_backlogged) {
   1345 			if (cl->concerned_adc) {
   1346 				/*
   1347 				 * get the arrival time of the oldest
   1348 				 * class-i packet
   1349 				 */
   1350 				if (tslist_first(cl->arv_tm) == NULL)
   1351 					oldest_arv = now; /* NOTREACHED */
   1352 				else
   1353 					oldest_arv = (tslist_first(cl->arv_tm))->timestamp;
   1354 
   1355 				n[i] = cl->service_rate;
   1356 				k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));
   1357 
   1358 				remaining_time = cl->cl_adc
   1359 				    - (int64_t)delay_diff(now, oldest_arv);
   1360 				if (remaining_time > 0) {
   1361 					c[i] = remaining_time;
   1362 					/*
   1363 					 * c is the remaining time before
   1364 					 * the deadline is violated
   1365 					 * (in ticks)
   1366 					 */
   1367 					available[i] = n[i]-k[i]/c[i];
   1368 				} else {
   1369 					/*
   1370 					 * deadline has passed...
   1371 					 * we allocate the whole link
   1372 					 * capacity to hopefully
   1373 					 * solve the problem
   1374 					 */
   1375 					c[i] = 0;
   1376 					available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth));
   1377 				}
   1378 				if (cl->concerned_arc) {
   1379 					/*
   1380 					 * there's an ARC in addition
   1381 					 * to the ADC
   1382 					 */
   1383 					if (n[i] - cl->cl_arc < available[i])
   1384 						available[i] = n[i]
   1385 						    - cl->cl_arc;
   1386 				}
   1387 			} else if (cl->concerned_arc) {
   1388 				/*
   1389 				 * backlogged, concerned by ARC
   1390 				 * but not by ADC
   1391 				 */
   1392 				n[i] = cl->service_rate;
   1393 				available[i] = n[i] - cl->cl_arc;
   1394 			} else {
   1395 				/*
   1396 				 * backlogged but not concerned by ADC
   1397 				 * or ARC -> can give everything
   1398 				 */
   1399 				n[i] = cl->service_rate;
   1400 				available[i] = n[i];
   1401 			}
   1402 		} else {
   1403 			/* not backlogged */
   1404 			n[i] = 0;
   1405 			k[i] = 0;
   1406 			c[i] = 0;
   1407 			if (class_exists)
   1408 				available[i] = cl->service_rate;
   1409 			else
   1410 				available[i] = 0;
   1411 		}
   1412 	}
   1413 
   1414 	/* step 1: adjust rates (greedy algorithm) */
   1415 
   1416 	highest = 0;
   1417 	lowest  = jif->jif_maxpri;
   1418 
   1419 	while (highest < jif->jif_maxpri+1 && available[highest] >= 0)
   1420 		highest++; /* which is the highest class that needs more service? */
   1421 	while (lowest > 0 && available[lowest] <= 0)
   1422 		lowest--;  /* which is the lowest class that needs less service? */
   1423 
   1424 	while (highest != jif->jif_maxpri+1 && lowest != -1) {
   1425 		/* give the excess service from lowest to highest */
   1426 		if (available[lowest]+available[highest] > 0) {
   1427 			/*
   1428 			 * still some "credit" left
   1429 			 * give all that is needed by "highest"
   1430 			 */
   1431 			n[lowest]  += available[highest];
   1432 			n[highest] -= available[highest];
   1433 			available[lowest]  += available[highest];
   1434 			available[highest] = 0;
   1435 
   1436 			while (highest < jif->jif_maxpri+1
   1437 			       && available[highest] >= 0)
   1438 				highest++;  /* which is the highest class that needs more service now? */
   1439 
   1440 		} else if (available[lowest]+available[highest] == 0) {
   1441 			/* no more credit left but it's fine */
   1442 			n[lowest]  += available[highest];
   1443 			n[highest] -= available[highest];
   1444 			available[highest] = 0;
   1445 			available[lowest]  = 0;
   1446 
   1447 			while (highest < jif->jif_maxpri+1
   1448 			       && available[highest] >= 0)
   1449 				highest++;  /* which is the highest class that needs more service? */
   1450 			while (lowest >= 0 && available[lowest] <= 0)
   1451 				lowest--;   /* which is the lowest class that needs less service? */
   1452 
   1453 		} else if (available[lowest]+available[highest] < 0) {
   1454 			/*
   1455 			 * no more credit left and we need to switch
   1456 			 * to another class
   1457 			 */
   1458 			n[lowest]  -= available[lowest];
   1459 			n[highest] += available[lowest];
   1460 			available[highest] += available[lowest];
   1461 			available[lowest]  = 0;
   1462 
   1463 			while ((lowest >= 0)&&(available[lowest] <= 0))
   1464 				lowest--;  /* which is the lowest class that needs less service? */
   1465 		}
   1466 	}
   1467 
   1468 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1469 		cl = jif->jif_classes[i];
   1470 		class_exists = (cl != NULL);
   1471 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1472 		if (is_backlogged) {
   1473 			result[i] = n[i] - cl->service_rate;
   1474 		} else {
   1475 			if (class_exists)
   1476 				result[i] = - cl->service_rate;
   1477 			else
   1478 				result[i] = 0;
   1479 		}
   1480 	}
   1481 
   1482 	/* step 2: adjust drops (for ADC) */
   1483 
   1484 	if (highest != jif->jif_maxpri+1) {
   1485 		/* some class(es) still need(s) additional service */
   1486 		for (i = 0; i <= jif->jif_maxpri; i++) {
   1487 			cl = jif->jif_classes[i];
   1488 			class_exists = (cl != NULL);
   1489 			is_backlogged = (class_exists
   1490 					 && !qempty(cl->cl_q));
   1491 			if (is_backlogged && available[i] < 0) {
   1492 				if (cl->concerned_adc) {
   1493 					k[i] = c[i]*n[i];
   1494 					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
   1495 						pkt = qtail(cl->cl_q);
   1496 						if (pkt != NULL) {
   1497 							/* "safeguard" test (a packet SHOULD be in there) */
   1498 							len = (u_int64_t)m_pktlen(pkt);
   1499 							/* access packet at the tail */
   1500 							if (cl->concerned_alc
   1501 							    && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
   1502 								keep_going = 0; /* relax ADC in favor of ALC */
   1503 							} else {
   1504 								/* drop packet at the tail of the class-i queue, update values */
   1505 								pkt = _getq_tail(cl->cl_q);
   1506 								len = (u_int64_t)m_pktlen(pkt);
   1507 								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
   1508 								PKTCNTR_SUB(&cl->cl_rin, (int)len);
   1509 								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
   1510 								PKTCNTR_SUB(&cl->st_rin, (int)len);
   1511 								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
   1512 								m_freem(pkt); /* the packet is trashed here */
   1513 								tslist_drop(cl);
   1514 								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
   1515 							}
   1516 						} else
   1517 							keep_going = 0; /* NOTREACHED */
   1518 					}
   1519 					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
   1520 				}
   1521 				/*
   1522 				 * n[i] is the max rate we can give.
   1523 				 * the above drops as much as possible
   1524 				 * to respect a delay bound.
   1525 				 * for throughput bounds,
   1526 				 * there's nothing that can be done
   1527 				 * after the greedy reallocation.
   1528 				 */
   1529 			}
   1530 		}
   1531 	}
   1532 
   1533 	/* update the values of min_rate_adc */
   1534 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1535 		cl = jif->jif_classes[i];
   1536 		class_exists = (cl != NULL);
   1537 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1538 		if (is_backlogged && cl->concerned_adc) {
   1539 			if (c[i] != 0) {
   1540 				if (cl->concerned_adc
   1541 				    && !cl->concerned_arc)
   1542 					cl->min_rate_adc = k[i]/c[i];
   1543 				else
   1544 					cl->min_rate_adc = n[i];
   1545 			} else
   1546 				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
   1547 		} else if (is_backlogged && cl->concerned_arc)
   1548 			cl->min_rate_adc = n[i]; /* the best we can give */
   1549 		else {
   1550 			if (class_exists)
   1551 				cl->min_rate_adc = 0;
   1552 		}
   1553 	}
   1554 
   1555 	free(c, M_DEVBUF);
   1556 	free(n, M_DEVBUF);
   1557 	free(k, M_DEVBUF);
   1558 	free(available, M_DEVBUF);
   1559 
   1560 	return (result);
   1561 }
   1562 
   1563 /*
   1564  * update_error: returns the difference between the mean weighted
   1565  * delay and the weighted delay for each class. if proportional
   1566  * delay differentiation is perfectly achieved, it should return
   1567  * zero for each class.
   1568  */
   1569 static int64_t *
   1570 update_error(jif)
   1571 	struct jobs_if *jif;
   1572 {
   1573 	int i;
   1574 	int active_classes, backlogged_classes;
   1575 	u_int64_t mean_weighted_delay;
   1576 	u_int64_t delays[JOBS_MAXPRI];
   1577 	int64_t* error;
   1578 	int class_exists, is_backlogged;
   1579 	struct jobs_class *cl;
   1580 
   1581 	error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF,
   1582 	    M_WAITOK|M_ZERO);
   1583 
   1584 	if (error == NULL)
   1585 		return NULL;
   1586 
   1587 	mean_weighted_delay = 0;
   1588 	active_classes = 0;
   1589 	backlogged_classes = 0;
   1590 
   1591 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1592 		cl = jif->jif_classes[i];
   1593 		class_exists = (cl != NULL);
   1594 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1595 
   1596 		if (is_backlogged) {
   1597 			backlogged_classes++;
   1598 			if (cl->concerned_rdc) {
   1599 				delays[i] = proj_delay(jif, i);
   1600 				mean_weighted_delay += cl->delay_prod_others*delays[i];
   1601 				active_classes ++;
   1602 			}
   1603 		}
   1604 	}
   1605 
   1606 	if (active_classes == 0)
   1607 		return error;
   1608 	else
   1609 		mean_weighted_delay /= active_classes;
   1610 
   1611 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1612 		cl = jif->jif_classes[i];
   1613 		class_exists = (cl != NULL);
   1614 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1615 
   1616 		if (is_backlogged && cl->concerned_rdc)
   1617 			error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
   1618 		else
   1619 			error[i] = 0; /*
   1620 				       * either the class isn't concerned,
   1621 				       * or it's not backlogged.
   1622 				       * in any case, the rate shouldn't
   1623 				       * be adjusted.
   1624 				       */
   1625 	}
   1626 	return error;
   1627 }
   1628 
   1629 /*
   1630  * min_rates_adc: computes the minimum service rates needed in
   1631  * each class to meet the absolute delay bounds. if, for any
   1632  * class i, the current service rate of class i is less than
   1633  * the computed minimum service rate, this function returns
   1634  * false, true otherwise.
   1635  */
   1636 static int
   1637 min_rates_adc(jif)
   1638 	struct jobs_if *jif;
   1639 {
   1640 	int result;
   1641 	int i;
   1642 	int class_exists, is_backlogged;
   1643 	int64_t remaining_time;
   1644 	struct jobs_class *cl;
   1645 	result = 1;
   1646 
   1647 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1648 		cl = jif->jif_classes[i];
   1649 		class_exists = (cl != NULL);
   1650 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1651 		if (is_backlogged && cl->concerned_adc) {
   1652 			remaining_time = cl->cl_adc - proj_delay(jif, i);
   1653 			if (remaining_time > 0 ) {
   1654 				/* min rate needed for ADC */
   1655 				cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
   1656 				if (cl->concerned_arc
   1657 				    && cl->cl_arc > cl->min_rate_adc) {
   1658 					/* min rate needed for ADC + ARC */
   1659 					cl->min_rate_adc = cl->cl_arc;
   1660 				}
   1661 			} else {
   1662 				/* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */
   1663 				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
   1664 			}
   1665       		} else if (is_backlogged && cl->concerned_arc)
   1666 			cl->min_rate_adc = cl->cl_arc; 			/* no ADC, an ARC */
   1667 		else if (class_exists)
   1668 			cl->min_rate_adc = 0;	/*
   1669 						 * either the class is not
   1670 						 * backlogged
   1671 						 * or there is no ADC and
   1672 						 * no ARC
   1673 						 */
   1674 		if (is_backlogged && cl->min_rate_adc > cl->service_rate)
   1675 			result = 0;
   1676 	}
   1677 
   1678 	return result;
   1679 }
   1680 
   1681 /*
   1682  * proj_delay: computes the difference between the current time
   1683  * and the time the oldest class-i packet still in the class-i
   1684  * queue i arrived in the system.
   1685  */
   1686 static int64_t
   1687 proj_delay(jif, i)
   1688 	struct jobs_if *jif;
   1689 	int i;
   1690 {
   1691 	u_int64_t now;
   1692 	int class_exists, is_backlogged;
   1693 	struct jobs_class *cl;
   1694 
   1695 	now = read_machclk();
   1696 	cl = jif->jif_classes[i];
   1697 	class_exists = (cl != NULL);
   1698 	is_backlogged = (class_exists && !qempty(cl->cl_q));
   1699 
   1700 	if (is_backlogged)
   1701 		return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));
   1702 
   1703 	return (0); /* NOTREACHED */
   1704 }
   1705 
   1706 /*
   1707  * pick_dropped_rlc: returns the class index of the class to be
   1708  * dropped for meeting the relative loss constraints.
   1709  */
static int
pick_dropped_rlc(jif)
	struct jobs_if *jif;
{
	int64_t mean;
	int64_t* loss_error;
	int i, active_classes, backlogged_classes;
	int class_exists, is_backlogged;
	int class_dropped;
	int64_t max_error;
	int64_t max_alc;
	struct mbuf* pkt;
	struct jobs_class *cl;
	u_int64_t len;

	/* per-class deviation from the mean weighted loss */
	loss_error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1),
	    M_DEVBUF, M_WAITOK);

	if (loss_error == NULL)
		return -1;

	class_dropped = -1;
	max_error = 0;
	mean = 0;
	active_classes = 0;
	backlogged_classes = 0;

	/*
	 * phase 1: mean of the weighted losses over the backlogged
	 * classes concerned by an RLC.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			backlogged_classes ++;
			if (cl->concerned_rlc) {
				mean += cl->loss_prod_others
				    * cl->current_loss;
				active_classes++;
			}
		}
	}

	if (active_classes > 0)
		mean /= active_classes;

	if (active_classes == 0)
		class_dropped = JOBS_MAXPRI+1; /*
						* no classes are concerned
						* by RLCs (JOBS_MAXPRI+1
						* means "ignore RLC" here)
						*/
	else {
		/*
		 * phase 2: deviation of each backlogged RLC class
		 * from the mean; classes not eligible get INFINITY
		 * so they are never selected below.
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));

			/*
			 * NOTE(review): this tests cl->cl_rlc while the
			 * averaging loop above tests cl->concerned_rlc;
			 * confirm the two always agree for backlogged
			 * classes, otherwise the mean and the candidate
			 * set are computed over different classes.
			 */
			if ((is_backlogged)&&(cl->cl_rlc))
				loss_error[i]=cl->loss_prod_others
				    *cl->current_loss-mean;
			else
				loss_error[i] = INFINITY;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && loss_error[i] <= max_error) {
				/*
				 * find out which class is the most
				 * below the mean.
				 * it's the one that needs to be dropped
				 * ties are broken in favor of the higher
				 * priority classes (i.e., if two classes
				 * present the same deviation, the lower
				 * priority class will get dropped).
				 */
				max_error = loss_error[i];
				class_dropped = i;
			}
		}

		if (class_dropped != -1) {
			cl = jif->jif_classes[class_dropped];
			pkt = qtail(cl->cl_q);
			if (pkt != NULL) {
				/*
				 * "safeguard" test (a packet SHOULD be
				 * in there)
				 */
				len = (u_int64_t)m_pktlen(pkt);
				/* access packet at the tail */
				/*
				 * NOTE(review): cl_alc is compared even
				 * when !cl->concerned_alc — presumably
				 * cl_alc holds a non-binding bound in
				 * that case; verify against the class
				 * setup code.
				 */
				if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
					/*
					 * the class to drop for meeting
					 * the RLC will defeat the ALC:
					 * ignore RLC.
					 */
					class_dropped = JOBS_MAXPRI+1;
				}
			} else
				class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */
		} else
			class_dropped = JOBS_MAXPRI+1;
	}

	/*
	 * phase 3: RLC gave no candidate — fall back to the class
	 * (scanned from lowest priority up) with the most headroom
	 * before its ALC (or before 100% loss when it has no ALC).
	 */
	if (class_dropped == JOBS_MAXPRI+1) {
		max_alc = -((int64_t)1 << SCALE_LOSS);
		for (i = jif->jif_maxpri; i >= 0; i--) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged) {
				if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
					max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
					class_dropped = i;
				} else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) {
					max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss;
					class_dropped = i;
				}
			}
		}
	}

	free(loss_error, M_DEVBUF);
	return (class_dropped);
}
   1840 
   1841 /*
   1842  * ALTQ binding/setup functions
   1843  */
   1844 /*
   1845  * jobs device interface
   1846  */
   1847 int
   1848 jobsopen(dev, flag, fmt, l)
   1849 	dev_t dev;
   1850 	int flag, fmt;
   1851 	struct lwp *l;
   1852 {
   1853 	if (machclk_freq == 0)
   1854 		init_machclk();
   1855 
   1856 	if (machclk_freq == 0) {
   1857 		printf("jobs: no CPU clock available!\n");
   1858 		return (ENXIO);
   1859 	}
   1860 	/* everything will be done when the queueing scheme is attached. */
   1861 	return 0;
   1862 }
   1863 
   1864 int
   1865 jobsclose(dev, flag, fmt, l)
   1866 	dev_t dev;
   1867 	int flag, fmt;
   1868 	struct lwp *l;
   1869 {
   1870 	struct jobs_if *jif;
   1871 	int err, error = 0;
   1872 
   1873 	while ((jif = jif_list) != NULL) {
   1874 		/* destroy all */
   1875 		if (ALTQ_IS_ENABLED(jif->jif_ifq))
   1876 			altq_disable(jif->jif_ifq);
   1877 
   1878 		err = altq_detach(jif->jif_ifq);
   1879 		if (err == 0)
   1880 			err = jobs_detach(jif);
   1881 		if (err != 0 && error == 0)
   1882 			error = err;
   1883 	}
   1884 
   1885 	return error;
   1886 }
   1887 
/*
 * jobsioctl: character-device ioctl entry point for the JOBS ALTQ
 * discipline.  Dispatches attach/detach, enable/disable/clear, class
 * and filter management, and statistics requests.
 * Returns 0 on success or an errno value.
 */
int
jobsioctl(dev, cmd, addr, flag, l)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;	/* ioctl argument, already in kernel space */
	int flag;
	struct lwp *l;
{
	struct jobs_if *jif;
	struct jobs_interface *ifacep;
	struct proc *p = l->l_proc;
	int	error = 0;

	/* check super-user privilege */
	/* reading statistics is unprivileged; everything else needs root */
	switch (cmd) {
	case JOBS_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		/* NetBSD path: authorize via kauth(9) */
		if ((error = kauth_authorize_generic(p->p_cred,
		    KAUTH_GENERIC_ISSUSER, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case JOBS_IF_ATTACH:
		error = jobscmd_if_attach((struct jobs_attach *)addr);
		break;

	case JOBS_IF_DETACH:
		error = jobscmd_if_detach((struct jobs_interface *)addr);
		break;

	case JOBS_ENABLE:
	case JOBS_DISABLE:
	case JOBS_CLEAR:
		/* these three share the interface-lookup preamble */
		ifacep = (struct jobs_interface *)addr;
		if ((jif = altq_lookup(ifacep->jobs_ifname,
				       ALTQT_JOBS)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {
		case JOBS_ENABLE:
			/* refuse to enable without a default class to
			 * absorb unclassified traffic */
			if (jif->jif_default == NULL) {
#if 1
				printf("jobs: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(jif->jif_ifq);
			break;

		case JOBS_DISABLE:
			error = altq_disable(jif->jif_ifq);
			break;

		case JOBS_CLEAR:
			jobs_clear_interface(jif);
			break;
		}
		break;

		case JOBS_ADD_CLASS:
			error = jobscmd_add_class((struct jobs_add_class *)addr);
			break;

	case JOBS_DEL_CLASS:
		error = jobscmd_delete_class((struct jobs_delete_class *)addr);
		break;

	case JOBS_MOD_CLASS:
		error = jobscmd_modify_class((struct jobs_modify_class *)addr);
		break;

	case JOBS_ADD_FILTER:
		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
		break;

	case JOBS_DEL_FILTER:
		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
		break;

	case JOBS_GETSTATS:
		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}
   1989 
   1990 static int
   1991 jobscmd_if_attach(ap)
   1992 	struct jobs_attach *ap;
   1993 {
   1994 	struct jobs_if *jif;
   1995 	struct ifnet *ifp;
   1996 	int error;
   1997 
   1998 	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
   1999 		return (ENXIO);
   2000 	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth, ap->qlimit, ap->separate)) == NULL)
   2001 		return (ENOMEM);
   2002 
   2003 	/*
   2004 	 * set JOBS to this ifnet structure.
   2005 	 */
   2006 	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
   2007 				 jobs_enqueue, jobs_dequeue, jobs_request,
   2008 				 &jif->jif_classifier, acc_classify)) != 0)
   2009 		(void)jobs_detach(jif);
   2010 
   2011 	return (error);
   2012 }
   2013 
   2014 static int
   2015 jobscmd_if_detach(ap)
   2016 	struct jobs_interface *ap;
   2017 {
   2018 	struct jobs_if *jif;
   2019 	int error;
   2020 
   2021 	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
   2022 		return (EBADF);
   2023 
   2024 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
   2025 		altq_disable(jif->jif_ifq);
   2026 
   2027 	if ((error = altq_detach(jif->jif_ifq)))
   2028 		return (error);
   2029 
   2030 	return jobs_detach(jif);
   2031 }
   2032 
   2033 static int
   2034 jobscmd_add_class(ap)
   2035 	struct jobs_add_class *ap;
   2036 {
   2037 	struct jobs_if *jif;
   2038 	struct jobs_class *cl;
   2039 
   2040 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2041 		return (EBADF);
   2042 
   2043 	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
   2044 		return (EINVAL);
   2045 
   2046 	if ((cl = jobs_class_create(jif, ap->pri,
   2047 				    ap->cl_adc, ap->cl_rdc,
   2048 				    ap->cl_alc, ap->cl_rlc, ap-> cl_arc,
   2049 				    ap->flags)) == NULL)
   2050 		return (ENOMEM);
   2051 
   2052 	/* return a class handle to the user */
   2053 	ap->class_handle = clp_to_clh(cl);
   2054 	return (0);
   2055 }
   2056 
   2057 static int
   2058 jobscmd_delete_class(ap)
   2059 	struct jobs_delete_class *ap;
   2060 {
   2061 	struct jobs_if *jif;
   2062 	struct jobs_class *cl;
   2063 
   2064 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2065 		return (EBADF);
   2066 
   2067 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2068 		return (EINVAL);
   2069 
   2070 	return jobs_class_destroy(cl);
   2071 }
   2072 
/*
 * jobscmd_modify_class: change the priority and/or service
 * parameters of an existing class.  Returns 0 on success, EBADF
 * if the interface has no JOBS discipline, EINVAL on a bad
 * priority or stale handle, EEXIST if the target priority slot
 * is occupied by another class, or ENOMEM.
 */
static int
jobscmd_modify_class(ap)
	struct jobs_modify_class *ap;
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		/* refuse to evict a different class at the new slot */
		if (jif->jif_classes[ap->pri] != NULL)
			return (EEXIST);
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	/*
	 * NOTE(review): if jobs_class_create() fails here the class was
	 * already moved to its new priority slot above, so the class
	 * table is updated but the parameters are not — confirm whether
	 * this partial update is acceptable or should be rolled back.
	 * Presumably jobs_class_create() modifies the class in place
	 * when one already occupies ap->pri; verify against its
	 * definition.
	 */
	if ((cl = jobs_class_create(jif, ap->pri,
				    ap->cl_adc, ap->cl_rdc,
				    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
				    ap->flags)) == NULL)
		return (ENOMEM);
	return 0;
}
   2108 
   2109 static int
   2110 jobscmd_add_filter(ap)
   2111 	struct jobs_add_filter *ap;
   2112 {
   2113 	struct jobs_if *jif;
   2114 	struct jobs_class *cl;
   2115 
   2116 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2117 		return (EBADF);
   2118 
   2119 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2120 		return (EINVAL);
   2121 
   2122 	return acc_add_filter(&jif->jif_classifier, &ap->filter,
   2123 			      cl, &ap->filter_handle);
   2124 }
   2125 
   2126 static int
   2127 jobscmd_delete_filter(ap)
   2128 	struct jobs_delete_filter *ap;
   2129 {
   2130 	struct jobs_if *jif;
   2131 
   2132 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2133 		return (EBADF);
   2134 
   2135 		return acc_delete_filter(&jif->jif_classifier,
   2136 					 ap->filter_handle);
   2137 }
   2138 
   2139 static int
   2140 jobscmd_class_stats(ap)
   2141 	struct jobs_class_stats *ap;
   2142 {
   2143 	struct jobs_if *jif;
   2144 	struct jobs_class *cl;
   2145 	struct class_stats stats, *usp;
   2146 	int pri, error;
   2147 
   2148 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2149 		return (EBADF);
   2150 
   2151 	ap->maxpri = jif->jif_maxpri;
   2152 
   2153 	/* then, read the next N classes */
   2154 	usp = ap->stats;
   2155 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   2156 		cl = jif->jif_classes[pri];
   2157 		if (cl != NULL)
   2158 			get_class_stats(&stats, cl);
   2159 		else
   2160 			(void)memset(&stats, 0, sizeof(stats));
   2161 		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
   2162 				     sizeof(stats))) != 0)
   2163 			return (error);
   2164 	}
   2165 	return (0);
   2166 }
   2167 
/*
 * get_class_stats: snapshot the counters of class "cl" into "sp".
 * Note this is not a pure read: the per-interval packet counters
 * (st_rin, st_rout) and the average-delay accumulator are reset as
 * a side effect, so successive calls report per-interval values.
 */
static void get_class_stats(sp, cl)
	struct class_stats *sp;
	struct jobs_class *cl;
{
	u_int64_t now;
	now = read_machclk();	/* current machine clock, for busy time below */

	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/*  PKTCNTR_RESET(&cl->st_arrival);*/
	/* reset the per-interval counters now that they are reported */
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	/* average-delay accumulator is likewise restarted */
	cl->cl_avgdel = 0;

	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	/* interface-wide enqueue/dequeue cycle statistics */
	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}
   2209 
/* convert a class handle to the corresponding class pointer */
/*
 * The handle is simply the class pointer cast to u_long, so the
 * conversion validates rather than looks up: the value must be a
 * properly aligned pointer, and the pointed-to class must carry
 * this handle and belong to interface "jif".  Returns NULL on any
 * mismatch.
 */
static struct jobs_class *
clh_to_clp(jif, chandle)
	struct jobs_if *jif;
	u_long chandle;
{
	struct jobs_class *cl;

	cl = (struct jobs_class *)chandle;
	/* an unaligned value cannot be a valid class pointer */
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_cl: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	/* cross-check the back-references stored in the class itself */
	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
		return (NULL);
	return (cl);
}
   2230 
/* convert a class pointer to the corresponding class handle */
/* the handle stored in the class is its own address (see clh_to_clp) */
static u_long
clp_to_clh(cl)
	struct jobs_class *cl;
{
	return (cl->cl_handle);
}
   2238 
#ifdef KLD_MODULE

/* device-switch entry used when JOBS is built as a loadable module */
static struct altqsw jobs_sw =
	{"jobs", jobsopen, jobsclose, jobsioctl};

/* register the module with the ALTQ framework */
ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);

#endif /* KLD_MODULE */
   2247 
   2248 #endif /* ALTQ3_COMPAT */
   2249 #endif /* ALTQ_JOBS */
   2250