Home | History | Annotate | Line # | Download | only in altq
altq_jobs.c revision 1.1.2.1
      1 /*	$NetBSD: altq_jobs.c,v 1.1.2.1 2006/03/18 12:08:18 peter Exp $	*/
      2 /*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/
      3 /*
      4  * Copyright (c) 2001, the Rector and Board of Visitors of the
      5  * University of Virginia.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms,
      9  * with or without modification, are permitted provided
     10  * that the following conditions are met:
     11  *
     12  * Redistributions of source code must retain the above
     13  * copyright notice, this list of conditions and the following
     14  * disclaimer.
     15  *
     16  * Redistributions in binary form must reproduce the above
     17  * copyright notice, this list of conditions and the following
     18  * disclaimer in the documentation and/or other materials provided
     19  * with the distribution.
     20  *
     21  * Neither the name of the University of Virginia nor the names
     22  * of its contributors may be used to endorse or promote products
     23  * derived from this software without specific prior written
     24  * permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
     27  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
     28  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     29  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     30  * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
     31  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     32  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
     35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     38  * THE POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 /*
     41  * JoBS - altq prototype implementation
     42  *
     43  * Author: Nicolas Christin <nicolas (at) cs.virginia.edu>
     44  *
     45  * JoBS algorithms originally devised and proposed by
     46  * Nicolas Christin and Jorg Liebeherr.
     47  * Grateful acknowledgments to Tarek Abdelzaher for his help and
     48  * comments, and to Kenjiro Cho for some helpful advice.
     49  * Contributed by the Multimedia Networks Group at the University
     50  * of Virginia.
     51  *
     52  * Papers and additional info can be found at
     53  * http://qosbox.cs.virginia.edu
     54  *
     55  */
     56 
     57 /*
     58  * JoBS queue
     59  */
     60 
     61 #include <sys/cdefs.h>
     62 __KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.1.2.1 2006/03/18 12:08:18 peter Exp $");
     63 
     64 #ifdef _KERNEL_OPT
     65 #include "opt_altq.h"
     66 #include "opt_inet.h"
     67 #endif
     68 
     69 #ifdef ALTQ_JOBS  /* jobs is enabled by ALTQ_JOBS option in opt_altq.h */
     70 
     71 #include <sys/param.h>
     72 #include <sys/malloc.h>
     73 #include <sys/mbuf.h>
     74 #include <sys/socket.h>
     75 #include <sys/sockio.h>
     76 #include <sys/systm.h>
     77 #include <sys/proc.h>
     78 #include <sys/errno.h>
     79 #include <sys/kernel.h>
     80 #include <sys/queue.h>
     81 
     82 #ifdef __FreeBSD__
     83 #include <sys/limits.h>
     84 #endif
     85 
     86 #include <net/if.h>
     87 #include <net/if_types.h>
     88 
     89 #include <altq/altq.h>
     90 #include <altq/altq_conf.h>
     91 #include <altq/altq_jobs.h>
     92 
     93 #ifdef ALTQ3_COMPAT
     94 /*
     95  * function prototypes
     96  */
     97 static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
     98 static int jobs_detach(struct jobs_if *);
     99 static int jobs_clear_interface(struct jobs_if *);
    100 static int jobs_request(struct ifaltq *, int, void *);
    101 static void jobs_purge(struct jobs_if *);
    102 static struct jobs_class *jobs_class_create(struct jobs_if *,
    103     int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
    104 static int jobs_class_destroy(struct jobs_class *);
    105 static int jobs_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
    106 static struct mbuf *jobs_dequeue(struct ifaltq *, int);
    107 
    108 static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if*);
    109 static struct mbuf *jobs_getq(struct jobs_class *);
    110 static struct mbuf *jobs_pollq(struct jobs_class *);
    111 static void jobs_purgeq(struct jobs_class *);
    112 
    113 static int jobscmd_if_attach(struct jobs_attach *);
    114 static int jobscmd_if_detach(struct jobs_interface *);
    115 static int jobscmd_add_class(struct jobs_add_class *);
    116 static int jobscmd_delete_class(struct jobs_delete_class *);
    117 static int jobscmd_modify_class(struct jobs_modify_class *);
    118 static int jobscmd_add_filter(struct jobs_add_filter *);
    119 static int jobscmd_delete_filter(struct jobs_delete_filter *);
    120 static int jobscmd_class_stats(struct jobs_class_stats *);
    121 static void get_class_stats(struct class_stats *, struct jobs_class *);
    122 static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
    123 static u_long clp_to_clh(struct jobs_class *);
    124 
    125 static TSLIST *tslist_alloc(void);
    126 static void tslist_destroy(struct jobs_class *);
    127 static int tslist_enqueue(struct jobs_class *, u_int64_t);
    128 static void tslist_dequeue(struct jobs_class *);
    129 static void tslist_drop(struct jobs_class *);
    130 
    131 static int enforce_wc(struct jobs_if *);
    132 static int64_t* adjust_rates_rdc(struct jobs_if *);
    133 static int64_t* assign_rate_drops_adc(struct jobs_if *);
    134 static int64_t* update_error(struct jobs_if *);
    135 static int min_rates_adc(struct jobs_if *);
    136 static int64_t proj_delay(struct jobs_if *, int);
    137 static int pick_dropped_rlc(struct jobs_if *);
    138 
    139 altqdev_decl(jobs);
    140 
    141 /* jif_list keeps all jobs_if's allocated. */
    142 static struct jobs_if *jif_list = NULL;
    143 
    144 typedef unsigned long long ull;
    145 
    146 /* setup functions */
    147 
/*
 * Allocate and initialize the JoBS state for one interface and link it
 * onto the global jif_list.  Returns the new state, or NULL on
 * allocation failure.
 *
 *   bandwidth: link bandwidth in bits per second
 *   qlimit:    buffer size in packets
 *   separate:  non-zero for per-class buffers, zero for one shared buffer
 */
static struct jobs_if *
jobs_attach(ifq, bandwidth, qlimit, separate)
	struct ifaltq *ifq;
	u_int bandwidth;
	u_int qlimit;
	u_int separate;
{
	struct jobs_if *jif;

	MALLOC(jif, struct jobs_if *, sizeof(struct jobs_if),
	       M_DEVBUF, M_WAITOK);

	if (jif == NULL)
	        return (NULL);

	(void)memset(jif, 0, sizeof(struct jobs_if));
	jif->jif_bandwidth = bandwidth;
	jif->jif_qlimit = qlimit;
	jif->jif_separate = separate;
#ifdef ALTQ_DEBUG
	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
	printf("JoBS buffer size = %d pkts [%s]\n",
	       (int)qlimit, separate?"separate buffers":"shared buffer");
#endif
	jif->jif_maxpri = -1;	/* no class created yet */
	jif->jif_ifq = ifq;

	/*
	 * benchmarking counters: worst-case/best-case/average cycle
	 * counts for the enqueue and dequeue paths.
	 */
	jif->wc_cycles_enqueue = 0;
	jif->avg_cycles_enqueue = 0;
	jif->avg_cycles2_enqueue = 0;
	jif->bc_cycles_enqueue = INFINITY;
	jif->wc_cycles_dequeue = 0;
	jif->avg_cycles_dequeue = 0;
	jif->avg_cycles2_dequeue = 0;
	jif->bc_cycles_dequeue = INFINITY;
	jif->total_enqueued = 0;
	jif->total_dequeued = 0;

	/* add this state to the jobs list */
	jif->jif_next = jif_list;
	jif_list = jif;

	return (jif);
}
    192 
/*
 * Tear down a JoBS interface: discard all of its filters and classes,
 * unlink the state from the global jif_list, and free it.
 * Always returns 0.
 */
static int
jobs_detach(jif)
	struct jobs_if *jif;
{
	(void)jobs_clear_interface(jif);

	/* remove this interface from the jif list */
	if (jif_list == jif)
		jif_list = jif->jif_next;
	else {
		struct jobs_if *p;

		for (p = jif_list; p != NULL; p = p->jif_next)
			if (p->jif_next == jif) {
				p->jif_next = jif->jif_next;
				break;
			}
		/* jif must have been on the list */
		ASSERT(p != NULL);
	}
	FREE(jif, M_DEVBUF);
	return (0);
}
    215 
    216 /*
    217  * bring the interface back to the initial state by discarding
    218  * all the filters and classes.
    219  */
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.  Always returns 0.
 */
static int
jobs_clear_interface(jif)
	struct jobs_if	*jif;
{
	struct jobs_class	*cl;
	int	pri;

	/* free the filters for this interface */
	acc_discard_filters(&jif->jif_classifier, NULL, 1);

	/*
	 * clear out the classes.  note: jobs_class_destroy() may lower
	 * jif_maxpri; the loop condition re-reads it on every pass, so
	 * the iteration stays within the remaining classes.
	 */
	for (pri = 0; pri <= jif->jif_maxpri; pri++)
		if ((cl = jif->jif_classes[pri]) != NULL)
			jobs_class_destroy(cl);

	return (0);
}
    237 
    238 static int
    239 jobs_request(ifq, req, arg)
    240 	struct ifaltq *ifq;
    241 	int req;
    242 	void *arg;
    243 {
    244 	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
    245 
    246 	switch (req) {
    247 	case ALTRQ_PURGE:
    248 		jobs_purge(jif);
    249 		break;
    250 	}
    251 	return (0);
    252 }
    253 
    254 /* discard all the queued packets on the interface */
    255 static void
    256 jobs_purge(jif)
    257 	struct jobs_if *jif;
    258 {
    259 	struct jobs_class *cl;
    260 	int pri;
    261 
    262 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
    263 		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
    264 			jobs_purgeq(cl);
    265 	}
    266 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
    267 		jif->jif_ifq->ifq_len = 0;
    268 }
    269 
/*
 * Create the class at priority `pri' on interface `jif', or
 * reinitialize it if it already exists (its queue is then purged).
 * A QoS parameter of -1 means "no constraint of that kind":
 *
 *   adc: absolute delay constraint, given in us (stored in clock ticks)
 *   rdc: relative (proportional) delay constraint coefficient
 *   alc: absolute loss constraint
 *   rlc: relative (proportional) loss constraint coefficient
 *   arc: absolute rate constraint, given in bps (stored internal units)
 *
 * Returns the class, or NULL on allocation failure.
 */
static struct jobs_class *
jobs_class_create(jif, pri, adc, rdc, alc, rlc, arc, flags)
	struct jobs_if *jif;
	int pri;
	int64_t adc, rdc, alc, rlc, arc;
	int flags;
{
	struct jobs_class *cl, *scan1, *scan2;
	int s;
	int class_exists1, class_exists2;
	int i, j;
	int64_t tmp[JOBS_MAXPRI];
	u_int64_t now;

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
		splx(s);
	} else {
		MALLOC(cl, struct jobs_class *, sizeof(struct jobs_class),
		       M_DEVBUF, M_WAITOK);
		if (cl == NULL)
			return (NULL);
		(void)memset(cl, 0, sizeof(struct jobs_class));

		MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
		       M_DEVBUF, M_WAITOK);
		if (cl->cl_q == NULL)
			goto err_ret;
		(void)memset(cl->cl_q, 0, sizeof(class_queue_t));

		/* per-class list of packet arrival timestamps */
		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	/* reset queue state and all per-class statistics */
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	cl->cl_period = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

	/*
	 * record which constraints are active; -1 disables a
	 * constraint and is replaced by a neutral value.
	 */
	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	cl->cl_rdc=rdc;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rlc=rlc;
	cl->cl_alc=alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;  /* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */

	if (cl->concerned_rdc) {
		/*
		 * tmp[i] = product of the rdc coefficients of all
		 * lower-priority classes that have an RDC; it is 1 for
		 * an existing class with no such predecessors and 0 for
		 * a missing class.
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		/* each class gets the product of everyone else's tmp[] */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	if (cl->concerned_rlc) {
		/* same computation as above, for the rlc coefficients */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return (cl);

 err_ret:
	/* only reached on the freshly-allocated path, never on modify */
	if (cl->cl_q != NULL)
		FREE(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		FREE(cl->arv_tm, M_DEVBUF);

	FREE(cl, M_DEVBUF);
	return (NULL);
}
    472 
/*
 * Destroy a class: discard its filters and queued packets, detach it
 * from its interface (recomputing jif_maxpri if this was the highest
 * priority), then free the timestamp list, the queue, and the class.
 * Always returns 0.
 */
static int
jobs_class_destroy(cl)
	struct jobs_class *cl;
{
	struct jobs_if *jif;
	int s, pri;

	s = splnet();

	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		jobs_purgeq(cl);

	jif = cl->cl_jif;
	jif->jif_classes[cl->cl_pri] = NULL;
	if (jif->jif_maxpri == cl->cl_pri) {
		/* scan downwards for the next occupied priority slot */
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (jif->jif_classes[pri] != NULL) {
				jif->jif_maxpri = pri;
				break;
			}
		if (pri < 0)
			jif->jif_maxpri = -1;	/* no classes left */
	}
	splx(s);

	tslist_destroy(cl);
	FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);
	return (0);
}
    506 
    507 /*
    508  * jobs_enqueue is an enqueue function to be registered to
    509  * (*altq_enqueue) in struct ifaltq.
    510  */
/*
 * jobs_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 *
 * Accounts the packet's arrival, enqueues it (jobs_addq may instead
 * drop it or a victim packet to honor loss constraints), then runs the
 * JoBS rate-allocation steps.  Returns 0 on success, ENOBUFS if the
 * incoming packet was dropped.
 */
static int
jobs_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	int len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t* delta_rate;
	u_int64_t tstamp1, tstamp2, cycles; /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	if (IFQ_IS_EMPTY(ifq)) {
		/* link went idle: restart the accounting epoch */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = jif->jif_default;

	len = m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals w/ overflow: byte counter wrapped, reset epoch */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
		/* re-account the current packet in the fresh epoch */
		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	/* current loss = (arrived - accepted) / arrived, fixed-point */
	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;

	/* for MDRR: update theoretical value of the output curve */

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now; /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS; /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	enforce_wc(jif);

	/*
	 * if the minimum rates needed for the absolute delay
	 * constraints cannot all be met, reassign rates/drops (ADC),
	 * then adjust rates for the relative delay constraints (RDC).
	 * note: `cl' is reused as a scratch pointer from here on.
	 */
	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
			  if ((cl = jif->jif_classes[pri]) != NULL &&
			      !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
			FREE(delta_rate, M_DEVBUF);
		}
	}

	delta_rate = adjust_rates_rdc(jif);

	if (delta_rate != NULL) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++)
			if ((cl = jif->jif_classes[pri]) != NULL &&
			    !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
		FREE(delta_rate, M_DEVBUF);
	}

	/* benchmarking: record the cycles spent in this enqueue */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue=cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue=cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return (return_flag);
}
    665 
    666 /*
    667  * jobs_dequeue is a dequeue function to be registered to
    668  * (*altq_dequeue) in struct ifaltq.
    669  *
    670  * note: ALTDQ_POLL returns the next packet without removing the packet
    671  *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
    672  *	ALTDQ_REMOVE must return the same packet if called immediately
    673  *	after ALTDQ_POLL.
    674  */
    675 
/*
 * jobs_dequeue selects the backlogged class whose actual transmissions
 * lag furthest behind its promised (theoretical) transmissions and
 * serves its head packet; on a real dequeue it also updates the class
 * delay statistics and discards the matching arrival timestamp.
 * Returns the packet, or NULL if the interface queue is empty.
 */
static struct mbuf *
jobs_dequeue(ifq, op)
	struct ifaltq	*ifq;
	int		op;
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
		  cl = jif->jif_classes[pri];
		  if (cl != NULL)
		    cl->idletime = now;
		}

		/* benchmarking before the early return */
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (NULL);
	}

	/*
	 * select the class whose actual tranmissions are the furthest
	 * from the promised transmissions
	 */

	max_error = -1;
	svc_class = -1;

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			/* error = promised output - actual output */
			error = (int64_t)cl->cl_rout_th.bytes
			    -(int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				/* first backlogged class found */
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	if (op == ALTDQ_POLL) {
		/* peek only: do not remove the packet or the timestamp */
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		/* delay of this packet = now - its arrival timestamp */
		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel  += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
	}
	if (cl != NULL)
		tslist_dequeue(cl);		/* dequeue the timestamp */

	/* benchmarking: record the cycles spent in this dequeue */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return (m);
}
    794 
/*
 * Add packet `m' to class `cl', enforcing the buffer limit:
 *
 *  - separate buffers: if the class queue is full, the incoming packet
 *    is simply dropped (no loss guarantees possible);
 *  - shared buffer: if the interface queue is full, a victim packet is
 *    chosen according to the loss constraints (ALC/RLC) — possibly the
 *    incoming packet itself.
 *
 * Returns 0 if the packet was queued without any drop, -1 if a packet
 * (incoming or victim) was dropped.
 */
static int
jobs_addq(cl, m, jif)
	struct jobs_class *cl;
	struct mbuf *m;
	struct jobs_if *jif;
{
	int victim;
	u_int64_t len;
	u_int64_t now;
	struct jobs_class* victim_class;

	victim = -1;
	victim_class = NULL;
	len = 0;

	now = read_machclk();

	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
		/*
		 * separate buffers: no guarantees on packet drops
		 * can be offered
		 * thus we drop the incoming packet
		 */
		len = (u_int64_t)m_pktlen(m);
		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->cl_rin, (int)len);
		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->st_rin, (int)len);
		cl->current_loss += (len << SCALE_LOSS)
		    /cl->cl_arrival.bytes;
		m_freem(m);
		return (-1);

	} else if (!jif->jif_separate
		   && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
		/* shared buffer: supports guarantees on losses */
		if (!cl->concerned_rlc) {
			if (!cl->concerned_alc) {
				/*
				 * no ALC, no RLC on this class:
				 * drop the incoming packet
				 */
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->cl_rin, (int)len);
				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->st_rin, (int)len);
				cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
				m_freem(m);
				return (-1);
			} else {
				/*
				 * no RLC, but an ALC:
				 * drop the incoming packet if possible
				 */
				len = (u_int64_t)m_pktlen(m);
				if (cl->current_loss + (len << SCALE_LOSS)
				    / cl->cl_arrival.bytes <= cl->cl_alc) {
					/* dropping it keeps us under the ALC */
					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->cl_rin, (int)len);
					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->st_rin, (int)len);
					cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
					m_freem(m);
					return (-1);
				} else {
					/*
					 * the ALC would be violated:
					 * pick another class
					 */
					_addq(cl->cl_q, m);
					tslist_enqueue(cl, now);

					victim = pick_dropped_rlc(jif);

					if (victim == -1) {
						/*
						 * something went wrong
						 * let us discard
						 * the incoming packet,
						 * regardless of what
						 * may happen...
						 */
						victim_class = cl;
					} else
						victim_class = jif->jif_classes[victim];

					if (victim_class != NULL) {
						/*
						 * test for safety
						 * purposes...
						 * it must be true
						 */
						m = _getq_tail(victim_class->cl_q);
						len = (u_int64_t)m_pktlen(m);
						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
						victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
						m_freem(m); /* the packet is trashed here */
						tslist_drop(victim_class); /* and its timestamp as well */
					}
					return (-1);
				}
			}
		} else {
			/*
			 * RLC on that class:
			 * pick class according to RLCs
			 */
			_addq(cl->cl_q, m);
			tslist_enqueue(cl, now);

			victim = pick_dropped_rlc(jif);
			if (victim == -1) {
				/*
				 * something went wrong
				 * let us discard the incoming packet,
				 * regardless of what may happen...
				 */
				victim_class = cl;
			} else
				victim_class = jif->jif_classes[victim];

			if (victim_class != NULL) {
				/*
				 * test for safety purposes...
				 * it must be true
				 */
				m = _getq_tail(victim_class->cl_q);
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
				victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
				m_freem(m); /* the packet is trashed here */
				tslist_drop(victim_class); /* and its timestamp as well */
			}
			return (-1);
		}
	}
	/* else: no drop */

	_addq(cl->cl_q, m);
	tslist_enqueue(cl, now);

	return (0);
}
    945 
    946 static struct mbuf *
    947 jobs_getq(cl)
    948 	struct jobs_class *cl;
    949 {
    950 	return _getq(cl->cl_q);
    951 }
    952 
    953 static struct mbuf *
    954 jobs_pollq(cl)
    955 	struct jobs_class *cl;
    956 {
    957 	return qhead(cl->cl_q);
    958 }
    959 
    960 static void
    961 jobs_purgeq(cl)
    962 	struct jobs_class *cl;
    963 {
    964 	struct mbuf *m;
    965 
    966 	if (qempty(cl->cl_q))
    967 		return;
    968 
    969 	while ((m = _getq(cl->cl_q)) != NULL) {
    970 		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
    971 		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
    972 		m_freem(m);
    973 		tslist_drop(cl);
    974 	}
    975 	ASSERT(qlen(cl->cl_q) == 0);
    976 }
    977 
    978 /*
    979  * timestamp list support routines
    980  *
    981  * this implementation has been revamped and
    982  * now uses a TAILQ structure.
    983  * timestamp list holds class timestamps
    984  * there is one timestamp list per class.
    985  */
    986 static TSLIST *
    987 tslist_alloc()
    988 {
    989 	TSLIST *list_init;
    990 
    991 	MALLOC(list_init, TSLIST *, sizeof(TSLIST), M_DEVBUF, M_WAITOK);
    992 	TAILQ_INIT(list_init);
    993 	return (list_init);
    994 }
    995 
    996 static void
    997 tslist_destroy(cl)
    998 	struct jobs_class *cl;
    999 {
   1000 	while (tslist_first(cl->arv_tm) != NULL)
   1001 		tslist_dequeue(cl);
   1002 
   1003 	FREE(cl->arv_tm, M_DEVBUF);
   1004 }
   1005 
   1006 static int
   1007 tslist_enqueue(cl, arv)
   1008 	struct jobs_class *cl;
   1009 	u_int64_t arv;
   1010 {
   1011 	TSENTRY *pushed;
   1012 	MALLOC(pushed, TSENTRY*, sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
   1013 	if (pushed == NULL)
   1014 		return (0);
   1015 
   1016 	pushed->timestamp = arv;
   1017 	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
   1018 	return (1);
   1019 }
   1020 
   1021 static void
   1022 tslist_dequeue(cl)
   1023      struct jobs_class *cl;
   1024 {
   1025 	TSENTRY *popped;
   1026 	popped = tslist_first(cl->arv_tm);
   1027 	if (popped != NULL) {
   1028 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
   1029 		  FREE(popped, M_DEVBUF);
   1030 	}
   1031 	return;
   1032 }
   1033 
   1034 static void
   1035 tslist_drop(cl)
   1036 	struct jobs_class *cl;
   1037 {
   1038 	TSENTRY *popped;
   1039 	popped = tslist_last(cl->arv_tm);
   1040 	if (popped != NULL) {
   1041 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
   1042 		  FREE(popped, M_DEVBUF);
   1043 	}
   1044 	return;
   1045 }
   1046 
   1047 /*
   1048  * rate allocation support routines
   1049  */
   1050 /*
   1051  * enforce_wc: enforce that backlogged classes have non-zero
   1052  * service rate, and that non-backlogged classes have zero
   1053  * service rate.
   1054  */
   1055 
   1056 static int
   1057 enforce_wc(jif)
   1058 	struct jobs_if *jif;
   1059 {
   1060 	struct jobs_class *cl;
   1061 
   1062 	int64_t active_classes;
   1063 	int pri;
   1064 	int is_backlogged, class_exists, updated;
   1065 
   1066 	updated = 0;
   1067 	active_classes = 0;
   1068 
   1069 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1070 		cl = jif->jif_classes[pri];
   1071 		class_exists = (cl != NULL);
   1072 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1073 
   1074 		if (is_backlogged)
   1075 			active_classes++;
   1076 		if ((is_backlogged && cl->service_rate <= 0)
   1077 		    ||(class_exists
   1078 		       && !is_backlogged && cl->service_rate > 0))
   1079 			updated = 1;
   1080 	}
   1081 
   1082 	if (updated) {
   1083 		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1084 			cl = jif->jif_classes[pri];
   1085 			class_exists = (cl != NULL);
   1086 			is_backlogged = (class_exists && !qempty(cl->cl_q));
   1087 
   1088 			if (class_exists && !is_backlogged)
   1089 				cl->service_rate = 0;
   1090 			else if (is_backlogged)
   1091 				cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
   1092 		}
   1093 	}
   1094 
   1095 	return (updated);
   1096 }
   1097 
   1098 /*
   1099  * adjust_rates_rdc: compute the service rates adjustments
   1100  * needed to realize the desired proportional delay differentiation.
 * essentially, the rate adjustment delta_rate = prop_control*error,
   1102  * where error is the difference between the measured "weighted"
   1103  * delay and the mean of the weighted delays. see paper for more
   1104  * information.
   1105  * prop_control has slightly changed since the INFOCOM paper,
   1106  * this condition seems to provide better results.
   1107  */
   1108 
   1109 static int64_t *
   1110 adjust_rates_rdc(jif)
   1111 	struct jobs_if *jif;
   1112 {
   1113 	int64_t* result;
   1114 	int64_t  credit, available, lower_bound, upper_bound;
   1115 	int64_t  bk;
   1116 	int i, j;
   1117 	int rdc_classes, active_classes;
   1118 	int class_exists, is_backlogged;
   1119 	struct jobs_class *cl;
   1120 	int64_t* error;
   1121 	int64_t prop_control;
   1122 	u_int64_t max_prod;
   1123 	u_int64_t min_share;
   1124 	u_int64_t max_avg_pkt_size;
   1125 
   1126 	/*
   1127 	 * min_share is scaled
   1128 	 * to avoid dealing with doubles
   1129 	 */
   1130 	active_classes = 0;
   1131 	rdc_classes = 0;
   1132 	max_prod = 0;
   1133 	max_avg_pkt_size = 0;
   1134 
   1135 	upper_bound = (int64_t)jif->jif_bandwidth;
   1136 
   1137 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1138 		cl = jif->jif_classes[i];
   1139 		class_exists = (cl != NULL);
   1140 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1141 		if (is_backlogged) {
   1142 			active_classes++;
   1143 			if (cl->concerned_rdc)
   1144 				rdc_classes++;
   1145 			else
   1146 				upper_bound -=
   1147 				    internal_to_bps(cl->service_rate);
   1148 		}
   1149 	}
   1150 
   1151 	MALLOC(result, int64_t*, (jif->jif_maxpri+1)*sizeof(int64_t),
   1152 	    M_DEVBUF, M_WAITOK);
   1153 
   1154 	if (result == NULL)
   1155 		return NULL;
   1156 
   1157 	for (i = 0; i <= jif->jif_maxpri; i++)
   1158 		result[i] = 0;
   1159 
   1160 	if (upper_bound <= 0 || rdc_classes == 0)
   1161 		return result;
   1162 
   1163 	credit = 0;
   1164 	lower_bound = 0;
   1165 	min_share = ((u_int64_t)1 << SCALE_SHARE);
   1166 	bk = 0;
   1167 
   1168 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1169 		cl = jif->jif_classes[i];
   1170 		class_exists = (cl != NULL);
   1171 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1172 		if (is_backlogged && cl->concerned_rdc)
   1173 			bk += cl->cl_rin.bytes;
   1174 	}
   1175 
   1176 	if (bk == 0)
   1177 		return (result);
   1178 
   1179 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1180 		cl = jif->jif_classes[i];
   1181 		class_exists = (cl != NULL);
   1182 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1183 		if (is_backlogged
   1184 		    && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
   1185 			min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
   1186 		if (is_backlogged && cl->concerned_rdc
   1187 		    && cl->delay_prod_others > max_prod)
   1188 			max_prod = cl->delay_prod_others;
   1189 
   1190 		if (is_backlogged && cl->concerned_rdc
   1191 		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
   1192 			max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
   1193 	}
   1194 
   1195 	error = update_error(jif);
   1196 	if (!error)
   1197 		return (NULL);
   1198 
   1199 	prop_control = (upper_bound*upper_bound*min_share)
   1200 	    /(max_prod*(max_avg_pkt_size << 2));
   1201 
   1202 	prop_control = bps_to_internal(ticks_to_secs(prop_control)); /* in BT-1 */
   1203 
   1204 	credit = 0;
   1205 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1206 		cl = jif->jif_classes[i];
   1207 		class_exists = (cl != NULL);
   1208 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1209 		if (is_backlogged && cl->concerned_rdc) {
   1210 			result[i] = -prop_control*error[i]; /* in BT-1 */
   1211 			result[i] >>= (SCALE_SHARE);
   1212 		}
   1213 	}
   1214 
   1215 	FREE(error, M_DEVBUF); /* we don't need these anymore */
   1216 
   1217 	/* saturation */
   1218 
   1219 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1220 		cl = jif->jif_classes[i];
   1221 		class_exists = (cl != NULL);
   1222 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1223 
   1224 		if (is_backlogged && cl->concerned_rdc)
   1225 			lower_bound += cl->min_rate_adc;
   1226 		/*
   1227 		 * note: if there's no ADC or ARC on cl,
   1228 		 * this is equal to zero, which is fine
   1229 		 */
   1230 	}
   1231 
   1232 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1233 		cl = jif->jif_classes[i];
   1234 		class_exists = (cl != NULL);
   1235 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1236 
   1237 		if (is_backlogged && cl->concerned_rdc
   1238 		    && result[i] + cl->service_rate > upper_bound) {
   1239 			for (j = 0; j <= jif->jif_maxpri; j++) {
   1240 				cl = jif->jif_classes[j];
   1241 				class_exists = (cl != NULL);
   1242 				is_backlogged = (class_exists
   1243 						 && !qempty(cl->cl_q));
   1244 				if (is_backlogged && cl->concerned_rdc) {
   1245 					if (j == i)
   1246 						result[j] = upper_bound
   1247 						    -cl->service_rate
   1248 						    + cl->min_rate_adc
   1249 						    - lower_bound;
   1250 					else
   1251 						result[j] =
   1252 						    -cl->service_rate
   1253 						    +cl->min_rate_adc;
   1254 				}
   1255 			}
   1256 			return result;
   1257 		}
   1258 
   1259 		cl = jif->jif_classes[i];
   1260 		/* do this again since it may have been modified */
   1261 		class_exists = (cl != NULL);
   1262 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1263 
   1264 		if (is_backlogged && cl->concerned_rdc
   1265 		    && result[i] + cl->service_rate < cl->min_rate_adc) {
   1266 			credit += cl->service_rate+result[i]
   1267 			    -cl->min_rate_adc;
   1268 			/* "credit" is in fact a negative number */
   1269 			result[i] = -cl->service_rate+cl->min_rate_adc;
   1270 		}
   1271 	}
   1272 
   1273 	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
   1274 		cl = jif->jif_classes[i];
   1275 		class_exists = (cl != NULL);
   1276 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1277 
   1278 		if (is_backlogged && cl->concerned_rdc) {
   1279 			available = result[i]
   1280 			    + cl->service_rate-cl->min_rate_adc;
   1281 			if (available >= -credit) {
   1282 				result[i] += credit;
   1283 				credit = 0;
   1284 			} else {
   1285 				result[i] -= available;
   1286 				credit += available;
   1287 			}
   1288 		}
   1289 	}
   1290 	return result;
   1291 }
   1292 
   1293 /*
   1294  * assign_rate_drops_adc: returns the adjustment needed to
   1295  * the service rates to meet the absolute delay/rate constraints
   1296  * (delay/throughput bounds) and drops traffic if need be.
   1297  * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
   1298  */
   1299 
   1300 static int64_t *
   1301 assign_rate_drops_adc(jif)
   1302 	struct jobs_if* jif;
   1303 {
   1304 	int64_t* result;
   1305 	int class_exists, is_backlogged;
   1306 	struct jobs_class *cl;
   1307 
   1308 	int64_t *c, *n, *k;
   1309 	int64_t *available;
   1310 
   1311 	int lowest, highest;
   1312 	int keep_going;
   1313 	int i;
   1314 	u_int64_t now, oldest_arv;
   1315 	int64_t	remaining_time;
   1316 	struct mbuf* pkt;
   1317 	u_int64_t len;
   1318 
   1319 	now = read_machclk();
   1320 	oldest_arv = now;
   1321 
   1322 	MALLOC(result, int64_t*, (jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
   1323 	if (result == NULL)
   1324 		return NULL;
   1325 	MALLOC(c, int64_t*, (jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1326 	if (c == NULL)
   1327 		return NULL;
   1328 	MALLOC(n, int64_t*, (jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1329 	if (n == NULL)
   1330 		return NULL;
   1331 	MALLOC(k, int64_t*, (jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
   1332 	if (k == NULL)
   1333 		return NULL;
   1334 	MALLOC(available, int64_t*, (jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
   1335 	if (available == NULL)
   1336 		return NULL;
   1337 
   1338 	for (i = 0;i <= jif->jif_maxpri; i++)
   1339 		result[i] = 0;
   1340 
   1341 	keep_going = 1;
   1342 
   1343 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1344 		cl = jif->jif_classes[i];
   1345 		class_exists = (cl != NULL);
   1346 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1347 
   1348 		if (is_backlogged) {
   1349 			if (cl->concerned_adc) {
   1350 				/*
   1351 				 * get the arrival time of the oldest
   1352 				 * class-i packet
   1353 				 */
   1354 				if (tslist_first(cl->arv_tm) == NULL)
   1355 					oldest_arv = now; /* NOTREACHED */
   1356 				else
   1357 					oldest_arv = (tslist_first(cl->arv_tm))->timestamp;
   1358 
   1359 				n[i] = cl->service_rate;
   1360 				k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));
   1361 
   1362 				remaining_time = cl->cl_adc
   1363 				    - (int64_t)delay_diff(now, oldest_arv);
   1364 				if (remaining_time > 0) {
   1365 					c[i] = remaining_time;
   1366 					/*
   1367 					 * c is the remaining time before
   1368 					 * the deadline is violated
   1369 					 * (in ticks)
   1370 					 */
   1371 					available[i] = n[i]-k[i]/c[i];
   1372 				} else {
   1373 					/*
   1374 					 * deadline has passed...
   1375 					 * we allocate the whole link
   1376 					 * capacity to hopefully
   1377 					 * solve the problem
   1378 					 */
   1379 					c[i] = 0;
   1380 					available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth));
   1381 				}
   1382 				if (cl->concerned_arc) {
   1383 					/*
   1384 					 * there's an ARC in addition
   1385 					 * to the ADC
   1386 					 */
   1387 					if (n[i] - cl->cl_arc < available[i])
   1388 						available[i] = n[i]
   1389 						    - cl->cl_arc;
   1390 				}
   1391 			} else if (cl->concerned_arc) {
   1392 				/*
   1393 				 * backlogged, concerned by ARC
   1394 				 * but not by ADC
   1395 				 */
   1396 				n[i] = cl->service_rate;
   1397 				available[i] = n[i] - cl->cl_arc;
   1398 			} else {
   1399 				/*
   1400 				 * backlogged but not concerned by ADC
   1401 				 * or ARC -> can give everything
   1402 				 */
   1403 				n[i] = cl->service_rate;
   1404 				available[i] = n[i];
   1405 			}
   1406 		} else {
   1407 			/* not backlogged */
   1408 			n[i] = 0;
   1409 			k[i] = 0;
   1410 			c[i] = 0;
   1411 			if (class_exists)
   1412 				available[i] = cl->service_rate;
   1413 			else
   1414 				available[i] = 0;
   1415 		}
   1416 	}
   1417 
   1418 	/* step 1: adjust rates (greedy algorithm) */
   1419 
   1420 	highest = 0;
   1421 	lowest  = jif->jif_maxpri;
   1422 
   1423 	while (highest < jif->jif_maxpri+1 && available[highest] >= 0)
   1424 		highest++; /* which is the highest class that needs more service? */
   1425 	while (lowest > 0 && available[lowest] <= 0)
   1426 		lowest--;  /* which is the lowest class that needs less service? */
   1427 
   1428 	while (highest != jif->jif_maxpri+1 && lowest != -1) {
   1429 		/* give the excess service from lowest to highest */
   1430 		if (available[lowest]+available[highest] > 0) {
   1431 			/*
   1432 			 * still some "credit" left
   1433 			 * give all that is needed by "highest"
   1434 			 */
   1435 			n[lowest]  += available[highest];
   1436 			n[highest] -= available[highest];
   1437 			available[lowest]  += available[highest];
   1438 			available[highest] = 0;
   1439 
   1440 			while (highest < jif->jif_maxpri+1
   1441 			       && available[highest] >= 0)
   1442 				highest++;  /* which is the highest class that needs more service now? */
   1443 
   1444 		} else if (available[lowest]+available[highest] == 0) {
   1445 			/* no more credit left but it's fine */
   1446 			n[lowest]  += available[highest];
   1447 			n[highest] -= available[highest];
   1448 			available[highest] = 0;
   1449 			available[lowest]  = 0;
   1450 
   1451 			while (highest < jif->jif_maxpri+1
   1452 			       && available[highest] >= 0)
   1453 				highest++;  /* which is the highest class that needs more service? */
   1454 			while (lowest >= 0 && available[lowest] <= 0)
   1455 				lowest--;   /* which is the lowest class that needs less service? */
   1456 
   1457 		} else if (available[lowest]+available[highest] < 0) {
   1458 			/*
   1459 			 * no more credit left and we need to switch
   1460 			 * to another class
   1461 			 */
   1462 			n[lowest]  -= available[lowest];
   1463 			n[highest] += available[lowest];
   1464 			available[highest] += available[lowest];
   1465 			available[lowest]  = 0;
   1466 
   1467 			while ((lowest >= 0)&&(available[lowest] <= 0))
   1468 				lowest--;  /* which is the lowest class that needs less service? */
   1469 		}
   1470 	}
   1471 
   1472 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1473 		cl = jif->jif_classes[i];
   1474 		class_exists = (cl != NULL);
   1475 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1476 		if (is_backlogged) {
   1477 			result[i] = n[i] - cl->service_rate;
   1478 		} else {
   1479 			if (class_exists)
   1480 				result[i] = - cl->service_rate;
   1481 			else
   1482 				result[i] = 0;
   1483 		}
   1484 	}
   1485 
   1486 	/* step 2: adjust drops (for ADC) */
   1487 
   1488 	if (highest != jif->jif_maxpri+1) {
   1489 		/* some class(es) still need(s) additional service */
   1490 		for (i = 0; i <= jif->jif_maxpri; i++) {
   1491 			cl = jif->jif_classes[i];
   1492 			class_exists = (cl != NULL);
   1493 			is_backlogged = (class_exists
   1494 					 && !qempty(cl->cl_q));
   1495 			if (is_backlogged && available[i] < 0) {
   1496 				if (cl->concerned_adc) {
   1497 					k[i] = c[i]*n[i];
   1498 					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
   1499 						pkt = qtail(cl->cl_q);
   1500 						if (pkt != NULL) {
   1501 							/* "safeguard" test (a packet SHOULD be in there) */
   1502 							len = (u_int64_t)m_pktlen(pkt);
   1503 							/* access packet at the tail */
   1504 							if (cl->concerned_alc
   1505 							    && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
   1506 								keep_going = 0; /* relax ADC in favor of ALC */
   1507 							} else {
   1508 								/* drop packet at the tail of the class-i queue, update values */
   1509 								pkt = _getq_tail(cl->cl_q);
   1510 								len = (u_int64_t)m_pktlen(pkt);
   1511 								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
   1512 								PKTCNTR_SUB(&cl->cl_rin, (int)len);
   1513 								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
   1514 								PKTCNTR_SUB(&cl->st_rin, (int)len);
   1515 								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
   1516 								m_freem(pkt); /* the packet is trashed here */
   1517 								tslist_drop(cl);
   1518 								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
   1519 							}
   1520 						} else
   1521 							keep_going = 0; /* NOTREACHED */
   1522 					}
   1523 					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
   1524 				}
   1525 				/*
   1526 				 * n[i] is the max rate we can give.
   1527 				 * the above drops as much as possible
   1528 				 * to respect a delay bound.
   1529 				 * for throughput bounds,
   1530 				 * there's nothing that can be done
   1531 				 * after the greedy reallocation.
   1532 				 */
   1533 			}
   1534 		}
   1535 	}
   1536 
   1537 	/* update the values of min_rate_adc */
   1538 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1539 		cl = jif->jif_classes[i];
   1540 		class_exists = (cl != NULL);
   1541 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1542 		if (is_backlogged && cl->concerned_adc) {
   1543 			if (c[i] != 0) {
   1544 				if (cl->concerned_adc
   1545 				    && !cl->concerned_arc)
   1546 					cl->min_rate_adc = k[i]/c[i];
   1547 				else
   1548 					cl->min_rate_adc = n[i];
   1549 			} else
   1550 				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
   1551 		} else if (is_backlogged && cl->concerned_arc)
   1552 			cl->min_rate_adc = n[i]; /* the best we can give */
   1553 		else {
   1554 			if (class_exists)
   1555 				cl->min_rate_adc = 0;
   1556 		}
   1557 	}
   1558 
   1559 	FREE(c, M_DEVBUF);
   1560 	FREE(n, M_DEVBUF);
   1561 	FREE(k, M_DEVBUF);
   1562 	FREE(available, M_DEVBUF);
   1563 
   1564 	return (result);
   1565 }
   1566 
   1567 /*
   1568  * update_error: returns the difference between the mean weighted
   1569  * delay and the weighted delay for each class. if proportional
   1570  * delay differentiation is perfectly achieved, it should return
   1571  * zero for each class.
   1572  */
static int64_t *
update_error(jif)
	struct jobs_if *jif;
{
	int i;
	int active_classes, backlogged_classes;
	u_int64_t mean_weighted_delay;
	/*
	 * NOTE(review): delays[] has JOBS_MAXPRI slots but is indexed
	 * up to jif->jif_maxpri below -- assumes jif_maxpri is always
	 * strictly less than JOBS_MAXPRI; verify at class creation.
	 */
	u_int64_t delays[JOBS_MAXPRI];
	int64_t* error;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	/* caller owns (and must FREE) the returned array */
	MALLOC(error, int64_t*, sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF, M_WAITOK);

	if (error == NULL)
		return NULL;

	(void)memset(error, 0, sizeof(int64_t)*(jif->jif_maxpri+1));

	mean_weighted_delay = 0;
	active_classes = 0;
	backlogged_classes = 0;

	/*
	 * first pass: record the projected delay of each backlogged
	 * RDC class and accumulate the weighted-delay sum.
	 * delays[i] is only written under (is_backlogged &&
	 * concerned_rdc); the second pass reads it under the exact
	 * same predicate, so no uninitialized slot is consumed.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			backlogged_classes++;
			if (cl->concerned_rdc) {
				delays[i] = proj_delay(jif, i);
				mean_weighted_delay += cl->delay_prod_others*delays[i];
				active_classes ++;
			}
		}
	}

	/* no RDC class backlogged: return the all-zero error vector */
	if (active_classes == 0)
		return error;
	else
		mean_weighted_delay /= active_classes;

	/*
	 * second pass: per-class error = mean weighted delay minus
	 * this class's weighted delay (zero when RDC is perfectly met).
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
		else
			error[i] = 0; /*
				       * either the class isn't concerned,
				       * or it's not backlogged.
				       * in any case, the rate shouldn't
				       * be adjusted.
				       */
	}
	return error;
}
   1633 
   1634 /*
   1635  * min_rates_adc: computes the minimum service rates needed in
   1636  * each class to meet the absolute delay bounds. if, for any
   1637  * class i, the current service rate of class i is less than
   1638  * the computed minimum service rate, this function returns
   1639  * false, true otherwise.
   1640  */
static int
min_rates_adc(jif)
	struct jobs_if *jif;
{
	int result;
	int i;
	int class_exists, is_backlogged;
	int64_t remaining_time;
	struct jobs_class *cl;
	/* start optimistic: all constraints assumed satisfiable */
	result = 1;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			/* time left before the class-i delay bound is hit */
			remaining_time = cl->cl_adc - proj_delay(jif, i);
			if (remaining_time > 0 ) {
				/* min rate needed for ADC */
				cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
				if (cl->concerned_arc
				    && cl->cl_arc > cl->min_rate_adc) {
					/* min rate needed for ADC + ARC */
					cl->min_rate_adc = cl->cl_arc;
				}
			} else {
				/* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
			}
      		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = cl->cl_arc; 			/* no ADC, an ARC */
		else if (class_exists)
			cl->min_rate_adc = 0;	/*
						 * either the class is not
						 * backlogged
						 * or there is no ADC and
						 * no ARC
						 */
		/*
		 * a backlogged class whose required minimum exceeds its
		 * current rate means the constraints are not met yet
		 */
		if (is_backlogged && cl->min_rate_adc > cl->service_rate)
			result = 0;
	}

	return result;
}
   1685 
   1686 /*
   1687  * proj_delay: computes the difference between the current time
   1688  * and the time the oldest class-i packet still in the class-i
 * queue arrived in the system.
   1690  */
   1691 static int64_t
   1692 proj_delay(jif, i)
   1693 	struct jobs_if *jif;
   1694 	int i;
   1695 {
   1696 	u_int64_t now;
   1697 	int class_exists, is_backlogged;
   1698 	struct jobs_class *cl;
   1699 
   1700 	now = read_machclk();
   1701 	cl = jif->jif_classes[i];
   1702 	class_exists = (cl != NULL);
   1703 	is_backlogged = (class_exists && !qempty(cl->cl_q));
   1704 
   1705 	if (is_backlogged)
   1706 		return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));
   1707 
   1708 	return (0); /* NOTREACHED */
   1709 }
   1710 
   1711 /*
   1712  * pick_dropped_rlc: returns the class index of the class to be
   1713  * dropped for meeting the relative loss constraints.
   1714  */
static int
pick_dropped_rlc(jif)
	struct jobs_if *jif;
{
	int64_t mean;
	int64_t* loss_error;
	int i, active_classes, backlogged_classes;
	int class_exists, is_backlogged;
	int class_dropped;
	int64_t max_error;
	int64_t max_alc;
	struct mbuf* pkt;
	struct jobs_class *cl;
	u_int64_t len;

	/* scratch array: per-class deviation from the mean loss */
	MALLOC(loss_error, int64_t *, sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF, M_WAITOK);

	if (loss_error == NULL)
		return -1;

	class_dropped = -1;
	max_error = 0;
	mean = 0;
	active_classes = 0;
	backlogged_classes = 0;

	/* mean weighted loss over the backlogged RLC classes */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			backlogged_classes ++;
			if (cl->concerned_rlc) {
				mean += cl->loss_prod_others
				    * cl->current_loss;
				active_classes++;
			}
		}
	}

	if (active_classes > 0)
		mean /= active_classes;

	if (active_classes == 0)
		class_dropped = JOBS_MAXPRI+1; /*
						* no classes are concerned
						* by RLCs (JOBS_MAXPRI+1
						* means "ignore RLC" here)
						*/
	else {
		/*
		 * NOTE(review): this loop tests cl->cl_rlc while the
		 * mean above tests cl->concerned_rlc -- presumably
		 * equivalent, but verify against the class setup code.
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));

			if ((is_backlogged)&&(cl->cl_rlc))
				loss_error[i]=cl->loss_prod_others
				    *cl->current_loss-mean;
			else
				loss_error[i] = INFINITY;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && loss_error[i] <= max_error) {
				/*
				 * find out which class is the most
				 * below the mean.
				 * it's the one that needs to be dropped
				 * ties are broken in favor of the higher
				 * priority classes (i.e., if two classes
				 * present the same deviation, the lower
				 * priority class will get dropped).
				 */
				max_error = loss_error[i];
				class_dropped = i;
			}
		}

		if (class_dropped != -1) {
			cl = jif->jif_classes[class_dropped];
			pkt = qtail(cl->cl_q);
			if (pkt != NULL) {
				/*
				 * "safeguard" test (a packet SHOULD be
				 * in there)
				 */
				len = (u_int64_t)m_pktlen(pkt);
				/*
				 * access packet at the tail
				 * NOTE(review): divides by
				 * cl_arrival.bytes -- presumably
				 * non-zero for a backlogged class;
				 * verify.
				 */
				if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
					/*
					 * the class to drop for meeting
					 * the RLC will defeat the ALC:
					 * ignore RLC.
					 */
					class_dropped = JOBS_MAXPRI+1;
				}
			} else
				class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */
		} else
			class_dropped = JOBS_MAXPRI+1;
	}

	/*
	 * RLC was ignored (or inapplicable): fall back to the class
	 * with the most headroom with respect to its absolute loss
	 * constraint (or to 100% loss if it has no ALC), scanning
	 * from lowest priority upward.
	 */
	if (class_dropped == JOBS_MAXPRI+1) {
		max_alc = -((int64_t)1 << SCALE_LOSS);
		for (i = jif->jif_maxpri; i >= 0; i--) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged) {
				if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
					max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
					class_dropped = i;
				} else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) {
					max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss;
					class_dropped = i;
				}
			}
		}
	}

	FREE(loss_error, M_DEVBUF);
	return (class_dropped);
}
   1844 
   1845 /*
   1846  * ALTQ binding/setup functions
   1847  */
   1848 /*
   1849  * jobs device interface
   1850  */
   1851 int
   1852 jobsopen(dev, flag, fmt, l)
   1853 	dev_t dev;
   1854 	int flag, fmt;
   1855 	struct lwp *l;
   1856 {
   1857 	if (machclk_freq == 0)
   1858 		init_machclk();
   1859 
   1860 	if (machclk_freq == 0) {
   1861 		printf("jobs: no CPU clock available!\n");
   1862 		return (ENXIO);
   1863 	}
   1864 	/* everything will be done when the queueing scheme is attached. */
   1865 	return 0;
   1866 }
   1867 
int
jobsclose(dev, flag, fmt, l)
	dev_t dev;
	int flag, fmt;
	struct lwp *l;
{
	struct jobs_if *jif;
	int err, error = 0;

	/*
	 * Device close: tear down every attached JoBS instance.
	 * Returns the first non-zero error encountered (teardown of
	 * the remaining instances is still attempted).
	 *
	 * NOTE(review): the loop terminates only if jobs_detach() (or
	 * altq_detach()) unlinks jif from jif_list even on error paths
	 * -- verify, otherwise a persistent failure would spin here.
	 */
	while ((jif = jif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(jif->jif_ifq))
			altq_disable(jif->jif_ifq);

		err = altq_detach(jif->jif_ifq);
		if (err == 0)
			err = jobs_detach(jif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}
   1891 
int
jobsioctl(dev, cmd, addr, flag, l)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
	struct lwp *l;
{
	struct jobs_if *jif;
	struct jobs_interface *ifacep;
	struct proc *p = l->l_proc;
	int	error = 0;

	/*
	 * Character-device ioctl dispatcher for the JoBS discipline.
	 * Every command except JOBS_GETSTATS requires super-user
	 * privilege; the command argument in addr is cast to the
	 * per-command request structure.
	 */

	/* check super-user privilege */
	switch (cmd) {
	case JOBS_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case JOBS_IF_ATTACH:
		error = jobscmd_if_attach((struct jobs_attach *)addr);
		break;

	case JOBS_IF_DETACH:
		error = jobscmd_if_detach((struct jobs_interface *)addr);
		break;

	case JOBS_ENABLE:
	case JOBS_DISABLE:
	case JOBS_CLEAR:
		/* these three first resolve the named interface */
		ifacep = (struct jobs_interface *)addr;
		if ((jif = altq_lookup(ifacep->jobs_ifname,
				       ALTQT_JOBS)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {
		case JOBS_ENABLE:
			/* refuse to enable without a default class */
			if (jif->jif_default == NULL) {
#if 1
				printf("jobs: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(jif->jif_ifq);
			break;

		case JOBS_DISABLE:
			error = altq_disable(jif->jif_ifq);
			break;

		case JOBS_CLEAR:
			jobs_clear_interface(jif);
			break;
		}
		break;

		/* NOTE(review): this case is over-indented relative to
		 * its siblings but belongs to the outer switch. */
		case JOBS_ADD_CLASS:
			error = jobscmd_add_class((struct jobs_add_class *)addr);
			break;

	case JOBS_DEL_CLASS:
		error = jobscmd_delete_class((struct jobs_delete_class *)addr);
		break;

	case JOBS_MOD_CLASS:
		error = jobscmd_modify_class((struct jobs_modify_class *)addr);
		break;

	case JOBS_ADD_FILTER:
		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
		break;

	case JOBS_DEL_FILTER:
		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
		break;

	case JOBS_GETSTATS:
		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}
   1992 
   1993 static int
   1994 jobscmd_if_attach(ap)
   1995 	struct jobs_attach *ap;
   1996 {
   1997 	struct jobs_if *jif;
   1998 	struct ifnet *ifp;
   1999 	int error;
   2000 
   2001 	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
   2002 		return (ENXIO);
   2003 	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth, ap->qlimit, ap->separate)) == NULL)
   2004 		return (ENOMEM);
   2005 
   2006 	/*
   2007 	 * set JOBS to this ifnet structure.
   2008 	 */
   2009 	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
   2010 				 jobs_enqueue, jobs_dequeue, jobs_request,
   2011 				 &jif->jif_classifier, acc_classify)) != 0)
   2012 		(void)jobs_detach(jif);
   2013 
   2014 	return (error);
   2015 }
   2016 
   2017 static int
   2018 jobscmd_if_detach(ap)
   2019 	struct jobs_interface *ap;
   2020 {
   2021 	struct jobs_if *jif;
   2022 	int error;
   2023 
   2024 	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
   2025 		return (EBADF);
   2026 
   2027 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
   2028 		altq_disable(jif->jif_ifq);
   2029 
   2030 	if ((error = altq_detach(jif->jif_ifq)))
   2031 		return (error);
   2032 
   2033 	return jobs_detach(jif);
   2034 }
   2035 
   2036 static int
   2037 jobscmd_add_class(ap)
   2038 	struct jobs_add_class *ap;
   2039 {
   2040 	struct jobs_if *jif;
   2041 	struct jobs_class *cl;
   2042 
   2043 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2044 		return (EBADF);
   2045 
   2046 	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
   2047 		return (EINVAL);
   2048 
   2049 	if ((cl = jobs_class_create(jif, ap->pri,
   2050 				    ap->cl_adc, ap->cl_rdc,
   2051 				    ap->cl_alc, ap->cl_rlc, ap-> cl_arc,
   2052 				    ap->flags)) == NULL)
   2053 		return (ENOMEM);
   2054 
   2055 	/* return a class handle to the user */
   2056 	ap->class_handle = clp_to_clh(cl);
   2057 	return (0);
   2058 }
   2059 
   2060 static int
   2061 jobscmd_delete_class(ap)
   2062 	struct jobs_delete_class *ap;
   2063 {
   2064 	struct jobs_if *jif;
   2065 	struct jobs_class *cl;
   2066 
   2067 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2068 		return (EBADF);
   2069 
   2070 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2071 		return (EINVAL);
   2072 
   2073 	return jobs_class_destroy(cl);
   2074 }
   2075 
/*
 * jobscmd_modify_class: change the priority and/or service parameters
 * of an existing class identified by ap->class_handle.
 *
 * Returns 0 on success, EBADF if the interface has no JOBS state,
 * EINVAL for an out-of-range priority or a stale handle, EEXIST if
 * another class already occupies the requested priority slot, and
 * ENOMEM if the class parameters could not be rebuilt.
 */
static int
jobscmd_modify_class(ap)
	struct jobs_modify_class *ap;
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		/* refuse to displace a class already in the target slot */
		if (jif->jif_classes[ap->pri] != NULL)
			return (EEXIST);
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	/*
	 * NOTE(review): presumably jobs_class_create() reinitializes the
	 * class already installed at ap->pri rather than allocating a
	 * second one -- confirm against its definition.  Also note that
	 * if it fails here, the class has already been moved to its new
	 * priority slot above; verify that partial state is acceptable.
	 */
	if ((cl = jobs_class_create(jif, ap->pri,
				    ap->cl_adc, ap->cl_rdc,
				    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
				    ap->flags)) == NULL)
		return (ENOMEM);
	return 0;
}
   2111 
   2112 static int
   2113 jobscmd_add_filter(ap)
   2114 	struct jobs_add_filter *ap;
   2115 {
   2116 	struct jobs_if *jif;
   2117 	struct jobs_class *cl;
   2118 
   2119 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2120 		return (EBADF);
   2121 
   2122 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2123 		return (EINVAL);
   2124 
   2125 	return acc_add_filter(&jif->jif_classifier, &ap->filter,
   2126 			      cl, &ap->filter_handle);
   2127 }
   2128 
   2129 static int
   2130 jobscmd_delete_filter(ap)
   2131 	struct jobs_delete_filter *ap;
   2132 {
   2133 	struct jobs_if *jif;
   2134 
   2135 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2136 		return (EBADF);
   2137 
   2138 		return acc_delete_filter(&jif->jif_classifier,
   2139 					 ap->filter_handle);
   2140 }
   2141 
   2142 static int
   2143 jobscmd_class_stats(ap)
   2144 	struct jobs_class_stats *ap;
   2145 {
   2146 	struct jobs_if *jif;
   2147 	struct jobs_class *cl;
   2148 	struct class_stats stats, *usp;
   2149 	int pri, error;
   2150 
   2151 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2152 		return (EBADF);
   2153 
   2154 	ap->maxpri = jif->jif_maxpri;
   2155 
   2156 	/* then, read the next N classes */
   2157 	usp = ap->stats;
   2158 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   2159 		cl = jif->jif_classes[pri];
   2160 		if (cl != NULL)
   2161 			get_class_stats(&stats, cl);
   2162 		else
   2163 			(void)memset(&stats, 0, sizeof(stats));
   2164 		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
   2165 				     sizeof(stats))) != 0)
   2166 			return (error);
   2167 	}
   2168 	return (0);
   2169 }
   2170 
/*
 * get_class_stats: fill *sp with a snapshot of the statistics kept
 * for class cl.  NOTE: this is not a pure read -- it resets the
 * class's rin/rout packet counters and its average-delay accumulator
 * as a side effect, so successive calls report per-interval rather
 * than cumulative figures for those fields.
 */
static void get_class_stats(sp, cl)
	struct class_stats *sp;
	struct jobs_class *cl;
{
	u_int64_t now;
	now = read_machclk();

	/* identity and instantaneous queue occupancy */
	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	/* traffic counters accumulated by the scheduler */
	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/*  PKTCNTR_RESET(&cl->st_arrival);*/
	/* restart the interval counters now that they have been sampled */
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	/* whole-interface queue length and delay figures */
	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	/* average-delay accumulator restarts with each snapshot */
	cl->cl_avgdel = 0;

	/* time since the class last went idle, and ADC violations */
	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	/* per-interface enqueue/dequeue cycle-count instrumentation */
	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}
   2212 
   2213 /* convert a class handle to the corresponding class pointer */
   2214 static struct jobs_class *
   2215 clh_to_clp(jif, chandle)
   2216 	struct jobs_if *jif;
   2217 	u_long chandle;
   2218 {
   2219 	struct jobs_class *cl;
   2220 
   2221 	cl = (struct jobs_class *)chandle;
   2222 	if (chandle != ALIGN(cl)) {
   2223 #if 1
   2224 		printf("clh_to_cl: unaligned pointer %p\n", cl);
   2225 #endif
   2226 		return (NULL);
   2227 	}
   2228 
   2229 	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
   2230 		return (NULL);
   2231 	return (cl);
   2232 }
   2233 
   2234 /* convert a class pointer to the corresponding class handle */
   2235 static u_long
   2236 clp_to_clh(cl)
   2237 	struct jobs_class *cl;
   2238 {
   2239 	return (cl->cl_handle);
   2240 }
   2241 
#ifdef KLD_MODULE

/*
 * Character-device switch used when JOBS is built as a loadable
 * module: wires the open/close/ioctl entry points above into the
 * ALTQ module framework.
 */
static struct altqsw jobs_sw =
	{"jobs", jobsopen, jobsclose, jobsioctl};

ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);

#endif /* KLD_MODULE */
   2250 
   2251 #endif /* ALTQ3_COMPAT */
   2252 #endif /* ALTQ_JOBS */
   2253