/* altq_jobs.c, revision 1.7 (NetBSD, sys/altq) */
      1 /*	$NetBSD: altq_jobs.c,v 1.7 2014/08/18 03:14:12 riastradh Exp $	*/
      2 /*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/
      3 /*
      4  * Copyright (c) 2001, the Rector and Board of Visitors of the
      5  * University of Virginia.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms,
      9  * with or without modification, are permitted provided
     10  * that the following conditions are met:
     11  *
     12  * Redistributions of source code must retain the above
     13  * copyright notice, this list of conditions and the following
     14  * disclaimer.
     15  *
     16  * Redistributions in binary form must reproduce the above
     17  * copyright notice, this list of conditions and the following
     18  * disclaimer in the documentation and/or other materials provided
     19  * with the distribution.
     20  *
     21  * Neither the name of the University of Virginia nor the names
     22  * of its contributors may be used to endorse or promote products
     23  * derived from this software without specific prior written
     24  * permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
     27  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
     28  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     29  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     30  * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
     31  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
     32  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
     35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     38  * THE POSSIBILITY OF SUCH DAMAGE.
     39  */
     40 /*
     41  * JoBS - altq prototype implementation
     42  *
     43  * Author: Nicolas Christin <nicolas (at) cs.virginia.edu>
     44  *
     45  * JoBS algorithms originally devised and proposed by
     46  * Nicolas Christin and Jorg Liebeherr.
     47  * Grateful acknowledgments to Tarek Abdelzaher for his help and
     48  * comments, and to Kenjiro Cho for some helpful advice.
     49  * Contributed by the Multimedia Networks Group at the University
     50  * of Virginia.
     51  *
     52  * Papers and additional info can be found at
     53  * http://qosbox.cs.virginia.edu
     54  *
     55  */
     56 
     57 /*
     58  * JoBS queue
     59  */
     60 
     61 #include <sys/cdefs.h>
     62 __KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.7 2014/08/18 03:14:12 riastradh Exp $");
     63 
     64 #ifdef _KERNEL_OPT
     65 #include "opt_altq.h"
     66 #include "opt_inet.h"
     67 #endif
     68 
     69 #ifdef ALTQ_JOBS  /* jobs is enabled by ALTQ_JOBS option in opt_altq.h */
     70 
     71 #include <sys/param.h>
     72 #include <sys/malloc.h>
     73 #include <sys/mbuf.h>
     74 #include <sys/socket.h>
     75 #include <sys/sockio.h>
     76 #include <sys/systm.h>
     77 #include <sys/proc.h>
     78 #include <sys/errno.h>
     79 #include <sys/kernel.h>
     80 #include <sys/queue.h>
     81 #include <sys/kauth.h>
     82 
     83 #ifdef __FreeBSD__
     84 #include <sys/limits.h>
     85 #endif
     86 
     87 #include <net/if.h>
     88 #include <net/if_types.h>
     89 
     90 #include <altq/altq.h>
     91 #include <altq/altq_conf.h>
     92 #include <altq/altq_jobs.h>
     93 
     94 #ifdef ALTQ3_COMPAT
     95 /*
     96  * function prototypes
     97  */
     98 static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
     99 static int jobs_detach(struct jobs_if *);
    100 static int jobs_clear_interface(struct jobs_if *);
    101 static int jobs_request(struct ifaltq *, int, void *);
    102 static void jobs_purge(struct jobs_if *);
    103 static struct jobs_class *jobs_class_create(struct jobs_if *,
    104     int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
    105 static int jobs_class_destroy(struct jobs_class *);
    106 static int jobs_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
    107 static struct mbuf *jobs_dequeue(struct ifaltq *, int);
    108 
    109 static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if*);
    110 static struct mbuf *jobs_getq(struct jobs_class *);
    111 static struct mbuf *jobs_pollq(struct jobs_class *);
    112 static void jobs_purgeq(struct jobs_class *);
    113 
    114 static int jobscmd_if_attach(struct jobs_attach *);
    115 static int jobscmd_if_detach(struct jobs_interface *);
    116 static int jobscmd_add_class(struct jobs_add_class *);
    117 static int jobscmd_delete_class(struct jobs_delete_class *);
    118 static int jobscmd_modify_class(struct jobs_modify_class *);
    119 static int jobscmd_add_filter(struct jobs_add_filter *);
    120 static int jobscmd_delete_filter(struct jobs_delete_filter *);
    121 static int jobscmd_class_stats(struct jobs_class_stats *);
    122 static void get_class_stats(struct class_stats *, struct jobs_class *);
    123 static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
    124 static u_long clp_to_clh(struct jobs_class *);
    125 
    126 static TSLIST *tslist_alloc(void);
    127 static void tslist_destroy(struct jobs_class *);
    128 static int tslist_enqueue(struct jobs_class *, u_int64_t);
    129 static void tslist_dequeue(struct jobs_class *);
    130 static void tslist_drop(struct jobs_class *);
    131 
    132 static int enforce_wc(struct jobs_if *);
    133 static int64_t* adjust_rates_rdc(struct jobs_if *);
    134 static int64_t* assign_rate_drops_adc(struct jobs_if *);
    135 static int64_t* update_error(struct jobs_if *);
    136 static int min_rates_adc(struct jobs_if *);
    137 static int64_t proj_delay(struct jobs_if *, int);
    138 static int pick_dropped_rlc(struct jobs_if *);
    139 
    140 altqdev_decl(jobs);
    141 
    142 /* jif_list keeps all jobs_if's allocated. */
    143 static struct jobs_if *jif_list = NULL;
    144 
    145 typedef unsigned long long ull;
    146 
    147 /* setup functions */
    148 
    149 static struct jobs_if *
    150 jobs_attach(struct ifaltq *ifq, u_int bandwidth, u_int qlimit, u_int separate)
    151 {
    152 	struct jobs_if *jif;
    153 
    154 	jif = malloc(sizeof(struct jobs_if), M_DEVBUF, M_WAITOK|M_ZERO);
    155 	if (jif == NULL)
    156 	        return (NULL);
    157 
    158 	jif->jif_bandwidth = bandwidth;
    159 	jif->jif_qlimit = qlimit;
    160 	jif->jif_separate = separate;
    161 #ifdef ALTQ_DEBUG
    162 	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
    163 	printf("JoBS buffer size = %d pkts [%s]\n",
    164 	       (int)qlimit, separate?"separate buffers":"shared buffer");
    165 #endif
    166 	jif->jif_maxpri = -1;
    167 	jif->jif_ifq = ifq;
    168 
    169 	jif->wc_cycles_enqueue = 0;
    170 	jif->avg_cycles_enqueue = 0;
    171 	jif->avg_cycles2_enqueue = 0;
    172 	jif->bc_cycles_enqueue = ALTQ_INFINITY;
    173 	jif->wc_cycles_dequeue = 0;
    174 	jif->avg_cycles_dequeue = 0;
    175 	jif->avg_cycles2_dequeue = 0;
    176 	jif->bc_cycles_dequeue = ALTQ_INFINITY;
    177 	jif->total_enqueued = 0;
    178 	jif->total_dequeued = 0;
    179 
    180 	/* add this state to the jobs list */
    181 	jif->jif_next = jif_list;
    182 	jif_list = jif;
    183 
    184 	return (jif);
    185 }
    186 
    187 static int
    188 jobs_detach(struct jobs_if *jif)
    189 {
    190 	(void)jobs_clear_interface(jif);
    191 
    192 	/* remove this interface from the jif list */
    193 	if (jif_list == jif)
    194 		jif_list = jif->jif_next;
    195 	else {
    196 		struct jobs_if *p;
    197 
    198 		for (p = jif_list; p != NULL; p = p->jif_next)
    199 			if (p->jif_next == jif) {
    200 				p->jif_next = jif->jif_next;
    201 				break;
    202 			}
    203 		ASSERT(p != NULL);
    204 	}
    205 	free(jif, M_DEVBUF);
    206 	return (0);
    207 }
    208 
    209 /*
    210  * bring the interface back to the initial state by discarding
    211  * all the filters and classes.
    212  */
    213 static int
    214 jobs_clear_interface(struct jobs_if *jif)
    215 {
    216 	struct jobs_class	*cl;
    217 	int	pri;
    218 
    219 	/* free the filters for this interface */
    220 	acc_discard_filters(&jif->jif_classifier, NULL, 1);
    221 
    222 	/* clear out the classes */
    223 	for (pri = 0; pri <= jif->jif_maxpri; pri++)
    224 		if ((cl = jif->jif_classes[pri]) != NULL)
    225 			jobs_class_destroy(cl);
    226 
    227 	return (0);
    228 }
    229 
    230 static int
    231 jobs_request(struct ifaltq *ifq, int req, void *arg)
    232 {
    233 	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
    234 
    235 	switch (req) {
    236 	case ALTRQ_PURGE:
    237 		jobs_purge(jif);
    238 		break;
    239 	}
    240 	return (0);
    241 }
    242 
    243 /* discard all the queued packets on the interface */
    244 static void
    245 jobs_purge(struct jobs_if *jif)
    246 {
    247 	struct jobs_class *cl;
    248 	int pri;
    249 
    250 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
    251 		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
    252 			jobs_purgeq(cl);
    253 	}
    254 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
    255 		jif->jif_ifq->ifq_len = 0;
    256 }
    257 
/*
 * jobs_class_create: create the class at priority pri on interface jif,
 * or re-initialize it if one already exists at that priority.
 *
 * adc/rdc/alc/rlc/arc are the per-class service targets (absolute
 * delay, relative delay, absolute loss, relative loss, absolute rate);
 * -1 means "no constraint of that kind" and clears the corresponding
 * concerned_* flag.  Returns the class, or NULL on allocation failure.
 */
static struct jobs_class *
jobs_class_create(struct jobs_if *jif, int pri, int64_t adc, int64_t rdc,
    int64_t alc, int64_t rlc, int64_t arc, int flags)
{
	struct jobs_class *cl, *scan1, *scan2;
	int s;
	int class_exists1, class_exists2;
	int i, j;
	int64_t tmp[JOBS_MAXPRI];
	u_int64_t now;

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
		splx(s);
	} else {
		/* fresh class: allocate the class, its queue, and its
		 * arrival-timestamp list */
		cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;

		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	/* (re)initialize the queue and all per-class counters */
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	cl->cl_period = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

	/*
	 * -1 disables each constraint; the delay/loss bounds fall back
	 * to "infinite", the rate-style targets to 0.
	 */
	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = ALTQ_INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = ALTQ_INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	cl->cl_rdc=rdc;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rlc=rlc;
	cl->cl_alc=alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;  /* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */

	if (cl->concerned_rdc) {
		/* tmp[i] = product of the RDC targets of the classes
		 * strictly below priority i (0 if class i is absent) */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		/* each class's delay_prod_others is the product of the
		 * tmp[] terms of every *other* RDC-concerned class */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	/* same computation as above, applied to the RLC targets */
	if (cl->concerned_rlc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return (cl);

 err_ret:
	/* partial-allocation cleanup; only reachable from the
	 * fresh-allocation branch above */
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		free(cl->arv_tm, M_DEVBUF);

	free(cl, M_DEVBUF);
	return (NULL);
}
    455 
/*
 * jobs_class_destroy: remove class cl from its interface, dropping any
 * queued packets, detaching its filters, and recomputing jif_maxpri.
 * The unlink runs at splnet so the enqueue/dequeue paths never see a
 * half-removed class.  Always returns 0.
 */
static int
jobs_class_destroy(struct jobs_class *cl)
{
	struct jobs_if *jif;
	int s, pri;

	s = splnet();

	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		jobs_purgeq(cl);

	jif = cl->cl_jif;
	jif->jif_classes[cl->cl_pri] = NULL;
	/* if this was the highest-priority class, scan downward for the
	 * next occupied slot; -1 means no classes remain */
	if (jif->jif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (jif->jif_classes[pri] != NULL) {
				jif->jif_maxpri = pri;
				break;
			}
		if (pri < 0)
			jif->jif_maxpri = -1;
	}
	splx(s);

	/* release the timestamp list, the queue, and the class itself */
	tslist_destroy(cl);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (0);
}
    488 
    489 /*
    490  * jobs_enqueue is an enqueue function to be registered to
    491  * (*altq_enqueue) in struct ifaltq.
    492  */
static int
jobs_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	int len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t* delta_rate;
	u_int64_t tstamp1, tstamp2, cycles; /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	/* interface just went busy after being idle: restart the
	 * per-class accounting for a new busy period */
	if (IFQ_IS_EMPTY(ifq)) {
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = jif->jif_default;

	/* charge the arrival to both the per-period (cl_*) and
	 * lifetime (st_*) counters */
	len = m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals w/ overflow */
		/* the byte counter wrapped: restart the per-period
		 * counters and re-charge this packet.
		 * NOTE(review): unlike the idle-reset loop above,
		 * cl_lastdel is not cleared here — confirm intentional. */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	/* current loss ratio = dropped bytes / arrived bytes,
	 * scaled by SCALE_LOSS for fixed-point precision */
	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;

	/* for MDRR: update theoretical value of the output curve */

	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now; /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS; /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	/* rate allocation: enforce work conservation, then apply the
	 * ADC and RDC service-rate adjustments to backlogged classes */
	enforce_wc(jif);

	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
			  if ((cl = jif->jif_classes[pri]) != NULL &&
			      !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	}

	delta_rate = adjust_rates_rdc(jif);

	if (delta_rate != NULL) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++)
			if ((cl = jif->jif_classes[pri]) != NULL &&
			    !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
		free(delta_rate, M_DEVBUF);
	}

	/* record worst/best/average cycle counts for this enqueue */
	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue=cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue=cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return (return_flag);
}
    644 
    645 /*
    646  * jobs_dequeue is a dequeue function to be registered to
    647  * (*altq_dequeue) in struct ifaltq.
    648  *
    649  * note: ALTDQ_POLL returns the next packet without removing the packet
    650  *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
    651  *	ALTDQ_REMOVE must return the same packet if called immediately
    652  *	after ALTDQ_POLL.
    653  */
    654 
static struct mbuf *
jobs_dequeue(struct ifaltq *ifq, int op)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		/* mark every class idle as of now, account the cycles,
		 * and bail out */
		for (pri=0; pri <= jif->jif_maxpri; pri++) {
		  cl = jif->jif_classes[pri];
		  if (cl != NULL)
		    cl->idletime = now;
		}

		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (NULL);
	}

	/*
	 * select the class whose actual tranmissions are the furthest
	 * from the promised transmissions
	 */

	max_error = -1;
	svc_class = -1;

	/* error = promised output (rout_th, scaled) minus actual
	 * output (rout, scaled up for comparison); pick the backlogged
	 * class with the largest deficit */
	for (pri=0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			error = (int64_t)cl->cl_rout_th.bytes
			    -(int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	/* a poll must not consume the packet or its timestamp */
	if (op == ALTDQ_POLL) {
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		/* per-packet delay = now minus the arrival timestamp
		 * at the head of the class's timestamp list */
		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel  += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
	}
	/* NOTE(review): the timestamp is dequeued whenever cl != NULL,
	 * even if jobs_getq() returned NULL above; presumably that case
	 * cannot occur since cl was chosen with !qempty — confirm. */
	if (cl != NULL)
		tslist_dequeue(cl);		/* dequeue the timestamp */

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return (m);
}
    771 
    772 static int
    773 jobs_addq(struct jobs_class *cl, struct mbuf *m, struct jobs_if *jif)
    774 {
    775 	int victim;
    776 	u_int64_t len;
    777 	u_int64_t now;
    778 	struct jobs_class* victim_class;
    779 
    780 	victim = -1;
    781 	victim_class = NULL;
    782 	len = 0;
    783 
    784 	now = read_machclk();
    785 
    786 	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
    787 		/*
    788 		 * separate buffers: no guarantees on packet drops
    789 		 * can be offered
    790 		 * thus we drop the incoming packet
    791 		 */
    792 		len = (u_int64_t)m_pktlen(m);
    793 		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
    794 		PKTCNTR_SUB(&cl->cl_rin, (int)len);
    795 		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
    796 		PKTCNTR_SUB(&cl->st_rin, (int)len);
    797 		cl->current_loss += (len << SCALE_LOSS)
    798 		    /cl->cl_arrival.bytes;
    799 		m_freem(m);
    800 		return (-1);
    801 
    802 	} else if (!jif->jif_separate
    803 		   && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
    804 		/* shared buffer: supports guarantees on losses */
    805 		if (!cl->concerned_rlc) {
    806 			if (!cl->concerned_alc) {
    807 				/*
    808 				 * no ALC, no RLC on this class:
    809 				 * drop the incoming packet
    810 				 */
    811 				len = (u_int64_t)m_pktlen(m);
    812 				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
    813 				PKTCNTR_SUB(&cl->cl_rin, (int)len);
    814 				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
    815 				PKTCNTR_SUB(&cl->st_rin, (int)len);
    816 				cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
    817 				m_freem(m);
    818 				return (-1);
    819 			} else {
    820 				/*
    821 				 * no RLC, but an ALC:
    822 				 * drop the incoming packet if possible
    823 				 */
    824 				len = (u_int64_t)m_pktlen(m);
    825 				if (cl->current_loss + (len << SCALE_LOSS)
    826 				    / cl->cl_arrival.bytes <= cl->cl_alc) {
    827 					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
    828 					PKTCNTR_SUB(&cl->cl_rin, (int)len);
    829 					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
    830 					PKTCNTR_SUB(&cl->st_rin, (int)len);
    831 					cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
    832 					m_freem(m);
    833 					return (-1);
    834 				} else {
    835 					/*
    836 					 * the ALC would be violated:
    837 					 * pick another class
    838 					 */
    839 					_addq(cl->cl_q, m);
    840 					tslist_enqueue(cl, now);
    841 
    842 					victim = pick_dropped_rlc(jif);
    843 
    844 					if (victim == -1) {
    845 						/*
    846 						 * something went wrong
    847 						 * let us discard
    848 						 * the incoming packet,
    849 						 * regardless of what
    850 						 * may happen...
    851 						 */
    852 						victim_class = cl;
    853 					} else
    854 						victim_class = jif->jif_classes[victim];
    855 
    856 					if (victim_class != NULL) {
    857 						/*
    858 						 * test for safety
    859 						 * purposes...
    860 						 * it must be true
    861 						 */
    862 						m = _getq_tail(victim_class->cl_q);
    863 						len = (u_int64_t)m_pktlen(m);
    864 						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
    865 						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
    866 						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
    867 						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
    868 						victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
    869 						m_freem(m); /* the packet is trashed here */
    870 						tslist_drop(victim_class); /* and its timestamp as well */
    871 					}
    872 					return (-1);
    873 				}
    874 			}
    875 		} else {
    876 			/*
    877 			 * RLC on that class:
    878 			 * pick class according to RLCs
    879 			 */
    880 			_addq(cl->cl_q, m);
    881 			tslist_enqueue(cl, now);
    882 
    883 			victim = pick_dropped_rlc(jif);
    884 			if (victim == -1) {
    885 				/*
    886 				 * something went wrong
    887 				 * let us discard the incoming packet,
    888 				 * regardless of what may happen...
    889 				 */
    890 				victim_class = cl;
    891 			} else
    892 				victim_class = jif->jif_classes[victim];
    893 
    894 			if (victim_class != NULL) {
    895 				/*
    896 				 * test for safety purposes...
    897 				 * it must be true
    898 				 */
    899 				m = _getq_tail(victim_class->cl_q);
    900 				len = (u_int64_t)m_pktlen(m);
    901 				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
    902 				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
    903 				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
    904 				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
    905 				victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
    906 				m_freem(m); /* the packet is trashed here */
    907 				tslist_drop(victim_class); /* and its timestamp as well */
    908 			}
    909 			return (-1);
    910 		}
    911 	}
    912 	/* else: no drop */
    913 
    914 	_addq(cl->cl_q, m);
    915 	tslist_enqueue(cl, now);
    916 
    917 	return (0);
    918 }
    919 
    920 static struct mbuf *
    921 jobs_getq(struct jobs_class *cl)
    922 {
    923 	return _getq(cl->cl_q);
    924 }
    925 
    926 static struct mbuf *
    927 jobs_pollq(struct jobs_class *cl)
    928 {
    929 	return qhead(cl->cl_q);
    930 }
    931 
    932 static void
    933 jobs_purgeq(struct jobs_class *cl)
    934 {
    935 	struct mbuf *m;
    936 
    937 	if (qempty(cl->cl_q))
    938 		return;
    939 
    940 	while ((m = _getq(cl->cl_q)) != NULL) {
    941 		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
    942 		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
    943 		m_freem(m);
    944 		tslist_drop(cl);
    945 	}
    946 	ASSERT(qlen(cl->cl_q) == 0);
    947 }
    948 
    949 /*
    950  * timestamp list support routines
    951  *
    952  * this implementation has been revamped and
    953  * now uses a TAILQ structure.
    954  * timestamp list holds class timestamps
    955  * there is one timestamp list per class.
    956  */
    957 static TSLIST *
    958 tslist_alloc(void)
    959 {
    960 	TSLIST *list_init;
    961 
    962 	list_init = malloc(sizeof(TSLIST), M_DEVBUF, M_WAITOK);
    963 	TAILQ_INIT(list_init);
    964 	return (list_init);
    965 }
    966 
    967 static void
    968 tslist_destroy(struct jobs_class *cl)
    969 {
    970 	while (tslist_first(cl->arv_tm) != NULL)
    971 		tslist_dequeue(cl);
    972 
    973 	free(cl->arv_tm, M_DEVBUF);
    974 }
    975 
    976 static int
    977 tslist_enqueue(struct jobs_class *cl, u_int64_t arv)
    978 {
    979 	TSENTRY *pushed;
    980 	pushed = malloc(sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
    981 	if (pushed == NULL)
    982 		return (0);
    983 
    984 	pushed->timestamp = arv;
    985 	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
    986 	return (1);
    987 }
    988 
    989 static void
    990 tslist_dequeue(struct jobs_class *cl)
    991 {
    992 	TSENTRY *popped;
    993 	popped = tslist_first(cl->arv_tm);
    994 	if (popped != NULL) {
    995 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
    996 		  free(popped, M_DEVBUF);
    997 	}
    998 	return;
    999 }
   1000 
   1001 static void
   1002 tslist_drop(struct jobs_class *cl)
   1003 {
   1004 	TSENTRY *popped;
   1005 	popped = tslist_last(cl->arv_tm);
   1006 	if (popped != NULL) {
   1007 		  TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
   1008 		  free(popped, M_DEVBUF);
   1009 	}
   1010 	return;
   1011 }
   1012 
   1013 /*
   1014  * rate allocation support routines
   1015  */
   1016 /*
   1017  * enforce_wc: enforce that backlogged classes have non-zero
   1018  * service rate, and that non-backlogged classes have zero
   1019  * service rate.
   1020  */
   1021 
   1022 static int
   1023 enforce_wc(struct jobs_if *jif)
   1024 {
   1025 	struct jobs_class *cl;
   1026 
   1027 	int64_t active_classes;
   1028 	int pri;
   1029 	int is_backlogged, class_exists, updated;
   1030 
   1031 	updated = 0;
   1032 	active_classes = 0;
   1033 
   1034 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1035 		cl = jif->jif_classes[pri];
   1036 		class_exists = (cl != NULL);
   1037 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1038 
   1039 		if (is_backlogged)
   1040 			active_classes++;
   1041 		if ((is_backlogged && cl->service_rate <= 0)
   1042 		    ||(class_exists
   1043 		       && !is_backlogged && cl->service_rate > 0))
   1044 			updated = 1;
   1045 	}
   1046 
   1047 	if (updated) {
   1048 		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   1049 			cl = jif->jif_classes[pri];
   1050 			class_exists = (cl != NULL);
   1051 			is_backlogged = (class_exists && !qempty(cl->cl_q));
   1052 
   1053 			if (class_exists && !is_backlogged)
   1054 				cl->service_rate = 0;
   1055 			else if (is_backlogged)
   1056 				cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
   1057 		}
   1058 	}
   1059 
   1060 	return (updated);
   1061 }
   1062 
   1063 /*
   1064  * adjust_rates_rdc: compute the service rates adjustments
   1065  * needed to realize the desired proportional delay differentiation.
   1066  * essentially, the rate adjustement delta_rate = prop_control*error,
   1067  * where error is the difference between the measured "weighted"
   1068  * delay and the mean of the weighted delays. see paper for more
   1069  * information.
   1070  * prop_control has slightly changed since the INFOCOM paper,
   1071  * this condition seems to provide better results.
   1072  */
   1073 
static int64_t *
adjust_rates_rdc(struct jobs_if *jif)
{
	int64_t *result;
	int64_t credit, available, lower_bound, upper_bound;
	int64_t bk;
	int i, j;
	int rdc_classes, active_classes;
	int class_exists, is_backlogged;
	struct jobs_class *cl;
	int64_t *error;
	int64_t prop_control;
	u_int64_t max_prod;
	u_int64_t min_share;
	u_int64_t max_avg_pkt_size;

	/*
	 * min_share is scaled
	 * to avoid dealing with doubles
	 */
	active_classes = 0;
	rdc_classes = 0;
	max_prod = 0;
	max_avg_pkt_size = 0;

	/* bandwidth (in bps) left for the classes concerned by RDC */
	upper_bound = (int64_t)jif->jif_bandwidth;

	/*
	 * count the backlogged classes; classes not concerned by RDC
	 * keep their current rate, so deduct it from the bandwidth
	 * available to the RDC classes.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			active_classes++;
			if (cl->concerned_rdc)
				rdc_classes++;
			else
				upper_bound -=
				    internal_to_bps(cl->service_rate);
		}
	}

	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t),
	    M_DEVBUF, M_WAITOK);

	if (result == NULL)
		return NULL;

	for (i = 0; i <= jif->jif_maxpri; i++)
		result[i] = 0;

	/* no bandwidth left, or nobody concerned by RDC: no adjustment */
	if (upper_bound <= 0 || rdc_classes == 0)
		return result;

	credit = 0;
	lower_bound = 0;
	min_share = ((u_int64_t)1 << SCALE_SHARE);
	bk = 0;

	/* total backlog (in bytes) of the classes concerned by RDC */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc)
			bk += cl->cl_rin.bytes;
	}

	if (bk == 0)
		return (result);

	/*
	 * gather the quantities prop_control is derived from below:
	 * smallest (scaled) backlog share of a backlogged class,
	 * largest delay product, and largest average packet size
	 * among the RDC classes.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged
		    && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
			min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
		if (is_backlogged && cl->concerned_rdc
		    && cl->delay_prod_others > max_prod)
			max_prod = cl->delay_prod_others;

		if (is_backlogged && cl->concerned_rdc
		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
			max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
	}

	error = update_error(jif);
	if (!error)
		goto fail;

	/*
	 * NOTE(review): this divides by max_prod*max_avg_pkt_size;
	 * presumably both are non-zero whenever bk != 0 and some
	 * class is concerned by RDC -- confirm, a zero value here
	 * would be a division by zero.
	 */
	prop_control = (upper_bound*upper_bound*min_share)
	    /(max_prod*(max_avg_pkt_size << 2));

	prop_control = bps_to_internal(ticks_to_secs(prop_control)); /* in BT-1 */

	credit = 0;
	/* rate adjustment proportional to the (negated) delay error */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc) {
			result[i] = -prop_control*error[i]; /* in BT-1 */
			result[i] >>= (SCALE_SHARE);
		}
	}

	free(error, M_DEVBUF); /* we don't need these anymore */

	/* saturation */

	/* sum of the minimum rates imposed by the ADC/ARC constraints */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			lower_bound += cl->min_rate_adc;
		/*
		 * note: if there's no ADC or ARC on cl,
		 * this is equal to zero, which is fine
		 */
	}

	/*
	 * saturate from above: if class i would exceed the available
	 * bandwidth, give it all the spare capacity and pin every
	 * other RDC class to its minimum rate.
	 */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate > upper_bound) {
			for (j = 0; j <= jif->jif_maxpri; j++) {
				cl = jif->jif_classes[j];
				class_exists = (cl != NULL);
				is_backlogged = (class_exists
						 && !qempty(cl->cl_q));
				if (is_backlogged && cl->concerned_rdc) {
					if (j == i)
						result[j] = upper_bound
						    -cl->service_rate
						    + cl->min_rate_adc
						    - lower_bound;
					else
						result[j] =
						    -cl->service_rate
						    +cl->min_rate_adc;
				}
			}
			return result;
		}

		cl = jif->jif_classes[i];
		/* do this again since it may have been modified */
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		/*
		 * saturate from below: never go under the minimum rate
		 * required by the ADC/ARC; remember the deficit in
		 * "credit" so it can be taken back from other classes.
		 */
		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate < cl->min_rate_adc) {
			credit += cl->service_rate+result[i]
			    -cl->min_rate_adc;
			/* "credit" is in fact a negative number */
			result[i] = -cl->service_rate+cl->min_rate_adc;
		}
	}

	/*
	 * pay the accumulated deficit back, taking bandwidth from
	 * the lowest-priority classes first, without driving any
	 * class below its minimum rate.
	 */
	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc) {
			available = result[i]
			    + cl->service_rate-cl->min_rate_adc;
			if (available >= -credit) {
				result[i] += credit;
				credit = 0;
			} else {
				result[i] -= available;
				credit += available;
			}
		}
	}
	return result;

fail:	free(result, M_DEVBUF);
	return NULL;
}
   1259 
   1260 /*
   1261  * assign_rate_drops_adc: returns the adjustment needed to
   1262  * the service rates to meet the absolute delay/rate constraints
   1263  * (delay/throughput bounds) and drops traffic if need be.
   1264  * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
   1265  */
   1266 
static int64_t *
assign_rate_drops_adc(struct jobs_if *jif)
{
	int64_t *result;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	/*
	 * per-class working arrays:
	 * n[i]: service rate of class i
	 * k[i]: scaled backlog of class i
	 * c[i]: time left before the class-i ADC deadline (in ticks)
	 * available[i]: rate class i can give up
	 *               (negative when the class needs more service)
	 */
	int64_t *c, *n, *k;
	int64_t *available;

	int lowest, highest;
	int keep_going;
	int i;
	u_int64_t now, oldest_arv;
	int64_t	remaining_time;
	struct mbuf* pkt;
	u_int64_t len;

	now = read_machclk();
	oldest_arv = now;

	/* allocation failures unwind through the fail labels below */
	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
	if (result == NULL)
		goto fail0;
	c = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (c == NULL)
		goto fail1;
	n = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (n == NULL)
		goto fail2;
	k = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (k == NULL)
		goto fail3;
	available = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
	if (available == NULL)
		goto fail4;

	for (i = 0; i <= jif->jif_maxpri; i++)
		result[i] = 0;

	keep_going = 1;

	/* fill n, k, c and available for each class */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			if (cl->concerned_adc) {
				/*
				 * get the arrival time of the oldest
				 * class-i packet
				 */
				if (tslist_first(cl->arv_tm) == NULL)
					oldest_arv = now; /* NOTREACHED */
				else
					oldest_arv = (tslist_first(cl->arv_tm))->timestamp;

				n[i] = cl->service_rate;
				k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));

				remaining_time = cl->cl_adc
				    - (int64_t)delay_diff(now, oldest_arv);
				if (remaining_time > 0) {
					c[i] = remaining_time;
					/*
					 * c is the remaining time before
					 * the deadline is violated
					 * (in ticks)
					 */
					available[i] = n[i]-k[i]/c[i];
				} else {
					/*
					 * deadline has passed...
					 * we allocate the whole link
					 * capacity to hopefully
					 * solve the problem
					 */
					c[i] = 0;
					available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth));
				}
				if (cl->concerned_arc) {
					/*
					 * there's an ARC in addition
					 * to the ADC
					 */
					if (n[i] - cl->cl_arc < available[i])
						available[i] = n[i]
						    - cl->cl_arc;
				}
			} else if (cl->concerned_arc) {
				/*
				 * backlogged, concerned by ARC
				 * but not by ADC
				 */
				n[i] = cl->service_rate;
				available[i] = n[i] - cl->cl_arc;
			} else {
				/*
				 * backlogged but not concerned by ADC
				 * or ARC -> can give everything
				 */
				n[i] = cl->service_rate;
				available[i] = n[i];
			}
		} else {
			/* not backlogged */
			n[i] = 0;
			k[i] = 0;
			c[i] = 0;
			if (class_exists)
				available[i] = cl->service_rate;
			else
				available[i] = 0;
		}
	}

	/* step 1: adjust rates (greedy algorithm) */

	highest = 0;
	lowest  = jif->jif_maxpri;

	while (highest < jif->jif_maxpri+1 && available[highest] >= 0)
		highest++; /* which is the highest class that needs more service? */
	while (lowest > 0 && available[lowest] <= 0)
		lowest--;  /* which is the lowest class that needs less service? */

	/*
	 * move spare service from the lowest classes (available > 0)
	 * to the highest classes that are short (available < 0) until
	 * one of the two sides runs out.
	 */
	while (highest != jif->jif_maxpri+1 && lowest != -1) {
		/* give the excess service from lowest to highest */
		if (available[lowest]+available[highest] > 0) {
			/*
			 * still some "credit" left
			 * give all that is needed by "highest"
			 */
			n[lowest]  += available[highest];
			n[highest] -= available[highest];
			available[lowest]  += available[highest];
			available[highest] = 0;

			while (highest < jif->jif_maxpri+1
			       && available[highest] >= 0)
				highest++;  /* which is the highest class that needs more service now? */

		} else if (available[lowest]+available[highest] == 0) {
			/* no more credit left but it's fine */
			n[lowest]  += available[highest];
			n[highest] -= available[highest];
			available[highest] = 0;
			available[lowest]  = 0;

			while (highest < jif->jif_maxpri+1
			       && available[highest] >= 0)
				highest++;  /* which is the highest class that needs more service? */
			while (lowest >= 0 && available[lowest] <= 0)
				lowest--;   /* which is the lowest class that needs less service? */

		} else if (available[lowest]+available[highest] < 0) {
			/*
			 * no more credit left and we need to switch
			 * to another class
			 */
			n[lowest]  -= available[lowest];
			n[highest] += available[lowest];
			available[highest] += available[lowest];
			available[lowest]  = 0;

			while ((lowest >= 0)&&(available[lowest] <= 0))
				lowest--;  /* which is the lowest class that needs less service? */
		}
	}

	/* the returned adjustment is the delta to the current rates */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			result[i] = n[i] - cl->service_rate;
		} else {
			if (class_exists)
				result[i] = - cl->service_rate;
			else
				result[i] = 0;
		}
	}

	/* step 2: adjust drops (for ADC) */

	if (highest != jif->jif_maxpri+1) {
		/* some class(es) still need(s) additional service */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && available[i] < 0) {
				if (cl->concerned_adc) {
					/* largest backlog serviceable within the deadline */
					k[i] = c[i]*n[i];
					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
						pkt = qtail(cl->cl_q);
						if (pkt != NULL) {
							/* "safeguard" test (a packet SHOULD be in there) */
							len = (u_int64_t)m_pktlen(pkt);
							/* access packet at the tail */
							if (cl->concerned_alc
							    && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
								keep_going = 0; /* relax ADC in favor of ALC */
							} else {
								/* drop packet at the tail of the class-i queue, update values */
								pkt = _getq_tail(cl->cl_q);
								len = (u_int64_t)m_pktlen(pkt);
								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->cl_rin, (int)len);
								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->st_rin, (int)len);
								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
								m_freem(pkt); /* the packet is trashed here */
								tslist_drop(cl);
								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
							}
						} else
							keep_going = 0; /* NOTREACHED */
					}
					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
				}
				/*
				 * n[i] is the max rate we can give.
				 * the above drops as much as possible
				 * to respect a delay bound.
				 * for throughput bounds,
				 * there's nothing that can be done
				 * after the greedy reallocation.
				 */
			}
		}
	}

	/* update the values of min_rate_adc */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			if (c[i] != 0) {
				if (cl->concerned_adc
				    && !cl->concerned_arc)
					cl->min_rate_adc = k[i]/c[i];
				else
					cl->min_rate_adc = n[i];
			} else
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = n[i]; /* the best we can give */
		else {
			if (class_exists)
				cl->min_rate_adc = 0;
		}
	}

	free(c, M_DEVBUF);
	free(n, M_DEVBUF);
	free(k, M_DEVBUF);
	free(available, M_DEVBUF);

	return (result);

fail5: __unused
	free(available, M_DEVBUF);
fail4:	free(k, M_DEVBUF);
fail3:	free(n, M_DEVBUF);
fail2:	free(c, M_DEVBUF);
fail1:	free(result, M_DEVBUF);
fail0:	return NULL;
}
   1540 
   1541 /*
   1542  * update_error: returns the difference between the mean weighted
   1543  * delay and the weighted delay for each class. if proportional
   1544  * delay differentiation is perfectly achieved, it should return
   1545  * zero for each class.
   1546  */
   1547 static int64_t *
   1548 update_error(struct jobs_if *jif)
   1549 {
   1550 	int i;
   1551 	int active_classes, backlogged_classes;
   1552 	u_int64_t mean_weighted_delay;
   1553 	u_int64_t delays[JOBS_MAXPRI];
   1554 	int64_t* error;
   1555 	int class_exists, is_backlogged;
   1556 	struct jobs_class *cl;
   1557 
   1558 	error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF,
   1559 	    M_WAITOK|M_ZERO);
   1560 
   1561 	if (error == NULL)
   1562 		return NULL;
   1563 
   1564 	mean_weighted_delay = 0;
   1565 	active_classes = 0;
   1566 	backlogged_classes = 0;
   1567 
   1568 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1569 		cl = jif->jif_classes[i];
   1570 		class_exists = (cl != NULL);
   1571 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1572 
   1573 		if (is_backlogged) {
   1574 			backlogged_classes++;
   1575 			if (cl->concerned_rdc) {
   1576 				delays[i] = proj_delay(jif, i);
   1577 				mean_weighted_delay += cl->delay_prod_others*delays[i];
   1578 				active_classes ++;
   1579 			}
   1580 		}
   1581 	}
   1582 
   1583 	if (active_classes == 0)
   1584 		return error;
   1585 	else
   1586 		mean_weighted_delay /= active_classes;
   1587 
   1588 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1589 		cl = jif->jif_classes[i];
   1590 		class_exists = (cl != NULL);
   1591 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1592 
   1593 		if (is_backlogged && cl->concerned_rdc)
   1594 			error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
   1595 		else
   1596 			error[i] = 0; /*
   1597 				       * either the class isn't concerned,
   1598 				       * or it's not backlogged.
   1599 				       * in any case, the rate shouldn't
   1600 				       * be adjusted.
   1601 				       */
   1602 	}
   1603 	return error;
   1604 }
   1605 
   1606 /*
   1607  * min_rates_adc: computes the minimum service rates needed in
   1608  * each class to meet the absolute delay bounds. if, for any
   1609  * class i, the current service rate of class i is less than
   1610  * the computed minimum service rate, this function returns
   1611  * false, true otherwise.
   1612  */
   1613 static int
   1614 min_rates_adc(struct jobs_if *jif)
   1615 {
   1616 	int result;
   1617 	int i;
   1618 	int class_exists, is_backlogged;
   1619 	int64_t remaining_time;
   1620 	struct jobs_class *cl;
   1621 	result = 1;
   1622 
   1623 	for (i = 0; i <= jif->jif_maxpri; i++) {
   1624 		cl = jif->jif_classes[i];
   1625 		class_exists = (cl != NULL);
   1626 		is_backlogged = (class_exists && !qempty(cl->cl_q));
   1627 		if (is_backlogged && cl->concerned_adc) {
   1628 			remaining_time = cl->cl_adc - proj_delay(jif, i);
   1629 			if (remaining_time > 0 ) {
   1630 				/* min rate needed for ADC */
   1631 				cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
   1632 				if (cl->concerned_arc
   1633 				    && cl->cl_arc > cl->min_rate_adc) {
   1634 					/* min rate needed for ADC + ARC */
   1635 					cl->min_rate_adc = cl->cl_arc;
   1636 				}
   1637 			} else {
   1638 				/* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */
   1639 				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
   1640 			}
   1641       		} else if (is_backlogged && cl->concerned_arc)
   1642 			cl->min_rate_adc = cl->cl_arc; 			/* no ADC, an ARC */
   1643 		else if (class_exists)
   1644 			cl->min_rate_adc = 0;	/*
   1645 						 * either the class is not
   1646 						 * backlogged
   1647 						 * or there is no ADC and
   1648 						 * no ARC
   1649 						 */
   1650 		if (is_backlogged && cl->min_rate_adc > cl->service_rate)
   1651 			result = 0;
   1652 	}
   1653 
   1654 	return result;
   1655 }
   1656 
   1657 /*
   1658  * proj_delay: computes the difference between the current time
   1659  * and the time the oldest class-i packet still in the class-i
   1660  * queue i arrived in the system.
   1661  */
   1662 static int64_t
   1663 proj_delay(struct jobs_if *jif, int i)
   1664 {
   1665 	u_int64_t now;
   1666 	int class_exists, is_backlogged;
   1667 	struct jobs_class *cl;
   1668 
   1669 	now = read_machclk();
   1670 	cl = jif->jif_classes[i];
   1671 	class_exists = (cl != NULL);
   1672 	is_backlogged = (class_exists && !qempty(cl->cl_q));
   1673 
   1674 	if (is_backlogged)
   1675 		return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));
   1676 
   1677 	return (0); /* NOTREACHED */
   1678 }
   1679 
   1680 /*
   1681  * pick_dropped_rlc: returns the class index of the class to be
   1682  * dropped for meeting the relative loss constraints.
   1683  */
static int
pick_dropped_rlc(struct jobs_if *jif)
{
	int64_t mean;
	int64_t* loss_error;
	int i, active_classes, backlogged_classes;
	int class_exists, is_backlogged;
	int class_dropped;
	int64_t max_error;
	int64_t max_alc;
	struct mbuf* pkt;
	struct jobs_class *cl;
	u_int64_t len;

	loss_error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1),
	    M_DEVBUF, M_WAITOK);

	if (loss_error == NULL)
		return -1;

	class_dropped = -1;
	max_error = 0;
	mean = 0;
	active_classes = 0;
	backlogged_classes = 0;

	/* mean of the weighted losses of the classes concerned by RLC */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			backlogged_classes ++;
			if (cl->concerned_rlc) {
				mean += cl->loss_prod_others
				    * cl->current_loss;
				active_classes++;
			}
		}
	}

	if (active_classes > 0)
		mean /= active_classes;

	if (active_classes == 0)
		class_dropped = JOBS_MAXPRI+1; /*
						* no classes are concerned
						* by RLCs (JOBS_MAXPRI+1
						* means "ignore RLC" here)
						*/
	else {
		/*
		 * deviation of each backlogged class from the mean
		 * weighted loss; unconcerned classes get +infinity
		 * so they are never selected.
		 */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));

			/*
			 * NOTE(review): this tests cl->cl_rlc while the
			 * mean above is computed over cl->concerned_rlc;
			 * presumably the two are equivalent here --
			 * confirm against the class setup code.
			 */
			if ((is_backlogged)&&(cl->cl_rlc))
				loss_error[i]=cl->loss_prod_others
				    *cl->current_loss-mean;
			else
				loss_error[i] = ALTQ_INFINITY;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && loss_error[i] <= max_error) {
				/*
				 * find out which class is the most
				 * below the mean.
				 * it's the one that needs to be dropped
				 * ties are broken in favor of the higher
				 * priority classes (i.e., if two classes
				 * present the same deviation, the lower
				 * priority class will get dropped).
				 */
				max_error = loss_error[i];
				class_dropped = i;
			}
		}

		if (class_dropped != -1) {
			cl = jif->jif_classes[class_dropped];
			pkt = qtail(cl->cl_q);
			if (pkt != NULL) {
				/*
				 * "safeguard" test (a packet SHOULD be
				 * in there)
				 */
				len = (u_int64_t)m_pktlen(pkt);
				/* access packet at the tail */
				if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
					/*
					 * the class to drop for meeting
					 * the RLC will defeat the ALC:
					 * ignore RLC.
					 */
					class_dropped = JOBS_MAXPRI+1;
				}
			} else
				class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */
		} else
			class_dropped = JOBS_MAXPRI+1;
	}

	/*
	 * no RLC-based pick: fall back to the class with the largest
	 * headroom with respect to its loss bound (or to a loss of
	 * 100% when it has no ALC), scanning from low priority up.
	 */
	if (class_dropped == JOBS_MAXPRI+1) {
		max_alc = -((int64_t)1 << SCALE_LOSS);
		for (i = jif->jif_maxpri; i >= 0; i--) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged) {
				if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
					max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
					class_dropped = i;
				} else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) {
					max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss;
					class_dropped = i;
				}
			}
		}
	}

	free(loss_error, M_DEVBUF);
	return (class_dropped);
}
   1813 
   1814 /*
   1815  * ALTQ binding/setup functions
   1816  */
   1817 /*
   1818  * jobs device interface
   1819  */
   1820 int
   1821 jobsopen(dev_t dev, int flag, int fmt,
   1822     struct lwp *l)
   1823 {
   1824 	if (machclk_freq == 0)
   1825 		init_machclk();
   1826 
   1827 	if (machclk_freq == 0) {
   1828 		printf("jobs: no CPU clock available!\n");
   1829 		return (ENXIO);
   1830 	}
   1831 	/* everything will be done when the queueing scheme is attached. */
   1832 	return 0;
   1833 }
   1834 
   1835 int
   1836 jobsclose(dev_t dev, int flag, int fmt,
   1837     struct lwp *l)
   1838 {
   1839 	struct jobs_if *jif;
   1840 	int err, error = 0;
   1841 
   1842 	while ((jif = jif_list) != NULL) {
   1843 		/* destroy all */
   1844 		if (ALTQ_IS_ENABLED(jif->jif_ifq))
   1845 			altq_disable(jif->jif_ifq);
   1846 
   1847 		err = altq_detach(jif->jif_ifq);
   1848 		if (err == 0)
   1849 			err = jobs_detach(jif);
   1850 		if (err != 0 && error == 0)
   1851 			error = err;
   1852 	}
   1853 
   1854 	return error;
   1855 }
   1856 
   1857 int
   1858 jobsioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
   1859     struct lwp *l)
   1860 {
   1861 	struct jobs_if *jif;
   1862 	struct jobs_interface *ifacep;
   1863 	struct proc *p = l->l_proc;
   1864 	int	error = 0;
   1865 
   1866 	/* check super-user privilege */
   1867 	switch (cmd) {
   1868 	case JOBS_GETSTATS:
   1869 		break;
   1870 	default:
   1871 #if (__FreeBSD_version > 400000)
   1872 		if ((error = suser(p)) != 0)
   1873 			return (error);
   1874 #else
   1875 		if ((error = kauth_authorize_network(p->p_cred,
   1876 		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_JOBS, NULL,
   1877 		    NULL, NULL)) != 0)
   1878 			return (error);
   1879 #endif
   1880 		break;
   1881 	}
   1882 
   1883 	switch (cmd) {
   1884 
   1885 	case JOBS_IF_ATTACH:
   1886 		error = jobscmd_if_attach((struct jobs_attach *)addr);
   1887 		break;
   1888 
   1889 	case JOBS_IF_DETACH:
   1890 		error = jobscmd_if_detach((struct jobs_interface *)addr);
   1891 		break;
   1892 
   1893 	case JOBS_ENABLE:
   1894 	case JOBS_DISABLE:
   1895 	case JOBS_CLEAR:
   1896 		ifacep = (struct jobs_interface *)addr;
   1897 		if ((jif = altq_lookup(ifacep->jobs_ifname,
   1898 				       ALTQT_JOBS)) == NULL) {
   1899 			error = EBADF;
   1900 			break;
   1901 		}
   1902 
   1903 		switch (cmd) {
   1904 		case JOBS_ENABLE:
   1905 			if (jif->jif_default == NULL) {
   1906 #if 1
   1907 				printf("jobs: no default class\n");
   1908 #endif
   1909 				error = EINVAL;
   1910 				break;
   1911 			}
   1912 			error = altq_enable(jif->jif_ifq);
   1913 			break;
   1914 
   1915 		case JOBS_DISABLE:
   1916 			error = altq_disable(jif->jif_ifq);
   1917 			break;
   1918 
   1919 		case JOBS_CLEAR:
   1920 			jobs_clear_interface(jif);
   1921 			break;
   1922 		}
   1923 		break;
   1924 
   1925 		case JOBS_ADD_CLASS:
   1926 			error = jobscmd_add_class((struct jobs_add_class *)addr);
   1927 			break;
   1928 
   1929 	case JOBS_DEL_CLASS:
   1930 		error = jobscmd_delete_class((struct jobs_delete_class *)addr);
   1931 		break;
   1932 
   1933 	case JOBS_MOD_CLASS:
   1934 		error = jobscmd_modify_class((struct jobs_modify_class *)addr);
   1935 		break;
   1936 
   1937 	case JOBS_ADD_FILTER:
   1938 		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
   1939 		break;
   1940 
   1941 	case JOBS_DEL_FILTER:
   1942 		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
   1943 		break;
   1944 
   1945 	case JOBS_GETSTATS:
   1946 		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
   1947 		break;
   1948 
   1949 	default:
   1950 		error = EINVAL;
   1951 		break;
   1952 	}
   1953 	return error;
   1954 }
   1955 
   1956 static int
   1957 jobscmd_if_attach(struct jobs_attach *ap)
   1958 {
   1959 	struct jobs_if *jif;
   1960 	struct ifnet *ifp;
   1961 	int error;
   1962 
   1963 	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
   1964 		return (ENXIO);
   1965 	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth, ap->qlimit, ap->separate)) == NULL)
   1966 		return (ENOMEM);
   1967 
   1968 	/*
   1969 	 * set JOBS to this ifnet structure.
   1970 	 */
   1971 	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
   1972 				 jobs_enqueue, jobs_dequeue, jobs_request,
   1973 				 &jif->jif_classifier, acc_classify)) != 0)
   1974 		(void)jobs_detach(jif);
   1975 
   1976 	return (error);
   1977 }
   1978 
   1979 static int
   1980 jobscmd_if_detach(struct jobs_interface *ap)
   1981 {
   1982 	struct jobs_if *jif;
   1983 	int error;
   1984 
   1985 	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
   1986 		return (EBADF);
   1987 
   1988 	if (ALTQ_IS_ENABLED(jif->jif_ifq))
   1989 		altq_disable(jif->jif_ifq);
   1990 
   1991 	if ((error = altq_detach(jif->jif_ifq)))
   1992 		return (error);
   1993 
   1994 	return jobs_detach(jif);
   1995 }
   1996 
   1997 static int
   1998 jobscmd_add_class(struct jobs_add_class *ap)
   1999 {
   2000 	struct jobs_if *jif;
   2001 	struct jobs_class *cl;
   2002 
   2003 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2004 		return (EBADF);
   2005 
   2006 	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
   2007 		return (EINVAL);
   2008 
   2009 	if ((cl = jobs_class_create(jif, ap->pri,
   2010 				    ap->cl_adc, ap->cl_rdc,
   2011 				    ap->cl_alc, ap->cl_rlc, ap-> cl_arc,
   2012 				    ap->flags)) == NULL)
   2013 		return (ENOMEM);
   2014 
   2015 	/* return a class handle to the user */
   2016 	ap->class_handle = clp_to_clh(cl);
   2017 	return (0);
   2018 }
   2019 
   2020 static int
   2021 jobscmd_delete_class(struct jobs_delete_class *ap)
   2022 {
   2023 	struct jobs_if *jif;
   2024 	struct jobs_class *cl;
   2025 
   2026 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2027 		return (EBADF);
   2028 
   2029 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2030 		return (EINVAL);
   2031 
   2032 	return jobs_class_destroy(cl);
   2033 }
   2034 
/*
 * jobscmd_modify_class: handle JOBS_MOD_CLASS.
 * Change a class's priority and/or service parameters. Returns 0 on
 * success, EBADF for an unknown interface, EINVAL for a bad priority or
 * handle, EEXIST if the target priority slot is already occupied by
 * another class, or ENOMEM from jobs_class_create().
 */
static int
jobscmd_modify_class(struct jobs_modify_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		if (jif->jif_classes[ap->pri] != NULL)
			return (EEXIST);
		/* vacate the old slot, occupy the new one */
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	/*
	 * NOTE(review): this presumably relies on jobs_class_create()
	 * reinitializing the existing class at ap->pri rather than
	 * allocating a fresh one — confirm against its definition. If it
	 * fails here, the priority move above is NOT rolled back.
	 */
	if ((cl = jobs_class_create(jif, ap->pri,
				    ap->cl_adc, ap->cl_rdc,
				    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
				    ap->flags)) == NULL)
		return (ENOMEM);
	return 0;
}
   2069 
   2070 static int
   2071 jobscmd_add_filter(struct jobs_add_filter *ap)
   2072 {
   2073 	struct jobs_if *jif;
   2074 	struct jobs_class *cl;
   2075 
   2076 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2077 		return (EBADF);
   2078 
   2079 	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
   2080 		return (EINVAL);
   2081 
   2082 	return acc_add_filter(&jif->jif_classifier, &ap->filter,
   2083 			      cl, &ap->filter_handle);
   2084 }
   2085 
   2086 static int
   2087 jobscmd_delete_filter(struct jobs_delete_filter *ap)
   2088 {
   2089 	struct jobs_if *jif;
   2090 
   2091 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2092 		return (EBADF);
   2093 
   2094 	return acc_delete_filter(&jif->jif_classifier, ap->filter_handle);
   2095 }
   2096 
   2097 static int
   2098 jobscmd_class_stats(struct jobs_class_stats *ap)
   2099 {
   2100 	struct jobs_if *jif;
   2101 	struct jobs_class *cl;
   2102 	struct class_stats stats, *usp;
   2103 	int pri, error;
   2104 
   2105 	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
   2106 		return (EBADF);
   2107 
   2108 	ap->maxpri = jif->jif_maxpri;
   2109 
   2110 	/* then, read the next N classes */
   2111 	usp = ap->stats;
   2112 	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
   2113 		cl = jif->jif_classes[pri];
   2114 		if (cl != NULL)
   2115 			get_class_stats(&stats, cl);
   2116 		else
   2117 			(void)memset(&stats, 0, sizeof(stats));
   2118 		if ((error = copyout((void *)&stats, (void *)usp++,
   2119 				     sizeof(stats))) != 0)
   2120 			return (error);
   2121 	}
   2122 	return (0);
   2123 }
   2124 
/*
 * get_class_stats: fill *sp with a snapshot of class cl's statistics.
 * NOT read-only: as a side effect it resets the st_rin/st_rout packet
 * counters and zeroes cl_avgdel, so successive calls report per-interval
 * rather than cumulative values for those fields.
 */
static void
get_class_stats(struct class_stats *sp, struct jobs_class *cl)
{
	u_int64_t now;
	now = read_machclk();

	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/*  PKTCNTR_RESET(&cl->st_arrival);*/
	/* reset interval counters now that they have been reported */
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	cl->cl_avgdel = 0;

	/* time spent in the current busy period, in seconds */
	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	/* per-interface enqueue/dequeue cycle-count profiling data */
	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}
   2165 
   2166 /* convert a class handle to the corresponding class pointer */
   2167 static struct jobs_class *
   2168 clh_to_clp(struct jobs_if *jif, u_long chandle)
   2169 {
   2170 	struct jobs_class *cl;
   2171 
   2172 	cl = (struct jobs_class *)chandle;
   2173 	if (chandle != ALIGN(cl)) {
   2174 #if 1
   2175 		printf("clh_to_cl: unaligned pointer %p\n", cl);
   2176 #endif
   2177 		return (NULL);
   2178 	}
   2179 
   2180 	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
   2181 		return (NULL);
   2182 	return (cl);
   2183 }
   2184 
/* convert a class pointer to the corresponding class handle */
/* (the handle is simply the value stashed in cl_handle at creation) */
static u_long
clp_to_clh(struct jobs_class *cl)
{
	return (cl->cl_handle);
}
   2191 
#ifdef KLD_MODULE

/* device-switch entry points used when JOBS is built as a loadable module */
static struct altqsw jobs_sw =
	{"jobs", jobsopen, jobsclose, jobsioctl};

ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);

#endif /* KLD_MODULE */
   2200 
   2201 #endif /* ALTQ3_COMPAT */
   2202 #endif /* ALTQ_JOBS */
   2203