/*	$NetBSD: altq_jobs.c,v 1.15 2025/08/18 20:59:56 andvar Exp $	*/
/*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/
/*
 * Copyright (c) 2001, the Rector and Board of Visitors of the
 * University of Virginia.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms,
 * with or without modification, are permitted provided
 * that the following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the University of Virginia nor the names
 * of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * JoBS - altq prototype implementation
 *
 * Author: Nicolas Christin <nicolas (at) cs.virginia.edu>
 *
 * JoBS algorithms originally devised and proposed by
 * Nicolas Christin and Jorg Liebeherr.
 * Grateful acknowledgments to Tarek Abdelzaher for his help and
 * comments, and to Kenjiro Cho for some helpful advice.
 * Contributed by the Multimedia Networks Group at the University
 * of Virginia.
 *
 * Papers and additional info can be found at
 * http://qosbox.cs.virginia.edu
 *
 */

/*
 * JoBS queue
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.15 2025/08/18 20:59:56 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_JOBS  /* jobs is enabled by ALTQ_JOBS option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#ifdef __FreeBSD__
#include <sys/limits.h>
#endif

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_jobs.h>

#ifdef ALTQ3_COMPAT
/*
 * function prototypes
 */
static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
static void jobs_detach(struct jobs_if *);
static int jobs_clear_interface(struct jobs_if *);
static int jobs_request(struct ifaltq *, int, void *);
static void jobs_purge(struct jobs_if *);
static struct jobs_class *jobs_class_create(struct jobs_if *,
    int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
static int jobs_class_destroy(struct jobs_class *);
static int jobs_enqueue(struct ifaltq *, struct mbuf *);
static struct mbuf *jobs_dequeue(struct ifaltq *, int);

static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if *);
static struct mbuf *jobs_getq(struct jobs_class *);
static struct mbuf *jobs_pollq(struct jobs_class *);
static void jobs_purgeq(struct jobs_class *);

static int jobscmd_if_attach(struct jobs_attach *);
static int jobscmd_if_detach(struct jobs_interface *);
static int jobscmd_add_class(struct jobs_add_class *);
static int jobscmd_delete_class(struct jobs_delete_class *);
static int jobscmd_modify_class(struct jobs_modify_class *);
static int jobscmd_add_filter(struct jobs_add_filter *);
static int jobscmd_delete_filter(struct jobs_delete_filter *);
static int jobscmd_class_stats(struct jobs_class_stats *);
static void get_class_stats(struct class_stats *, struct jobs_class *);
static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
static u_long clp_to_clh(struct jobs_class *);

static TSLIST *tslist_alloc(void);
static void tslist_destroy(struct jobs_class *);
static int tslist_enqueue(struct jobs_class *, u_int64_t);
static void tslist_dequeue(struct jobs_class *);
static void tslist_drop(struct jobs_class *);

static int enforce_wc(struct jobs_if *);
static int64_t *adjust_rates_rdc(struct jobs_if *);
static int64_t *assign_rate_drops_adc(struct jobs_if *);
static int64_t *update_error(struct jobs_if *);
static int min_rates_adc(struct jobs_if *);
static int64_t proj_delay(struct jobs_if *, int);
static int pick_dropped_rlc(struct jobs_if *);

altqdev_decl(jobs);
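
/*
 * reader's overview (a summary of the code below, not normative):
 *
 *   enqueue: jobs_enqueue() -> jobs_addq(), then enforce_wc();
 *	if min_rates_adc() reports an unmet delay/rate bound,
 *	assign_rate_drops_adc() reallocates rates and drops traffic;
 *	finally adjust_rates_rdc() handles the proportional bounds.
 *   dequeue: jobs_dequeue() serves the backlogged class whose
 *	actual output curve lags furthest behind its theoretical one.
 *
 * rates are kept in a scaled internal unit (bps_to_internal() and
 * friends, see altq_jobs.h); losses are fixed-point fractions
 * scaled by 2^SCALE_LOSS.
 */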

/* jif_list keeps all jobs_if's allocated. */
static struct jobs_if *jif_list = NULL;

typedef unsigned long long ull;

/* setup functions */

static struct jobs_if *
jobs_attach(struct ifaltq *ifq, u_int bandwidth, u_int qlimit, u_int separate)
{
	struct jobs_if *jif;

	jif = malloc(sizeof(struct jobs_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (jif == NULL)
		return NULL;

	jif->jif_bandwidth = bandwidth;
	jif->jif_qlimit = qlimit;
	jif->jif_separate = separate;
#ifdef ALTQ_DEBUG
	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
	printf("JoBS buffer size = %d pkts [%s]\n",
	    (int)qlimit, separate ? "separate buffers" : "shared buffer");
#endif
	jif->jif_maxpri = -1;
	jif->jif_ifq = ifq;

	jif->wc_cycles_enqueue = 0;
	jif->avg_cycles_enqueue = 0;
	jif->avg_cycles2_enqueue = 0;
	jif->bc_cycles_enqueue = ALTQ_INFINITY;
	jif->wc_cycles_dequeue = 0;
	jif->avg_cycles_dequeue = 0;
	jif->avg_cycles2_dequeue = 0;
	jif->bc_cycles_dequeue = ALTQ_INFINITY;
	jif->total_enqueued = 0;
	jif->total_dequeued = 0;

	/* add this state to the jobs list */
	jif->jif_next = jif_list;
	jif_list = jif;

	return jif;
}

static void
jobs_detach(struct jobs_if *jif)
{
	(void)jobs_clear_interface(jif);

	/* remove this interface from the jif list */
	if (jif_list == jif)
		jif_list = jif->jif_next;
	else {
		struct jobs_if *p;

		for (p = jif_list; p != NULL; p = p->jif_next)
			if (p->jif_next == jif) {
				p->jif_next = jif->jif_next;
				break;
			}
		ASSERT(p != NULL);
	}
	free(jif, M_DEVBUF);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
jobs_clear_interface(struct jobs_if *jif)
{
	struct jobs_class *cl;
	int pri;

	/* free the filters for this interface */
	acc_discard_filters(&jif->jif_classifier, NULL, 1);

	/* clear out the classes */
	for (pri = 0; pri <= jif->jif_maxpri; pri++)
		if ((cl = jif->jif_classes[pri]) != NULL)
			jobs_class_destroy(cl);

	return 0;
}

static int
jobs_request(struct ifaltq *ifq, int req, void *arg)
{
	struct jobs_if *jif = (struct jobs_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		jobs_purge(jif);
		break;
	}
	return 0;
}

/* discard all the queued packets on the interface */
static void
jobs_purge(struct jobs_if *jif)
{
	struct jobs_class *cl;
	int pri;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
			jobs_purgeq(cl);
	}
	if (ALTQ_IS_ENABLED(jif->jif_ifq))
		jif->jif_ifq->ifq_len = 0;
}

static struct jobs_class *
jobs_class_create(struct jobs_if *jif, int pri, int64_t adc, int64_t rdc,
    int64_t alc, int64_t rlc, int64_t arc, int flags)
{
	struct jobs_class *cl, *scan1, *scan2;
	int s;
	int class_exists1, class_exists2;
	int i, j;
	int64_t tmp[JOBS_MAXPRI];
	u_int64_t now;

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
		splx(s);
	} else {
		cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return NULL;

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;

		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	cl->cl_period = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = ALTQ_INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = ALTQ_INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	cl->cl_rdc = rdc;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rlc = rlc;
	cl->cl_alc = alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;  /* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */
	if (cl->concerned_rdc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	if (cl->concerned_rlc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return cl;

 err_ret:
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		free(cl->arv_tm, M_DEVBUF);

	free(cl, M_DEVBUF);
	return NULL;
}

static int
jobs_class_destroy(struct jobs_class *cl)
{
	struct jobs_if *jif;
	int s, pri;

	s = splnet();

	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		jobs_purgeq(cl);

	jif = cl->cl_jif;
	jif->jif_classes[cl->cl_pri] = NULL;
	if (jif->jif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (jif->jif_classes[pri] != NULL) {
				jif->jif_maxpri = pri;
				break;
			}
		if (pri < 0)
			jif->jif_maxpri = -1;
	}
	splx(s);

	tslist_destroy(cl);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return 0;
}

/*
 * jobs_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
jobs_enqueue(struct ifaltq *ifq, struct mbuf *m)
{
	struct jobs_if *jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	int len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t *delta_rate;
	u_int64_t tstamp1, tstamp2, cycles;  /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	if (IFQ_IS_EMPTY(ifq)) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if ((cl = m->m_pkthdr.pattr_class) == NULL)
		cl = jif->jif_default;

	len = m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals with counter overflow */
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;
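
	/*
	 * note for the reader: current_loss is the fraction
	 * (arrival - rin)/arrival, i.e., the fraction of class-i
	 * traffic dropped during the current busy period, kept in
	 * fixed point scaled by 2^SCALE_LOSS; e.g., a 25% loss ratio
	 * would be stored as (1 << SCALE_LOSS)/4 (illustrative
	 * figure only, see altq_jobs.h for the scaling constants).
	 */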

	/* for MDRR: update theoretical value of the output curve */

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now;  /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS;  /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	enforce_wc(jif);

	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
				if ((cl = jif->jif_classes[pri]) != NULL &&
				    !qempty(cl->cl_q))
					cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	}

	delta_rate = adjust_rates_rdc(jif);

	if (delta_rate != NULL) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++)
			if ((cl = jif->jif_classes[pri]) != NULL &&
			    !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
		free(delta_rate, M_DEVBUF);
	}

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue = cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue = cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return return_flag;
}

/*
 * jobs_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 * from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 * ALTDQ_REMOVE must return the same packet if called immediately
 * after ALTDQ_POLL.
 */
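
/*
 * additional note on the selection rule below: the served class is
 * the backlogged class i maximizing the deficit
 *	cl_rout_th.bytes - scale_rate(cl_rout.bytes),
 * i.e., the one furthest behind the output curve promised by its
 * current service rate.  with deficits of, say, {300, 100, 0}
 * scaled bytes for priorities 0..2, priority 0 is served next
 * (illustrative numbers).
 */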

static struct mbuf *
jobs_dequeue(struct ifaltq *ifq, int op)
{
	struct jobs_if *jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			cl = jif->jif_classes[pri];
			if (cl != NULL)
				cl->idletime = now;
		}

		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return NULL;
	}

	/*
	 * select the class whose actual transmissions are the furthest
	 * from the promised transmissions
	 */

	max_error = -1;
	svc_class = -1;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			error = (int64_t)cl->cl_rout_th.bytes
			    - (int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	if (op == ALTDQ_POLL) {
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
	}
	if (cl != NULL)
		tslist_dequeue(cl);  /* dequeue the timestamp */

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return m;
}
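
/*
 * jobs_addq: buffer management, in rough outline (as read from the
 * code below):
 *  - separate per-class buffers: over the limit, drop the arriving
 *    packet (no loss guarantees are offered in this mode);
 *  - shared buffer, no RLC and no ALC on the class: drop the
 *    arriving packet;
 *  - shared buffer, ALC only: drop the arriving packet unless that
 *    would violate the class' loss bound, else drop from a victim
 *    class chosen by pick_dropped_rlc();
 *  - shared buffer, RLC: always choose the victim via
 *    pick_dropped_rlc().
 */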

static int
jobs_addq(struct jobs_class *cl, struct mbuf *m, struct jobs_if *jif)
{
	int victim;
	u_int64_t len;
	u_int64_t now;
	struct jobs_class *victim_class;

	victim = -1;
	victim_class = NULL;
	len = 0;

	now = read_machclk();

	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
		/*
		 * separate buffers: no guarantees on packet drops
		 * can be offered, thus we drop the incoming packet
		 */
		len = (u_int64_t)m_pktlen(m);
		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->cl_rin, (int)len);
		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->st_rin, (int)len);
		cl->current_loss += (len << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
		m_freem(m);
		return (-1);

	} else if (!jif->jif_separate
	    && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
		/* shared buffer: supports guarantees on losses */
		if (!cl->concerned_rlc) {
			if (!cl->concerned_alc) {
				/*
				 * no ALC, no RLC on this class:
				 * drop the incoming packet
				 */
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->cl_rin, (int)len);
				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->st_rin, (int)len);
				cl->current_loss +=
				    (len << SCALE_LOSS)/cl->cl_arrival.bytes;
				m_freem(m);
				return (-1);
			} else {
				/*
				 * no RLC, but an ALC:
				 * drop the incoming packet if possible
				 */
				len = (u_int64_t)m_pktlen(m);
				if (cl->current_loss + (len << SCALE_LOSS)
				    / cl->cl_arrival.bytes <= cl->cl_alc) {
					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->cl_rin, (int)len);
					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->st_rin, (int)len);
					cl->current_loss +=
					    (len << SCALE_LOSS)/cl->cl_arrival.bytes;
					m_freem(m);
					return (-1);
				} else {
					/*
					 * the ALC would be violated:
					 * pick another class
					 */
					_addq(cl->cl_q, m);
					tslist_enqueue(cl, now);

					victim = pick_dropped_rlc(jif);

					if (victim == -1) {
						/*
						 * something went wrong
						 * let us discard
						 * the incoming packet,
						 * regardless of what
						 * may happen...
						 */
						victim_class = cl;
					} else
						victim_class = jif->jif_classes[victim];

					if (victim_class != NULL) {
						/*
						 * test for safety
						 * purposes...
						 * it must be true
						 */
						m = _getq_tail(victim_class->cl_q);
						len = (u_int64_t)m_pktlen(m);
						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
						victim_class->current_loss +=
						    (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
						m_freem(m);  /* the packet is trashed here */
						tslist_drop(victim_class);  /* and its timestamp as well */
					}
					return (-1);
				}
			}
		} else {
			/*
			 * RLC on that class:
			 * pick class according to RLCs
			 */
			_addq(cl->cl_q, m);
			tslist_enqueue(cl, now);

			victim = pick_dropped_rlc(jif);
			if (victim == -1) {
				/*
				 * something went wrong
				 * let us discard the incoming packet,
				 * regardless of what may happen...
				 */
				victim_class = cl;
			} else
				victim_class = jif->jif_classes[victim];

			if (victim_class != NULL) {
				/*
				 * test for safety purposes...
				 * it must be true
				 */
				m = _getq_tail(victim_class->cl_q);
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
				victim_class->current_loss +=
				    (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
				m_freem(m);  /* the packet is trashed here */
				tslist_drop(victim_class);  /* and its timestamp as well */
			}
			return -1;
		}
	}
	/* else: no drop */

	_addq(cl->cl_q, m);
	tslist_enqueue(cl, now);

	return 0;
}

static struct mbuf *
jobs_getq(struct jobs_class *cl)
{
	return _getq(cl->cl_q);
}

static struct mbuf *
jobs_pollq(struct jobs_class *cl)
{
	return qhead(cl->cl_q);
}

static void
jobs_purgeq(struct jobs_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
		m_freem(m);
		tslist_drop(cl);
	}
	ASSERT(qlen(cl->cl_q) == 0);
}

/*
 * timestamp list support routines.
 *
 * this implementation has been revamped and now uses a TAILQ
 * structure.  a timestamp list holds the arrival timestamps of the
 * packets queued in a class; there is one timestamp list per class.
 */
static TSLIST *
tslist_alloc(void)
{
	TSLIST *list_init;

	list_init = malloc(sizeof(TSLIST), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(list_init);
	return list_init;
}

static void
tslist_destroy(struct jobs_class *cl)
{
	while (tslist_first(cl->arv_tm) != NULL)
		tslist_dequeue(cl);

	free(cl->arv_tm, M_DEVBUF);
}

static int
tslist_enqueue(struct jobs_class *cl, u_int64_t arv)
{
	TSENTRY *pushed;

	pushed = malloc(sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
	if (pushed == NULL)
		return 0;

	pushed->timestamp = arv;
	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
	return 1;
}

static void
tslist_dequeue(struct jobs_class *cl)
{
	TSENTRY *popped;

	popped = tslist_first(cl->arv_tm);
	if (popped != NULL) {
		TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
		free(popped, M_DEVBUF);
	}
	return;
}

static void
tslist_drop(struct jobs_class *cl)
{
	TSENTRY *popped;

	popped = tslist_last(cl->arv_tm);
	if (popped != NULL) {
		TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
		free(popped, M_DEVBUF);
	}
	return;
}

/*
 * rate allocation support routines
 */
/*
 * enforce_wc: enforce that backlogged classes have a non-zero
 * service rate, and that non-backlogged classes have a zero
 * service rate.
 */
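
/*
 * illustrative example: on a 10 Mbps interface with three
 * backlogged classes, a rate update here (re)starts each backlogged
 * class at bps_to_internal(10000000)/3, while idle classes are
 * pinned to a zero rate.  (numbers are made up; the split is
 * whatever bps_to_internal(jif_bandwidth)/active_classes yields.)
 */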
1019 */ 1020 1021 static int 1022 enforce_wc(struct jobs_if *jif) 1023 { 1024 struct jobs_class *cl; 1025 1026 int64_t active_classes; 1027 int pri; 1028 int is_backlogged, class_exists, updated; 1029 1030 updated = 0; 1031 active_classes = 0; 1032 1033 for (pri = 0; pri <= jif->jif_maxpri; pri++) { 1034 cl = jif->jif_classes[pri]; 1035 class_exists = (cl != NULL); 1036 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1037 1038 if (is_backlogged) 1039 active_classes++; 1040 if ((is_backlogged && cl->service_rate <= 0) 1041 ||(class_exists 1042 && !is_backlogged && cl->service_rate > 0)) 1043 updated = 1; 1044 } 1045 1046 if (updated) { 1047 for (pri = 0; pri <= jif->jif_maxpri; pri++) { 1048 cl = jif->jif_classes[pri]; 1049 class_exists = (cl != NULL); 1050 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1051 1052 if (class_exists && !is_backlogged) 1053 cl->service_rate = 0; 1054 else if (is_backlogged) 1055 cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes); 1056 } 1057 } 1058 1059 return (updated); 1060 } 1061 1062 /* 1063 * adjust_rates_rdc: compute the service rates adjustments 1064 * needed to realize the desired proportional delay differentiation. 1065 * essentially, the rate adjustement delta_rate = prop_control*error, 1066 * where error is the difference between the measured "weighted" 1067 * delay and the mean of the weighted delays. see paper for more 1068 * information. 1069 * prop_control has slightly changed since the INFOCOM paper, 1070 * this condition seems to provide better results. 1071 */ 1072 1073 static int64_t * 1074 adjust_rates_rdc(struct jobs_if *jif) 1075 { 1076 int64_t *result; 1077 int64_t credit, available, lower_bound, upper_bound; 1078 int64_t bk; 1079 int i, j; 1080 int rdc_classes, active_classes; 1081 int class_exists, is_backlogged; 1082 struct jobs_class *cl; 1083 int64_t *error; 1084 int64_t prop_control; 1085 u_int64_t max_prod; 1086 u_int64_t min_share; 1087 u_int64_t max_avg_pkt_size; 1088 1089 /* 1090 * min_share is scaled 1091 * to avoid dealing with doubles 1092 */ 1093 active_classes = 0; 1094 rdc_classes = 0; 1095 max_prod = 0; 1096 max_avg_pkt_size = 0; 1097 1098 upper_bound = (int64_t)jif->jif_bandwidth; 1099 1100 for (i = 0; i <= jif->jif_maxpri; i++) { 1101 cl = jif->jif_classes[i]; 1102 class_exists = (cl != NULL); 1103 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1104 if (is_backlogged) { 1105 active_classes++; 1106 if (cl->concerned_rdc) 1107 rdc_classes++; 1108 else 1109 upper_bound -= 1110 internal_to_bps(cl->service_rate); 1111 } 1112 } 1113 1114 result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), 1115 M_DEVBUF, M_WAITOK); 1116 1117 if (result == NULL) 1118 return NULL; 1119 1120 for (i = 0; i <= jif->jif_maxpri; i++) 1121 result[i] = 0; 1122 1123 if (upper_bound <= 0 || rdc_classes == 0) 1124 return result; 1125 1126 credit = 0; 1127 lower_bound = 0; 1128 min_share = ((u_int64_t)1 << SCALE_SHARE); 1129 bk = 0; 1130 1131 for (i = 0; i <= jif->jif_maxpri; i++) { 1132 cl = jif->jif_classes[i]; 1133 class_exists = (cl != NULL); 1134 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1135 if (is_backlogged && cl->concerned_rdc) 1136 bk += cl->cl_rin.bytes; 1137 } 1138 1139 if (bk == 0) 1140 return result; 1141 1142 for (i = 0; i <= jif->jif_maxpri; i++) { 1143 cl = jif->jif_classes[i]; 1144 class_exists = (cl != NULL); 1145 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1146 if (is_backlogged 1147 && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share) 1148 min_share = 
			    (cl->cl_rin.bytes << SCALE_SHARE)/bk;
		if (is_backlogged && cl->concerned_rdc
		    && cl->delay_prod_others > max_prod)
			max_prod = cl->delay_prod_others;

		if (is_backlogged && cl->concerned_rdc
		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
			max_avg_pkt_size =
			    (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
	}

	error = update_error(jif);
	if (!error)
		goto fail;

	prop_control = (upper_bound*upper_bound*min_share)
	    / (max_prod*(max_avg_pkt_size << 2));

	prop_control = bps_to_internal(ticks_to_secs(prop_control));  /* in BT-1 */

	credit = 0;
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc) {
			result[i] = -prop_control*error[i];  /* in BT-1 */
			result[i] >>= (SCALE_SHARE);
		}
	}

	free(error, M_DEVBUF);  /* we don't need these anymore */

	/* saturation */

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			lower_bound += cl->min_rate_adc;
		/*
		 * note: if there's no ADC or ARC on cl,
		 * this is equal to zero, which is fine
		 */
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate > upper_bound) {
			for (j = 0; j <= jif->jif_maxpri; j++) {
				cl = jif->jif_classes[j];
				class_exists = (cl != NULL);
				is_backlogged = (class_exists
				    && !qempty(cl->cl_q));
				if (is_backlogged && cl->concerned_rdc) {
					if (j == i)
						result[j] = upper_bound
						    - cl->service_rate
						    + cl->min_rate_adc
						    - lower_bound;
					else
						result[j] =
						    - cl->service_rate
						    + cl->min_rate_adc;
				}
			}
			return result;
		}

		cl = jif->jif_classes[i];
		/* do this again since it may have been modified */
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate < cl->min_rate_adc) {
			credit += cl->service_rate + result[i]
			    - cl->min_rate_adc;
			/* "credit" is in fact a negative number */
			result[i] = -cl->service_rate + cl->min_rate_adc;
		}
	}

	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc) {
			available = result[i]
			    + cl->service_rate - cl->min_rate_adc;
			if (available >= -credit) {
				result[i] += credit;
				credit = 0;
			} else {
				result[i] -= available;
				credit += available;
			}
		}
	}
	return result;

 fail:	free(result, M_DEVBUF);
	return NULL;
}

/*
 * assign_rate_drops_adc: returns the adjustment needed to
 * the service rates to meet the absolute delay/rate constraints
 * (delay/throughput bounds) and drops traffic if need be.
 * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
 */
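
/*
 * (note: like adjust_rates_rdc() above, the function below returns
 * a malloc'ed array holding one service-rate delta per priority;
 * the caller, jobs_enqueue(), applies the deltas to backlogged
 * classes and frees the array.)
 */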
1264 */ 1265 1266 static int64_t * 1267 assign_rate_drops_adc(struct jobs_if *jif) 1268 { 1269 int64_t *result; 1270 int class_exists, is_backlogged; 1271 struct jobs_class *cl; 1272 1273 int64_t *c, *n, *k; 1274 int64_t *available; 1275 1276 int lowest, highest; 1277 int keep_going; 1278 int i; 1279 u_int64_t now, oldest_arv; 1280 int64_t remaining_time; 1281 struct mbuf* pkt; 1282 u_int64_t len; 1283 1284 now = read_machclk(); 1285 oldest_arv = now; 1286 1287 result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK); 1288 if (result == NULL) 1289 goto fail0; 1290 c = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK); 1291 if (c == NULL) 1292 goto fail1; 1293 n = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK); 1294 if (n == NULL) 1295 goto fail2; 1296 k = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK); 1297 if (k == NULL) 1298 goto fail3; 1299 available = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK); 1300 if (available == NULL) 1301 goto fail4; 1302 1303 for (i = 0; i <= jif->jif_maxpri; i++) 1304 result[i] = 0; 1305 1306 keep_going = 1; 1307 1308 for (i = 0; i <= jif->jif_maxpri; i++) { 1309 cl = jif->jif_classes[i]; 1310 class_exists = (cl != NULL); 1311 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1312 1313 if (is_backlogged) { 1314 if (cl->concerned_adc) { 1315 /* 1316 * get the arrival time of the oldest 1317 * class-i packet 1318 */ 1319 if (tslist_first(cl->arv_tm) == NULL) 1320 oldest_arv = now; /* NOTREACHED */ 1321 else 1322 oldest_arv = (tslist_first(cl->arv_tm))->timestamp; 1323 1324 n[i] = cl->service_rate; 1325 k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes)); 1326 1327 remaining_time = cl->cl_adc 1328 - (int64_t)delay_diff(now, oldest_arv); 1329 if (remaining_time > 0) { 1330 c[i] = remaining_time; 1331 /* 1332 * c is the remaining time before 1333 * the deadline is violated 1334 * (in ticks) 1335 */ 1336 available[i] = n[i]-k[i]/c[i]; 1337 } else { 1338 /* 1339 * deadline has passed... 1340 * we allocate the whole link 1341 * capacity to hopefully 1342 * solve the problem 1343 */ 1344 c[i] = 0; 1345 available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth)); 1346 } 1347 if (cl->concerned_arc) { 1348 /* 1349 * there's an ARC in addition 1350 * to the ADC 1351 */ 1352 if (n[i] - cl->cl_arc < available[i]) 1353 available[i] = n[i] 1354 - cl->cl_arc; 1355 } 1356 } else if (cl->concerned_arc) { 1357 /* 1358 * backlogged, concerned by ARC 1359 * but not by ADC 1360 */ 1361 n[i] = cl->service_rate; 1362 available[i] = n[i] - cl->cl_arc; 1363 } else { 1364 /* 1365 * backlogged but not concerned by ADC 1366 * or ARC -> can give everything 1367 */ 1368 n[i] = cl->service_rate; 1369 available[i] = n[i]; 1370 } 1371 } else { 1372 /* not backlogged */ 1373 n[i] = 0; 1374 k[i] = 0; 1375 c[i] = 0; 1376 if (class_exists) 1377 available[i] = cl->service_rate; 1378 else 1379 available[i] = 0; 1380 } 1381 } 1382 1383 /* step 1: adjust rates (greedy algorithm) */ 1384 1385 highest = 0; 1386 lowest = jif->jif_maxpri; 1387 1388 while (highest < jif->jif_maxpri+1 && available[highest] >= 0) 1389 highest++; /* which is the highest class that needs more service? */ 1390 while (lowest > 0 && available[lowest] <= 0) 1391 lowest--; /* which is the lowest class that needs less service? 

	while (highest != jif->jif_maxpri+1 && lowest != -1) {
		/* give the excess service from lowest to highest */
		if (available[lowest]+available[highest] > 0) {
			/*
			 * still some "credit" left
			 * give all that is needed by "highest"
			 */
			n[lowest] += available[highest];
			n[highest] -= available[highest];
			available[lowest] += available[highest];
			available[highest] = 0;

			while (highest < jif->jif_maxpri+1
			    && available[highest] >= 0)
				highest++;	/* which is the highest class that needs more service now? */

		} else if (available[lowest]+available[highest] == 0) {
			/* no more credit left but it's fine */
			n[lowest] += available[highest];
			n[highest] -= available[highest];
			available[highest] = 0;
			available[lowest] = 0;

			while (highest < jif->jif_maxpri+1
			    && available[highest] >= 0)
				highest++;	/* which is the highest class that needs more service? */
			while (lowest >= 0 && available[lowest] <= 0)
				lowest--;	/* which is the lowest class that needs less service? */

		} else if (available[lowest]+available[highest] < 0) {
			/*
			 * no more credit left and we need to switch
			 * to another class
			 */
			n[lowest] -= available[lowest];
			n[highest] += available[lowest];
			available[highest] += available[lowest];
			available[lowest] = 0;

			while ((lowest >= 0) && (available[lowest] <= 0))
				lowest--;	/* which is the lowest class that needs less service? */
		}
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			result[i] = n[i] - cl->service_rate;
		} else {
			if (class_exists)
				result[i] = - cl->service_rate;
			else
				result[i] = 0;
		}
	}

	/* step 2: adjust drops (for ADC) */

	if (highest != jif->jif_maxpri+1) {
		/* some class(es) still need(s) additional service */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
			    && !qempty(cl->cl_q));
			if (is_backlogged && available[i] < 0) {
				if (cl->concerned_adc) {
					k[i] = c[i]*n[i];
					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes)) > k[i]) {
						pkt = qtail(cl->cl_q);
						if (pkt != NULL) {
							/* "safeguard" test (a packet SHOULD be in there) */
							len = (u_int64_t)m_pktlen(pkt);
							/* access packet at the tail */
							if (cl->concerned_alc
							    && cl->current_loss + (len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
								keep_going = 0;	/* relax ADC in favor of ALC */
							} else {
								/* drop packet at the tail of the class-i queue, update values */
								pkt = _getq_tail(cl->cl_q);
								len = (u_int64_t)m_pktlen(pkt);
								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->cl_rin, (int)len);
								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->st_rin, (int)len);
								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
								m_freem(pkt);	/* the packet is trashed here */
								tslist_drop(cl);
								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
							}
						} else
							keep_going = 0;	/* NOTREACHED */
					}
					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));
				}
				/*
				 * n[i] is the max rate we can give.
				 * the above drops as much as possible
				 * to respect a delay bound.
				 * for throughput bounds,
				 * there's nothing that can be done
				 * after the greedy reallocation.
				 */
			}
		}
	}

	/* update the values of min_rate_adc */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			if (c[i] != 0) {
				if (cl->concerned_adc
				    && !cl->concerned_arc)
					cl->min_rate_adc = k[i]/c[i];
				else
					cl->min_rate_adc = n[i];
			} else
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = n[i];	/* the best we can give */
		else {
			if (class_exists)
				cl->min_rate_adc = 0;
		}
	}

	free(c, M_DEVBUF);
	free(n, M_DEVBUF);
	free(k, M_DEVBUF);
	free(available, M_DEVBUF);

	return result;

 fail5: __unused
	free(available, M_DEVBUF);
 fail4:	free(k, M_DEVBUF);
 fail3:	free(n, M_DEVBUF);
 fail2:	free(c, M_DEVBUF);
 fail1:	free(result, M_DEVBUF);
 fail0:	return NULL;
}

/*
 * update_error: returns the difference between the mean weighted
 * delay and the weighted delay for each class.  if proportional
 * delay differentiation is perfectly achieved, it should return
 * zero for each class.
 */
static int64_t *
update_error(struct jobs_if *jif)
{
	int i;
	int active_classes;
	u_int64_t mean_weighted_delay;
	u_int64_t delays[JOBS_MAXPRI];
	int64_t *error;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	if (error == NULL)
		return NULL;

	mean_weighted_delay = 0;
	active_classes = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			if (cl->concerned_rdc) {
				delays[i] = proj_delay(jif, i);
				mean_weighted_delay += cl->delay_prod_others*delays[i];
				active_classes++;
			}
		}
	}

	if (active_classes == 0)
		return error;
	else
		mean_weighted_delay /= active_classes;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			error[i] = ((int64_t)mean_weighted_delay)
			    - ((int64_t)cl->delay_prod_others*delays[i]);
		else
			error[i] = 0;
			/*
			 * either the class isn't concerned,
			 * or it's not backlogged.
			 * in any case, the rate shouldn't
			 * be adjusted.
			 */
	}
	return error;
}

/*
 * min_rates_adc: computes the minimum service rates needed in
 * each class to meet the absolute delay bounds.  if, for any
 * class i, the current service rate of class i is less than
 * the computed minimum service rate, this function returns
 * false, true otherwise.
 */
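
/*
 * in other words (reader's summary): for each backlogged class i
 * with an ADC, the minimum rate is roughly the class backlog
 * divided by the time left before the deadline, raised to cl_arc
 * when an ARC is also set; a deadline already missed maps to the
 * full link capacity.
 */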
1609 */ 1610 static int 1611 min_rates_adc(struct jobs_if *jif) 1612 { 1613 int result; 1614 int i; 1615 int class_exists, is_backlogged; 1616 int64_t remaining_time; 1617 struct jobs_class *cl; 1618 result = 1; 1619 1620 for (i = 0; i <= jif->jif_maxpri; i++) { 1621 cl = jif->jif_classes[i]; 1622 class_exists = (cl != NULL); 1623 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1624 if (is_backlogged && cl->concerned_adc) { 1625 remaining_time = cl->cl_adc - proj_delay(jif, i); 1626 if (remaining_time > 0 ) { 1627 /* min rate needed for ADC */ 1628 cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time; 1629 if (cl->concerned_arc 1630 && cl->cl_arc > cl->min_rate_adc) { 1631 /* min rate needed for ADC + ARC */ 1632 cl->min_rate_adc = cl->cl_arc; 1633 } 1634 } else { 1635 /* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */ 1636 cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth); 1637 } 1638 } else if (is_backlogged && cl->concerned_arc) 1639 cl->min_rate_adc = cl->cl_arc; /* no ADC, an ARC */ 1640 else if (class_exists) 1641 cl->min_rate_adc = 0; /* 1642 * either the class is not 1643 * backlogged 1644 * or there is no ADC and 1645 * no ARC 1646 */ 1647 if (is_backlogged && cl->min_rate_adc > cl->service_rate) 1648 result = 0; 1649 } 1650 1651 return result; 1652 } 1653 1654 /* 1655 * proj_delay: computes the difference between the current time 1656 * and the time the oldest class-i packet still in the class-i 1657 * queue i arrived in the system. 1658 */ 1659 static int64_t 1660 proj_delay(struct jobs_if *jif, int i) 1661 { 1662 u_int64_t now; 1663 int class_exists, is_backlogged; 1664 struct jobs_class *cl; 1665 1666 now = read_machclk(); 1667 cl = jif->jif_classes[i]; 1668 class_exists = (cl != NULL); 1669 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1670 1671 if (is_backlogged) 1672 return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp)); 1673 1674 return 0; /* NOTREACHED */ 1675 } 1676 1677 /* 1678 * pick_dropped_rlc: returns the class index of the class to be 1679 * dropped for meeting the relative loss constraints. 
1680 */ 1681 static int 1682 pick_dropped_rlc(struct jobs_if *jif) 1683 { 1684 int64_t mean; 1685 int64_t* loss_error; 1686 int i, active_classes; 1687 int class_exists, is_backlogged; 1688 int class_dropped; 1689 int64_t max_error; 1690 int64_t max_alc; 1691 struct mbuf* pkt; 1692 struct jobs_class *cl; 1693 u_int64_t len; 1694 1695 loss_error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), 1696 M_DEVBUF, M_WAITOK); 1697 1698 if (loss_error == NULL) 1699 return -1; 1700 1701 class_dropped = -1; 1702 max_error = 0; 1703 mean = 0; 1704 active_classes = 0; 1705 1706 for (i = 0; i <= jif->jif_maxpri; i++) { 1707 cl = jif->jif_classes[i]; 1708 class_exists = (cl != NULL); 1709 is_backlogged = (class_exists && !qempty(cl->cl_q)); 1710 if (is_backlogged) { 1711 if (cl->concerned_rlc) { 1712 mean += cl->loss_prod_others 1713 * cl->current_loss; 1714 active_classes++; 1715 } 1716 } 1717 } 1718 1719 if (active_classes > 0) 1720 mean /= active_classes; 1721 1722 if (active_classes == 0) 1723 class_dropped = JOBS_MAXPRI+1; /* 1724 * no classes are concerned 1725 * by RLCs (JOBS_MAXPRI+1 1726 * means "ignore RLC" here) 1727 */ 1728 else { 1729 for (i = 0; i <= jif->jif_maxpri; i++) { 1730 cl = jif->jif_classes[i]; 1731 class_exists = (cl != NULL); 1732 is_backlogged = (class_exists 1733 && !qempty(cl->cl_q)); 1734 1735 if ((is_backlogged)&&(cl->cl_rlc)) 1736 loss_error[i]=cl->loss_prod_others 1737 *cl->current_loss-mean; 1738 else 1739 loss_error[i] = ALTQ_INFINITY; 1740 } 1741 1742 for (i = 0; i <= jif->jif_maxpri; i++) { 1743 cl = jif->jif_classes[i]; 1744 class_exists = (cl != NULL); 1745 is_backlogged = (class_exists 1746 && !qempty(cl->cl_q)); 1747 if (is_backlogged && loss_error[i] <= max_error) { 1748 /* 1749 * find out which class is the most 1750 * below the mean. 1751 * it's the one that needs to be dropped 1752 * ties are broken in favor of the higher 1753 * priority classes (i.e., if two classes 1754 * present the same deviation, the lower 1755 * priority class will get dropped). 1756 */ 1757 max_error = loss_error[i]; 1758 class_dropped = i; 1759 } 1760 } 1761 1762 if (class_dropped != -1) { 1763 cl = jif->jif_classes[class_dropped]; 1764 pkt = qtail(cl->cl_q); 1765 if (pkt != NULL) { 1766 /* 1767 * "safeguard" test (a packet SHOULD be 1768 * in there) 1769 */ 1770 len = (u_int64_t)m_pktlen(pkt); 1771 /* access packet at the tail */ 1772 if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) { 1773 /* 1774 * the class to drop for meeting 1775 * the RLC will defeat the ALC: 1776 * ignore RLC. 
1777 */ 1778 class_dropped = JOBS_MAXPRI+1; 1779 } 1780 } else 1781 class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */ 1782 } else 1783 class_dropped = JOBS_MAXPRI+1; 1784 } 1785 1786 if (class_dropped == JOBS_MAXPRI+1) { 1787 max_alc = -((int64_t)1 << SCALE_LOSS); 1788 for (i = jif->jif_maxpri; i >= 0; i--) { 1789 cl = jif->jif_classes[i]; 1790 class_exists = (cl != NULL); 1791 is_backlogged = (class_exists 1792 && !qempty(cl->cl_q)); 1793 if (is_backlogged) { 1794 if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) { 1795 max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */ 1796 class_dropped = i; 1797 } else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) { 1798 max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss; 1799 class_dropped = i; 1800 } 1801 } 1802 } 1803 } 1804 1805 free(loss_error, M_DEVBUF); 1806 return (class_dropped); 1807 } 1808 1809 /* 1810 * ALTQ binding/setup functions 1811 */ 1812 /* 1813 * jobs device interface 1814 */ 1815 int 1816 jobsopen(dev_t dev, int flag, int fmt, 1817 struct lwp *l) 1818 { 1819 if (machclk_freq == 0) 1820 init_machclk(); 1821 1822 if (machclk_freq == 0) { 1823 printf("jobs: no CPU clock available!\n"); 1824 return ENXIO; 1825 } 1826 /* everything will be done when the queueing scheme is attached. */ 1827 return 0; 1828 } 1829 1830 int 1831 jobsclose(dev_t dev, int flag, int fmt, 1832 struct lwp *l) 1833 { 1834 struct jobs_if *jif; 1835 1836 while ((jif = jif_list) != NULL) { 1837 /* destroy all */ 1838 if (ALTQ_IS_ENABLED(jif->jif_ifq)) 1839 altq_disable(jif->jif_ifq); 1840 1841 int error = altq_detach(jif->jif_ifq); 1842 switch (error) { 1843 case 0: 1844 case ENXIO: /* already disabled */ 1845 break; 1846 default: 1847 return error; 1848 } 1849 jobs_detach(jif); 1850 } 1851 1852 return 0; 1853 } 1854 1855 int 1856 jobsioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag, 1857 struct lwp *l) 1858 { 1859 struct jobs_if *jif; 1860 struct jobs_interface *ifacep; 1861 int error = 0; 1862 1863 /* check super-user privilege */ 1864 switch (cmd) { 1865 case JOBS_GETSTATS: 1866 break; 1867 default: 1868 if ((error = kauth_authorize_network(l->l_cred, 1869 KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_JOBS, NULL, 1870 NULL, NULL)) != 0) 1871 return (error); 1872 break; 1873 } 1874 1875 switch (cmd) { 1876 1877 case JOBS_IF_ATTACH: 1878 error = jobscmd_if_attach((struct jobs_attach *)addr); 1879 break; 1880 1881 case JOBS_IF_DETACH: 1882 error = jobscmd_if_detach((struct jobs_interface *)addr); 1883 break; 1884 1885 case JOBS_ENABLE: 1886 case JOBS_DISABLE: 1887 case JOBS_CLEAR: 1888 ifacep = (struct jobs_interface *)addr; 1889 if ((jif = altq_lookup(ifacep->jobs_ifname, 1890 ALTQT_JOBS)) == NULL) { 1891 error = EBADF; 1892 break; 1893 } 1894 1895 switch (cmd) { 1896 case JOBS_ENABLE: 1897 if (jif->jif_default == NULL) { 1898 #if 1 1899 printf("jobs: no default class\n"); 1900 #endif 1901 error = EINVAL; 1902 break; 1903 } 1904 error = altq_enable(jif->jif_ifq); 1905 break; 1906 1907 case JOBS_DISABLE: 1908 error = altq_disable(jif->jif_ifq); 1909 break; 1910 1911 case JOBS_CLEAR: 1912 jobs_clear_interface(jif); 1913 break; 1914 } 1915 break; 1916 1917 case JOBS_ADD_CLASS: 1918 error = jobscmd_add_class((struct jobs_add_class *)addr); 1919 break; 1920 1921 case JOBS_DEL_CLASS: 1922 error = jobscmd_delete_class((struct jobs_delete_class *)addr); 1923 break; 1924 1925 case JOBS_MOD_CLASS: 1926 error = jobscmd_modify_class((struct jobs_modify_class *)addr); 1927 break; 

	case JOBS_ADD_FILTER:
		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
		break;

	case JOBS_DEL_FILTER:
		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
		break;

	case JOBS_GETSTATS:
		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
jobscmd_if_attach(struct jobs_attach *ap)
{
	struct jobs_if *jif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
		return ENXIO;
	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth,
	    ap->qlimit, ap->separate)) == NULL)
		return ENOMEM;

	/*
	 * set JOBS to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
	    jobs_enqueue, jobs_dequeue, jobs_request,
	    &jif->jif_classifier, acc_classify)) != 0)
		jobs_detach(jif);

	return error;
}

static int
jobscmd_if_detach(struct jobs_interface *ap)
{
	struct jobs_if *jif;
	int error;

	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	if (ALTQ_IS_ENABLED(jif->jif_ifq))
		altq_disable(jif->jif_ifq);

	if ((error = altq_detach(jif->jif_ifq)))
		return error;

	jobs_detach(jif);
	return 0;
}

static int
jobscmd_add_class(struct jobs_add_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return EINVAL;

	if ((cl = jobs_class_create(jif, ap->pri,
	    ap->cl_adc, ap->cl_rdc,
	    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
	    ap->flags)) == NULL)
		return ENOMEM;

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return 0;
}

static int
jobscmd_delete_class(struct jobs_delete_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return EINVAL;

	return jobs_class_destroy(cl);
}

static int
jobscmd_modify_class(struct jobs_modify_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return EINVAL;

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return EINVAL;

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		if (jif->jif_classes[ap->pri] != NULL)
			return EEXIST;
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	if ((cl = jobs_class_create(jif, ap->pri,
	    ap->cl_adc, ap->cl_rdc,
	    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
	    ap->flags)) == NULL)
		return ENOMEM;
	return 0;
}

static int
jobscmd_add_filter(struct jobs_add_filter *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return EINVAL;

	return acc_add_filter(&jif->jif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
jobscmd_delete_filter(struct jobs_delete_filter *ap)
{
	struct jobs_if *jif;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	return acc_delete_filter(&jif->jif_classifier, ap->filter_handle);
}

static int
jobscmd_class_stats(struct jobs_class_stats *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;
	struct class_stats stats, *usp;
	int pri, error;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return EBADF;

	ap->maxpri = jif->jif_maxpri;

	/* then, read the next N classes */
	usp = ap->stats;
	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		cl = jif->jif_classes[pri];
		(void)memset(&stats, 0, sizeof(stats));
		if (cl != NULL)
			get_class_stats(&stats, cl);
		if ((error = copyout((void *)&stats, (void *)usp++,
		    sizeof(stats))) != 0)
			return error;
	}
	return 0;
}

static void
get_class_stats(struct class_stats *sp, struct jobs_class *cl)
{
	u_int64_t now;

	now = read_machclk();

	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/* PKTCNTR_RESET(&cl->st_arrival); */
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	cl->cl_avgdel = 0;

	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}

/* convert a class handle to the corresponding class pointer */
static struct jobs_class *
clh_to_clp(struct jobs_if *jif, u_long chandle)
{
	struct jobs_class *cl;

	cl = (struct jobs_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_cl: unaligned pointer %p\n", cl);
#endif
		return NULL;
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
		return NULL;
	return cl;
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(struct jobs_class *cl)
{
	return (cl->cl_handle);
}

#ifdef KLD_MODULE

static struct altqsw jobs_sw =
	{"jobs", jobsopen, jobsclose, jobsioctl};

ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_JOBS */