Lines Matching refs:cl
215 struct jobs_class *cl;
223 if ((cl = jif->jif_classes[pri]) != NULL)
224 jobs_class_destroy(cl);
246 struct jobs_class *cl;
250 if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
251 jobs_purgeq(cl);
261 struct jobs_class *cl, *scan1, *scan2;
268 if ((cl = jif->jif_classes[pri]) != NULL) {
271 if (!qempty(cl->cl_q))
272 jobs_purgeq(cl);
275 cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
277 if (cl == NULL)
280 cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
282 if (cl->cl_q == NULL)
285 cl->arv_tm = tslist_alloc();
286 if (cl->arv_tm == NULL)
290 jif->jif_classes[pri] = cl;
293 jif->jif_default = cl;
295 qtype(cl->cl_q) = Q_DROPTAIL;
296 qlen(cl->cl_q) = 0;
297 cl->service_rate = 0;
298 cl->min_rate_adc = 0;
299 cl->current_loss = 0;
300 cl->cl_period = 0;
301 PKTCNTR_RESET(&cl->cl_arrival);
302 PKTCNTR_RESET(&cl->cl_rin);
303 PKTCNTR_RESET(&cl->cl_rout);
304 PKTCNTR_RESET(&cl->cl_rout_th);
305 PKTCNTR_RESET(&cl->cl_dropcnt);
306 PKTCNTR_RESET(&cl->st_arrival);
307 PKTCNTR_RESET(&cl->st_rin);
308 PKTCNTR_RESET(&cl->st_rout);
309 PKTCNTR_RESET(&cl->st_dropcnt);
310 cl->st_service_rate = 0;
311 cl->cl_lastdel = 0;
312 cl->cl_avgdel = 0;
313 cl->adc_violations = 0;
316 cl->concerned_adc = 0;
319 cl->concerned_adc = 1;
322 cl->concerned_alc = 0;
325 cl->concerned_alc = 1;
329 cl->concerned_rdc = 0;
331 cl->concerned_rdc = 1;
335 cl->concerned_rlc = 0;
337 cl->concerned_rlc = 1;
341 cl->concerned_arc = 0;
343 cl->concerned_arc = 1;
345 cl->cl_rdc=rdc;
347 if (cl->concerned_adc) {
349 cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
351 cl->cl_adc = adc;
353 if (cl->concerned_arc) {
355 cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
357 cl->cl_arc = arc;
359 cl->cl_rlc=rlc;
360 cl->cl_alc=alc;
361 cl->delay_prod_others = 0;
362 cl->loss_prod_others = 0;
363 cl->cl_flags = flags;
364 cl->cl_pri = pri;
367 cl->cl_jif = jif;
368 cl->cl_handle = (u_long)cl; /* just a pointer to this class */
375 if (cl->concerned_rdc) {
408 if (cl->concerned_rlc) {
442 cl->idletime = now;
443 return cl;
446 if (cl->cl_q != NULL)
447 free(cl->cl_q, M_DEVBUF);
448 if (cl->arv_tm != NULL)
449 free(cl->arv_tm, M_DEVBUF);
451 free(cl, M_DEVBUF);
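
A minimal userland sketch of the allocate-then-unwind pattern visible above (lines 275-290 and the error path at 446-451), assuming plain calloc/free in place of the kernel's M_DEVBUF allocator and trimmed-down types:

#include <stdlib.h>

struct class_queue_min { int qlen; };        /* stand-in for class_queue_t */
struct tslist_min { int unused; };           /* stand-in timestamp-list head */

struct jobs_class_min {
	struct class_queue_min *cl_q;
	struct tslist_min *arv_tm;
};

static struct tslist_min *
tslist_alloc_min(void)
{
	return calloc(1, sizeof(struct tslist_min));
}

static struct jobs_class_min *
class_create_min(void)
{
	struct jobs_class_min *cl;

	cl = calloc(1, sizeof(*cl));
	if (cl == NULL)
		return NULL;
	cl->cl_q = calloc(1, sizeof(*cl->cl_q));
	if (cl->cl_q == NULL)
		goto err;
	cl->arv_tm = tslist_alloc_min();
	if (cl->arv_tm == NULL)
		goto err;
	return cl;

err:	/* unwind in reverse order, as in lines 446-451 */
	free(cl->cl_q);
	free(cl);
	return NULL;
}
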
456 jobs_class_destroy(struct jobs_class *cl)
464 acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);
466 if (!qempty(cl->cl_q))
467 jobs_purgeq(cl);
469 jif = cl->cl_jif;
470 jif->jif_classes[cl->cl_pri] = NULL;
471 if (jif->jif_maxpri == cl->cl_pri) {
472 for (pri = cl->cl_pri; pri >= 0; pri--)
482 tslist_destroy(cl);
483 free(cl->cl_q, M_DEVBUF);
484 free(cl, M_DEVBUF);
496 struct jobs_class *cl, *scan;
536 if ((cl = m->m_pkthdr.pattr_class) == NULL)
537 cl = jif->jif_default;
540 old_arv = cl->cl_arrival.bytes;
541 PKTCNTR_ADD(&cl->cl_arrival, (int)len);
542 PKTCNTR_ADD(&cl->cl_rin, (int)len);
543 PKTCNTR_ADD(&cl->st_arrival, (int)len);
544 PKTCNTR_ADD(&cl->st_rin, (int)len);
546 if (cl->cl_arrival.bytes < old_arv) {
566 PKTCNTR_ADD(&cl->cl_arrival, (int)len);
567 PKTCNTR_ADD(&cl->cl_rin, (int)len);
570 if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
571 cl->current_loss =
572 ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
573 / cl->cl_arrival.bytes;
575 cl->current_loss = 0;
601 if (jobs_addq(cl, m, jif) != 0)
614 if ((cl = jif->jif_classes[pri]) != NULL &&
615 !qempty(cl->cl_q))
616 cl->service_rate += delta_rate[pri];
625 if ((cl = jif->jif_classes[pri]) != NULL &&
626 !qempty(cl->cl_q))
627 cl->service_rate += delta_rate[pri];
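
The loss ratio updated at lines 570-575 is kept in fixed point, scaled by SCALE_LOSS; a standalone sketch of that computation (the shift width here is an assumed value, and the shift can overflow for very large byte counts):

#include <stdint.h>

#define SCALE_LOSS_MIN 32	/* assumed fixed-point shift; the real SCALE_LOSS may differ */

/* loss = (bytes offered - bytes admitted) / bytes offered, in fixed point */
static uint64_t
current_loss_min(uint64_t arrival_bytes, uint64_t rin_bytes)
{
	if (arrival_bytes > rin_bytes)
		return ((arrival_bytes - rin_bytes) << SCALE_LOSS_MIN)
		    / arrival_bytes;
	return 0;
}
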
658 struct jobs_class *cl;
675 cl = jif->jif_classes[pri];
676 if (cl != NULL)
677 cl->idletime = now;
702 if (((cl = jif->jif_classes[pri]) != NULL)
703 && !qempty(cl->cl_q)) {
704 error = (int64_t)cl->cl_rout_th.bytes
705 -(int64_t)scale_rate(cl->cl_rout.bytes);
717 cl = jif->jif_classes[svc_class];
719 cl = NULL;
732 return (jobs_pollq(cl));
735 if (cl != NULL)
736 m = jobs_getq(cl);
742 if (qempty(cl->cl_q))
743 cl->cl_period++;
745 cl->cl_lastdel = (u_int64_t)delay_diff(now,
746 tslist_first(cl->arv_tm)->timestamp);
747 if (cl->concerned_adc
748 && (int64_t)cl->cl_lastdel > cl->cl_adc)
749 cl->adc_violations++;
750 cl->cl_avgdel += ticks_to_secs(GRANULARITY*cl->cl_lastdel);
752 PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
753 PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
755 if (cl != NULL)
756 tslist_dequeue(cl); /* dequeue the timestamp */
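
Lines 745-749 measure the head-of-line delay of the packet being dequeued against the class's absolute delay constraint (ADC). A simplified sketch of that check, with delay_diff() reduced to a plain subtraction of machine-clock ticks (an assumption about its behavior):

#include <stdint.h>

/* returns the delay of the departing packet and bumps the violation
 * counter when the ADC bound (same tick units) is exceeded */
static uint64_t
adc_check_min(uint64_t now, uint64_t head_arrival, int64_t adc_ticks,
    unsigned int *adc_violations)
{
	uint64_t lastdel = now - head_arrival;

	if ((int64_t)lastdel > adc_ticks)
		(*adc_violations)++;
	return lastdel;
}
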
772 jobs_addq(struct jobs_class *cl, struct mbuf *m, struct jobs_if *jif)
785 if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
792 PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
793 PKTCNTR_SUB(&cl->cl_rin, (int)len);
794 PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
795 PKTCNTR_SUB(&cl->st_rin, (int)len);
796 cl->current_loss += (len << SCALE_LOSS)
797 /cl->cl_arrival.bytes;
804 if (!cl->concerned_rlc) {
805 if (!cl->concerned_alc) {
811 PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
812 PKTCNTR_SUB(&cl->cl_rin, (int)len);
813 PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
814 PKTCNTR_SUB(&cl->st_rin, (int)len);
815 cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
824 if (cl->current_loss + (len << SCALE_LOSS)
825 / cl->cl_arrival.bytes <= cl->cl_alc) {
826 PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
827 PKTCNTR_SUB(&cl->cl_rin, (int)len);
828 PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
829 PKTCNTR_SUB(&cl->st_rin, (int)len);
830 cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
838 _addq(cl->cl_q, m);
839 tslist_enqueue(cl, now);
851 victim_class = cl;
879 _addq(cl->cl_q, m);
880 tslist_enqueue(cl, now);
889 victim_class = cl;
913 _addq(cl->cl_q, m);
914 tslist_enqueue(cl, now);
920 jobs_getq(struct jobs_class *cl)
922 return _getq(cl->cl_q);
926 jobs_pollq(struct jobs_class *cl)
928 return qhead(cl->cl_q);
932 jobs_purgeq(struct jobs_class *cl)
936 if (qempty(cl->cl_q))
939 while ((m = _getq(cl->cl_q)) != NULL) {
940 PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
941 PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
943 tslist_drop(cl);
945 ASSERT(qlen(cl->cl_q) == 0);
967 tslist_destroy(struct jobs_class *cl)
969 while (tslist_first(cl->arv_tm) != NULL)
970 tslist_dequeue(cl);
972 free(cl->arv_tm, M_DEVBUF);
976 tslist_enqueue(struct jobs_class *cl, u_int64_t arv)
984 TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
989 tslist_dequeue(struct jobs_class *cl)
992 popped = tslist_first(cl->arv_tm);
994 TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
1001 tslist_drop(struct jobs_class *cl)
1004 popped = tslist_last(cl->arv_tm);
1006 TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
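
The tslist_* routines above keep one arrival timestamp per queued packet in a TAILQ: enqueue appends at the tail, dequeue removes the oldest entry when a packet leaves the head of the class queue, and drop discards the newest entry when a packet is dropped from the tail. A self-contained userland sketch of that structure (names other than ts_list and timestamp are assumptions):

#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>

struct ts_entry_min {
	TAILQ_ENTRY(ts_entry_min) ts_list;
	uint64_t timestamp;
};
TAILQ_HEAD(ts_head_min, ts_entry_min);

/* cf. tslist_enqueue: record an arrival time at the tail */
static void
ts_enqueue_min(struct ts_head_min *h, uint64_t arv)
{
	struct ts_entry_min *e = malloc(sizeof(*e));

	if (e == NULL)
		return;
	e->timestamp = arv;
	TAILQ_INSERT_TAIL(h, e, ts_list);
}

/* cf. tslist_dequeue: drop the oldest timestamp (head of the list) */
static void
ts_dequeue_min(struct ts_head_min *h)
{
	struct ts_entry_min *e = TAILQ_FIRST(h);

	if (e != NULL) {
		TAILQ_REMOVE(h, e, ts_list);
		free(e);
	}
}

/* cf. tslist_drop: discard the newest timestamp (tail of the list) */
static void
ts_drop_min(struct ts_head_min *h)
{
	struct ts_entry_min *e = TAILQ_LAST(h, ts_head_min);

	if (e != NULL) {
		TAILQ_REMOVE(h, e, ts_list);
		free(e);
	}
}
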
1024 struct jobs_class *cl;
1034 cl = jif->jif_classes[pri];
1035 class_exists = (cl != NULL);
1036 is_backlogged = (class_exists && !qempty(cl->cl_q));
1040 if ((is_backlogged && cl->service_rate <= 0)
1042 && !is_backlogged && cl->service_rate > 0))
1048 cl = jif->jif_classes[pri];
1049 class_exists = (cl != NULL);
1050 is_backlogged = (class_exists && !qempty(cl->cl_q));
1053 cl->service_rate = 0;
1055 cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
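
Line 1055 splits the configured link bandwidth evenly across the classes that are currently backlogged and zeroes the rate of idle classes (line 1053). A sketch of that assignment over per-class arrays, with bps_to_internal() omitted (an assumption about units):

#include <stdint.h>

static void
split_rate_min(int64_t *service_rate, const int *is_backlogged,
    int nclasses, int64_t link_rate)
{
	int i, active = 0;

	for (i = 0; i < nclasses; i++)
		if (is_backlogged[i])
			active++;

	for (i = 0; i < nclasses; i++)
		service_rate[i] = (is_backlogged[i] && active > 0)
		    ? link_rate / active : 0;
}
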
1082 struct jobs_class *cl;
1101 cl = jif->jif_classes[i];
1102 class_exists = (cl != NULL);
1103 is_backlogged = (class_exists && !qempty(cl->cl_q));
1106 if (cl->concerned_rdc)
1110 internal_to_bps(cl->service_rate);
1132 cl = jif->jif_classes[i];
1133 class_exists = (cl != NULL);
1134 is_backlogged = (class_exists && !qempty(cl->cl_q));
1135 if (is_backlogged && cl->concerned_rdc)
1136 bk += cl->cl_rin.bytes;
1143 cl = jif->jif_classes[i];
1144 class_exists = (cl != NULL);
1145 is_backlogged = (class_exists && !qempty(cl->cl_q));
1147 && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
1148 min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
1149 if (is_backlogged && cl->concerned_rdc
1150 && cl->delay_prod_others > max_prod)
1151 max_prod = cl->delay_prod_others;
1153 if (is_backlogged && cl->concerned_rdc
1154 && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
1155 max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
1169 cl = jif->jif_classes[i];
1170 class_exists = (cl != NULL);
1171 is_backlogged = (class_exists && !qempty(cl->cl_q));
1172 if (is_backlogged && cl->concerned_rdc) {
1183 cl = jif->jif_classes[i];
1184 class_exists = (cl != NULL);
1185 is_backlogged = (class_exists && !qempty(cl->cl_q));
1187 if (is_backlogged && cl->concerned_rdc)
1188 lower_bound += cl->min_rate_adc;
1190 * note: if there's no ADC or ARC on cl,
1196 cl = jif->jif_classes[i];
1197 class_exists = (cl != NULL);
1198 is_backlogged = (class_exists && !qempty(cl->cl_q));
1200 if (is_backlogged && cl->concerned_rdc
1201 && result[i] + cl->service_rate > upper_bound) {
1203 cl = jif->jif_classes[j];
1204 class_exists = (cl != NULL);
1206 && !qempty(cl->cl_q));
1207 if (is_backlogged && cl->concerned_rdc) {
1210 -cl->service_rate
1211 + cl->min_rate_adc
1215 -cl->service_rate
1216 +cl->min_rate_adc;
1222 cl = jif->jif_classes[i];
1224 class_exists = (cl != NULL);
1225 is_backlogged = (class_exists && !qempty(cl->cl_q));
1227 if (is_backlogged && cl->concerned_rdc
1228 && result[i] + cl->service_rate < cl->min_rate_adc) {
1229 credit += cl->service_rate+result[i]
1230 -cl->min_rate_adc;
1232 result[i] = -cl->service_rate+cl->min_rate_adc;
1237 cl = jif->jif_classes[i];
1238 class_exists = (cl != NULL);
1239 is_backlogged = (class_exists && !qempty(cl->cl_q));
1241 if (is_backlogged && cl->concerned_rdc) {
1243 + cl->service_rate-cl->min_rate_adc;
1271 struct jobs_class *cl;
1309 cl = jif->jif_classes[i];
1310 class_exists = (cl != NULL);
1311 is_backlogged = (class_exists && !qempty(cl->cl_q));
1314 if (cl->concerned_adc) {
1319 if (tslist_first(cl->arv_tm) == NULL)
1322 oldest_arv = (tslist_first(cl->arv_tm))->timestamp;
1324 n[i] = cl->service_rate;
1325 k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));
1327 remaining_time = cl->cl_adc
1347 if (cl->concerned_arc) {
1352 if (n[i] - cl->cl_arc < available[i])
1354 - cl->cl_arc;
1356 } else if (cl->concerned_arc) {
1361 n[i] = cl->service_rate;
1362 available[i] = n[i] - cl->cl_arc;
1368 n[i] = cl->service_rate;
1377 available[i] = cl->service_rate;
1438 cl = jif->jif_classes[i];
1439 class_exists = (cl != NULL);
1440 is_backlogged = (class_exists && !qempty(cl->cl_q));
1442 result[i] = n[i] - cl->service_rate;
1445 result[i] = - cl->service_rate;
1456 cl = jif->jif_classes[i];
1457 class_exists = (cl != NULL);
1459 && !qempty(cl->cl_q));
1461 if (cl->concerned_adc) {
1463 while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
1464 pkt = qtail(cl->cl_q);
1469 if (cl->concerned_alc
1470 && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
1474 pkt = _getq_tail(cl->cl_q);
1476 PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
1477 PKTCNTR_SUB(&cl->cl_rin, (int)len);
1478 PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
1479 PKTCNTR_SUB(&cl->st_rin, (int)len);
1480 cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
1482 tslist_drop(cl);
1483 IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
1488 k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
1504 cl = jif->jif_classes[i];
1505 class_exists = (cl != NULL);
1506 is_backlogged = (class_exists && !qempty(cl->cl_q));
1507 if (is_backlogged && cl->concerned_adc) {
1509 if (cl->concerned_adc
1510 && !cl->concerned_arc)
1511 cl->min_rate_adc = k[i]/c[i];
1513 cl->min_rate_adc = n[i];
1515 cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
1516 } else if (is_backlogged && cl->concerned_arc)
1517 cl->min_rate_adc = n[i]; /* the best we can give */
1520 cl->min_rate_adc = 0;
1555 struct jobs_class *cl;
1567 cl = jif->jif_classes[i];
1568 class_exists = (cl != NULL);
1569 is_backlogged = (class_exists && !qempty(cl->cl_q));
1572 if (cl->concerned_rdc) {
1574 mean_weighted_delay += cl->delay_prod_others*delays[i];
1586 cl = jif->jif_classes[i];
1587 class_exists = (cl != NULL);
1588 is_backlogged = (class_exists && !qempty(cl->cl_q));
1590 if (is_backlogged && cl->concerned_rdc)
1591 error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
1617 struct jobs_class *cl;
1621 cl = jif->jif_classes[i];
1622 class_exists = (cl != NULL);
1623 is_backlogged = (class_exists && !qempty(cl->cl_q));
1624 if (is_backlogged && cl->concerned_adc) {
1625 remaining_time = cl->cl_adc - proj_delay(jif, i);
1628 cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
1629 if (cl->concerned_arc
1630 && cl->cl_arc > cl->min_rate_adc) {
1632 cl->min_rate_adc = cl->cl_arc;
1636 cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
1638 } else if (is_backlogged && cl->concerned_arc)
1639 cl->min_rate_adc = cl->cl_arc; /* no ADC, an ARC */
1641 cl->min_rate_adc = 0; /*
1647 if (is_backlogged && cl->min_rate_adc > cl->service_rate)
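
Lines 1625-1636 compute the minimum service rate a class needs in order to still meet its delay bound: the remaining backlog divided by the time left before the ADC expires, raised to the ARC when one is configured and higher, and set to the full link rate once the deadline can no longer be met. A standalone sketch of that arithmetic, with units left abstract and the branch structure partly inferred (both assumptions):

#include <stdint.h>

static int64_t
min_rate_for_adc_min(int64_t backlog, int64_t adc, int64_t proj_delay,
    int has_arc, int64_t arc, int64_t link_rate)
{
	int64_t remaining = adc - proj_delay;
	int64_t rate;

	if (remaining <= 0)
		return link_rate;	/* deadline missed: ask for the whole link */

	rate = backlog / remaining;	/* bytes still queued over time left */
	if (has_arc && arc > rate)
		rate = arc;		/* an ARC can only raise the requirement */
	return rate;
}
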
1664 struct jobs_class *cl;
1667 cl = jif->jif_classes[i];
1668 class_exists = (cl != NULL);
1669 is_backlogged = (class_exists && !qempty(cl->cl_q));
1672 return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));
1692 struct jobs_class *cl;
1707 cl = jif->jif_classes[i];
1708 class_exists = (cl != NULL);
1709 is_backlogged = (class_exists && !qempty(cl->cl_q));
1711 if (cl->concerned_rlc) {
1712 mean += cl->loss_prod_others
1713 * cl->current_loss;
1730 cl = jif->jif_classes[i];
1731 class_exists = (cl != NULL);
1733 && !qempty(cl->cl_q));
1735 if ((is_backlogged)&&(cl->cl_rlc))
1736 loss_error[i]=cl->loss_prod_others
1737 *cl->current_loss-mean;
1743 cl = jif->jif_classes[i];
1744 class_exists = (cl != NULL);
1746 && !qempty(cl->cl_q));
1763 cl = jif->jif_classes[class_dropped];
1764 pkt = qtail(cl->cl_q);
1772 if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
1789 cl = jif->jif_classes[i];
1790 class_exists = (cl != NULL);
1792 && !qempty(cl->cl_q));
1794 if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
1795 max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
1797 } else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) {
1798 max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss;
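
The victim selection at lines 1789-1798 picks, among the backlogged classes, the one with the most headroom below its absolute loss constraint (ALC); a class with no ALC is treated as if its bound were 1.0 in the fixed-point loss scale. A sketch under those assumptions:

#include <stdint.h>

#define SCALE_LOSS_MIN 32	/* assumed fixed-point shift, as in the earlier sketch */

static int
pick_victim_min(int nclasses, const int *is_backlogged, const int *has_alc,
    const int64_t *alc, const int64_t *current_loss)
{
	int64_t max_headroom = -1;
	int i, victim = -1;

	for (i = 0; i < nclasses; i++) {
		int64_t bound, headroom;

		if (!is_backlogged[i])
			continue;
		bound = has_alc[i] ? alc[i] : ((int64_t)1 << SCALE_LOSS_MIN);
		headroom = bound - current_loss[i];
		if (headroom > max_headroom) {
			max_headroom = headroom;
			victim = i;
		}
	}
	return victim;		/* -1 if no class is backlogged */
}
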
1994 struct jobs_class *cl;
2002 if ((cl = jobs_class_create(jif, ap->pri,
2009 ap->class_handle = clp_to_clh(cl);
2017 struct jobs_class *cl;
2022 if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
2025 return jobs_class_destroy(cl);
2032 struct jobs_class *cl;
2040 if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
2046 if (jif->jif_classes[ap->pri] != cl) {
2049 jif->jif_classes[cl->cl_pri] = NULL;
2050 jif->jif_classes[ap->pri] = cl;
2051 cl->cl_pri = ap->pri;
2055 if ((cl = jobs_class_create(jif, ap->pri,
2067 struct jobs_class *cl;
2072 if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
2076 cl, &ap->filter_handle);
2094 struct jobs_class *cl;
2106 cl = jif->jif_classes[pri];
2108 if (cl != NULL)
2109 get_class_stats(&stats, cl);
2118 get_class_stats(struct class_stats *sp, struct jobs_class *cl)
2123 sp->class_handle = clp_to_clh(cl);
2124 sp->qlength = qlen(cl->cl_q);
2126 sp->period = cl->cl_period;
2127 sp->rin = cl->st_rin;
2128 sp->arrival = cl->st_arrival;
2129 sp->arrivalbusy = cl->cl_arrival;
2130 sp->rout = cl->st_rout;
2131 sp->dropcnt = cl->cl_dropcnt;
2133 /* PKTCNTR_RESET(&cl->st_arrival);*/
2134 PKTCNTR_RESET(&cl->st_rin);
2135 PKTCNTR_RESET(&cl->st_rout);
2137 sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
2138 sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
2139 sp->avgdel = cl->cl_avgdel;
2141 cl->cl_avgdel = 0;
2143 sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
2144 sp->adc_violations = cl->adc_violations;
2146 sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
2147 sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
2148 sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
2149 sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
2150 sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
2151 sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
2152 sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
2153 sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
2154 sp->total_enqueued = cl->cl_jif->total_enqueued;
2155 sp->total_dequeued = cl->cl_jif->total_dequeued;
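
get_class_stats() copies the per-interval counters into the report and then resets them (lines 2127-2135), so each poll only covers the traffic seen since the previous one. A sketch of that snapshot-and-reset idiom with a simplified two-field packet counter (the layout is an assumption consistent with the PKTCNTR_ADD/RESET usage above):

#include <stdint.h>

struct pktcntr_min {
	uint64_t packets;
	uint64_t bytes;
};

/* publish the interval counter and restart the interval */
static void
snapshot_and_reset_min(struct pktcntr_min *report, struct pktcntr_min *interval)
{
	*report = *interval;
	interval->packets = 0;
	interval->bytes = 0;
}
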
2162 struct jobs_class *cl;
2164 cl = (struct jobs_class *)chandle;
2165 if (chandle != ALIGN(cl)) {
2167 printf("clh_to_cl: unaligned pointer %p\n", cl);
2172 if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
2174 return cl;
2179 clp_to_clh(struct jobs_class *cl)
2181 return (cl->cl_handle);
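
These last two routines implement the class-handle scheme set up at line 368: the handle is simply the class pointer, and clh_to_clp() validates it by checking alignment, the stored cl_handle, and the owning interface before trusting it. A userland sketch with trimmed-down types, approximating the ALIGN() test with a modulo check (an assumption):

#include <stdio.h>

struct jobs_class_hdl {
	unsigned long cl_handle;	/* set to (unsigned long)cl at create time */
	void *cl_jif;			/* owning interface */
};

static unsigned long
clp_to_clh_min(struct jobs_class_hdl *cl)
{
	return (unsigned long)cl;
}

static struct jobs_class_hdl *
clh_to_clp_min(void *jif, unsigned long chandle)
{
	struct jobs_class_hdl *cl = (struct jobs_class_hdl *)chandle;

	if (chandle % sizeof(void *) != 0) {	/* reject unaligned handles */
		printf("clh_to_clp: unaligned pointer %p\n", (void *)cl);
		return NULL;
	}
	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
		return NULL;
	return cl;
}
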