Lines Matching refs:cl
203 struct rm_class *cl;
226 cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_WAITOK|M_ZERO);
227 if (cl == NULL)
229 CALLOUT_INIT(&cl->callout_);
231 cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
232 if (cl->q_ == NULL) {
233 free(cl, M_DEVBUF);
240 cl->children_ = NULL;
241 cl->parent_ = parent;
242 cl->borrow_ = borrow;
243 cl->leaf_ = 1;
244 cl->ifdat_ = ifd;
245 cl->pri_ = pri;
246 cl->allotment_ = (u_int)(RM_PS_PER_SEC / psecPerByte); /* Bytes per sec */
247 cl->depth_ = 0;
248 cl->qthresh_ = 0;
249 cl->ps_per_byte_ = psecPerByte;
251 qlimit(cl->q_) = maxq;
252 qtype(cl->q_) = Q_DROPHEAD;
253 qlen(cl->q_) = 0;
254 cl->flags_ = flags;
257 cl->minidle_ = ((int64_t)minidle * (int64_t)psecPerByte) / 8;
258 if (cl->minidle_ > 0)
259 cl->minidle_ = 0;
261 cl->minidle_ = minidle;
263 cl->maxidle_ = ((int64_t)maxidle * (int64_t)psecPerByte) / 8;
264 if (cl->maxidle_ == 0)
265 cl->maxidle_ = 1;
267 cl->avgidle_ = cl->maxidle_;
268 cl->offtime_ = (((int64_t)offtime * (int64_t)psecPerByte) / 8) >> RM_FILTER_GAIN;
269 if (cl->offtime_ == 0)
270 cl->offtime_ = 1;
272 cl->avgidle_ = 0;
273 cl->offtime_ = ((int64_t)offtime * (int64_t)psecPerByte) / 8;
275 cl->overlimit = action;
293 cl->red_ = red_alloc(0, 0,
294 qlimit(cl->q_) * 10/100,
295 qlimit(cl->q_) * 30/100,
297 if (cl->red_ != NULL)
298 qtype(cl->q_) = Q_RED;
302 cl->red_ = (red_t *)rio_alloc(0, NULL,
304 if (cl->red_ != NULL)
305 qtype(cl->q_) = Q_RIO;
317 cl->peer_ = peer;
320 peer->peer_ = cl;
322 ifd->active_[pri] = cl;
323 cl->peer_ = cl;
326 if (cl->parent_) {
327 cl->next_ = parent->children_;
328 parent->children_ = cl;
336 rmc_depth_compute(cl);
343 ifd->alloc_[pri] += cl->allotment_;
347 return cl;
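
For concreteness, the allotment arithmetic at line 246 checks out in isolation. A minimal user-space sketch, assuming RM_PS_PER_SEC is 10^12 (picoseconds per second), as its use as a bytes-per-second divisor implies:

    #include <stdio.h>
    #include <stdint.h>

    #define RM_PS_PER_SEC   (1000LL * 1000 * 1000 * 1000)   /* assumed value */

    int
    main(void)
    {
            /* hypothetical 100 Mbit/s class */
            uint64_t psecPerByte = 8 * RM_PS_PER_SEC / (100 * 1000 * 1000);
            unsigned int allotment = (unsigned int)(RM_PS_PER_SEC / psecPerByte);

            printf("psecPerByte = %llu ps\n", (unsigned long long)psecPerByte); /* 80000 */
            printf("allotment = %u bytes/sec\n", allotment); /* 12500000 = 100 Mbit/s */
            return (0);
    }
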
351 rmc_modclass(struct rm_class *cl, uint64_t psecPerByte, int maxq, u_int maxidle,
358 ifd = cl->ifdat_;
359 old_allotment = cl->allotment_;
362 cl->allotment_ = (u_int)(RM_PS_PER_SEC / psecPerByte); /* Bytes per sec */
363 cl->qthresh_ = 0;
364 cl->ps_per_byte_ = psecPerByte;
366 qlimit(cl->q_) = maxq;
369 cl->minidle_ = ((int64_t)minidle * (int64_t)psecPerByte) / 8;
370 if (cl->minidle_ > 0)
371 cl->minidle_ = 0;
373 cl->minidle_ = minidle;
375 cl->maxidle_ = ((int64_t)maxidle * (int64_t)psecPerByte) / 8;
376 if (cl->maxidle_ == 0)
377 cl->maxidle_ = 1;
379 cl->avgidle_ = cl->maxidle_;
380 cl->offtime_ = (((int64_t)offtime * (int64_t)psecPerByte) / 8) >> RM_FILTER_GAIN;
381 if (cl->offtime_ == 0)
382 cl->offtime_ = 1;
384 cl->avgidle_ = 0;
385 cl->offtime_ = ((int64_t)offtime * (int64_t)psecPerByte) / 8;
392 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
412 struct rm_class *cl, *clh;
432 clh = cl = ifd->active_[i];
436 cl->w_allotment_ = 0;
438 cl->w_allotment_ = cl->allotment_ /
440 cl = cl->peer_;
441 } while ((cl != NULL) && (cl != clh));
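
The ring walk above (lines 432-441) hands each active class its weighted quantum. A condensed sketch of the surrounding loop; the divisor ifd->M_[i] is an assumption about the elided continuation of line 438, suggested by the "M[cl->pri_]" comment near line 1007:

    for (i = 0; i < RM_MAXPRIO; i++) {
            if ((clh = cl = ifd->active_[i]) == NULL)
                    continue;
            do {
                    if (ifd->M_[i] == 0)
                            cl->w_allotment_ = 0;   /* no bandwidth here */
                    else
                            cl->w_allotment_ = cl->allotment_ / ifd->M_[i];
                    cl = cl->peer_;
            } while (cl != NULL && cl != clh);
    }
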
457 * rmc_depth_compute(struct rm_class *cl) - This function computes the
458 * appropriate depth of class 'cl' and its ancestors.
464 rmc_depth_compute(struct rm_class *cl)
466 rm_class_t *t = cl, *p;
483 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
490 rmc_depth_recompute(rm_class_t *cl)
495 p = cl;
520 if (cl->depth_ >= 1) {
521 if (cl->children_ == NULL) {
522 cl->depth_ = 0;
523 } else if ((t = cl->children_) != NULL) {
530 rmc_depth_compute(cl);
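
Only scattered lines of the depth logic match 'cl'. The upward walk they belong to reconstructs naturally as a short loop; a sketch consistent with lines 464-466:

    static void
    rmc_depth_compute(struct rm_class *cl)
    {
            rm_class_t      *t = cl, *p;

            /*
             * Propagate depth up the tree: a parent sits one level
             * above its deepest child; stop once an ancestor is
             * already deep enough.
             */
            while (t != NULL) {
                    p = t->parent_;
                    if (p && (p->depth_ < 1 + t->depth_)) {
                            p->depth_ = 1 + t->depth_;
                            t = p;
                    } else
                            t = NULL;
            }
    }
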
537 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
545 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
550 ASSERT(cl->children_ == NULL);
552 if (cl->sleeping_)
553 CALLOUT_STOP(&cl->callout_);
561 rmc_dropall(cl);
567 if (cl->parent_ != NULL) {
568 head = cl->parent_->children_;
571 ASSERT(head == cl);
572 cl->parent_->children_ = NULL;
573 cl->parent_->leaf_ = 1;
575 if (p == cl) {
576 if (cl == head)
577 cl->parent_->children_ = cl->next_;
579 previous->next_ = cl->next_;
580 cl->next_ = NULL;
592 if ((p = ifd->active_[cl->pri_]) != NULL) {
595 * level, then look for class(cl) in the priority level.
598 while (p->peer_ != cl)
600 p->peer_ = cl->peer_;
602 if (ifd->active_[cl->pri_] == cl)
603 ifd->active_[cl->pri_] = cl->peer_;
605 ASSERT(p == cl);
606 ifd->active_[cl->pri_] = NULL;
614 ifd->alloc_[cl->pri_] -= cl->allotment_;
615 ifd->num_[cl->pri_]--;
623 rmc_depth_recompute(cl->parent_);
633 if (cl->red_ != NULL) {
635 if (q_is_rio(cl->q_))
636 rio_destroy((rio_t *)cl->red_);
639 if (q_is_red(cl->q_))
640 red_destroy(cl->red_);
643 free(cl->q_, M_DEVBUF);
644 free(cl, M_DEVBUF);
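
Lines 592-606 above unlink the class from the circular, singly linked peer ring at its priority. The pattern in isolation, with the tests that fell between the matches restored (a sketch):

    struct rm_class *p = ifd->active_[pri];

    if (p == cl && cl->peer_ == cl) {
            /* cl was the only class at this priority */
            ifd->active_[pri] = NULL;
    } else {
            while (p->peer_ != cl)          /* find cl's predecessor */
                    p = p->peer_;
            p->peer_ = cl->peer_;           /* bypass cl on the ring */
            if (ifd->active_[pri] == cl)
                    ifd->active_[pri] = cl->peer_;
    }
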
740 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
741 * mbuf 'm' to queue for resource class 'cl'. This routine is called
750 rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
753 struct rm_ifdat *ifd = cl->ifdat_;
754 int cpri = cl->pri_;
755 int is_empty = qempty(cl->q_);
759 if (TS_LT(&cl->undertime_, &now)) {
760 if (ifd->cutoff_ > cl->depth_)
761 ifd->cutoff_ = cl->depth_;
762 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
771 struct rm_class *borrow = cl->borrow_;
784 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
785 if (TS_LT(&cl->borrow_->undertime_, &now)) {
786 ifd->cutoff_ = cl->borrow_->depth_;
788 cl->borrow_->depth_);
794 if (_rmc_addq(cl, m) < 0)
799 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
803 if (qlen(cl->q_) > qlimit(cl->q_)) {
805 rmc_drop_action(cl);
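
TS_LT, relied on above and throughout, is the usual lexicographic timespec comparison. The macro itself lives in the accompanying header; its behavior as used here amounts to (a sketch, not the verbatim definition):

    /* a < b for struct timespec, compared lexicographically */
    #define TS_LT(a, b)                                     \
            (((a)->tv_sec < (b)->tv_sec) ||                 \
             (((a)->tv_sec == (b)->tv_sec) &&               \
              ((a)->tv_nsec < (b)->tv_nsec)))
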
844 rmc_satisfied(struct rm_class *cl, struct timespec *now)
848 if (cl == NULL)
850 if (TS_LT(now, &cl->undertime_))
852 if (cl->depth_ == 0) {
853 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
858 if (cl->children_ != NULL) {
859 p = cl->children_;
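
rmc_satisfied is recursive; the child walk elided between lines 859 and 871 presumably has this shape (sketch):

    /* A non-leaf class is satisfied only if every child is. */
    while (p != NULL) {
            if (!rmc_satisfied(p, now))
                    return (0);
            p = p->next_;
    }
    return (1);
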
871 * Return 1 if class 'cl' is under limit or can borrow from a parent,
877 rmc_under_limit(struct rm_class *cl, struct timespec *now)
879 rm_class_t *p = cl;
881 struct rm_ifdat *ifd = cl->ifdat_;
882 int sleeping = cl->sleeping_;
886 * If cl is the root class, then always return that it is
889 if (cl->parent_ == NULL)
892 if (!TS_LT(now, &cl->undertime_)) {
895 CALLOUT_STOP(&cl->callout_);
896 cl->sleeping_ = 0;
897 cl->undertime_.tv_sec = 0;
904 if (((cl = cl->borrow_) == NULL) ||
905 (cl->depth_ > ifd->cutoff_)) {
910 if (cl != NULL)
922 if (cl != NULL) {
924 top = cl;
937 top = cl;
938 } while (cl->undertime_.tv_sec && TS_LT(now, &cl->undertime_));
940 if (cl != p)
941 ifd->borrowed_[ifd->qi_] = cl;
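
Lines 904-941 are the heart of link sharing: climb the borrow_ chain until an ancestor is under limit, refusing to cross the current cutoff depth. Stripped of the trace and poll-cache details (sketch):

    /* Climb toward the root while every class on the way is overlimit. */
    while (cl->undertime_.tv_sec && TS_LT(now, &cl->undertime_)) {
            cl = cl->borrow_;
            if (cl == NULL || cl->depth_ > ifd->cutoff_)
                    return (0);     /* nothing eligible to borrow from */
    }
    if (cl != p)
            ifd->borrowed_[ifd->qi_] = cl;  /* remember the lender */
    return (1);
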
965 struct rm_class *cl = NULL, *first = NULL;
978 cl = ifd->pollcache_;
979 cpri = cl->pri_;
982 if (cl->undertime_.tv_sec != 0 &&
983 rmc_under_limit(cl, &now) == 0)
984 first = cl;
1007 * "M[cl->pri_])" times "cl->allotment" is greater than
1011 cl = ifd->active_[cpri];
1012 ASSERT(cl != NULL);
1014 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
1015 cl->bytes_alloc_ += cl->w_allotment_;
1016 if (!qempty(cl->q_)) {
1017 if ((cl->undertime_.tv_sec == 0) ||
1018 rmc_under_limit(cl, &now)) {
1019 if (cl->bytes_alloc_ > 0 || deficit > 1)
1028 else if (first == NULL && cl->borrow_ != NULL)
1029 first = cl; /* borrowing candidate */
1032 cl->bytes_alloc_ = 0;
1033 cl = cl->peer_;
1034 } while (cl != ifd->active_[cpri]);
1066 cl = first;
1067 cpri = cl->pri_;
1069 if (cl->sleeping_)
1070 CALLOUT_STOP(&cl->callout_);
1071 cl->sleeping_ = 0;
1072 cl->undertime_.tv_sec = 0;
1074 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1075 ifd->cutoff_ = cl->borrow_->depth_;
1082 m = _rmc_getq(cl);
1085 if (qempty(cl->q_))
1091 if (cl->bytes_alloc_ > 0)
1092 cl->bytes_alloc_ -= m_pktlen(m);
1094 if ((cl->bytes_alloc_ <= 0) || first == cl)
1095 ifd->active_[cl->pri_] = cl->peer_;
1097 ifd->active_[cl->pri_] = cl;
1099 ifd->class_[ifd->qi_] = cl;
1106 m = _rmc_pollq(cl);
1107 ifd->pollcache_ = cl;
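
The weighted round-robin bookkeeping in lines 1014-1097 is a deficit scheme: each class carries a byte quantum (bytes_alloc_) refilled from w_allotment_ once per round, charged per packet, with the ring rotating once the quantum is spent. In outline (sketch):

    if (deficit < 2 && cl->bytes_alloc_ <= 0)
            cl->bytes_alloc_ += cl->w_allotment_;   /* start of a round */

    /* ... cl selected, packet m dequeued ... */
    if (cl->bytes_alloc_ > 0)
            cl->bytes_alloc_ -= m_pktlen(m);        /* charge the packet */
    if (cl->bytes_alloc_ <= 0 || first == cl)
            ifd->active_[cl->pri_] = cl->peer_;     /* rotate the ring */
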
1123 struct rm_class *cl, *first = NULL;
1133 cl = ifd->pollcache_;
1134 cpri = cl->pri_;
1148 cl = ifd->active_[cpri];
1149 ASSERT(cl != NULL);
1151 if (!qempty(cl->q_)) {
1152 if ((cl->undertime_.tv_sec == 0) ||
1153 rmc_under_limit(cl, &now))
1155 if (first == NULL && cl->borrow_ != NULL)
1156 first = cl;
1158 cl = cl->peer_;
1159 } while (cl != ifd->active_[cpri]);
1181 cl = first;
1182 cpri = cl->pri_;
1184 if (cl->sleeping_)
1185 CALLOUT_STOP(&cl->callout_);
1186 cl->sleeping_ = 0;
1187 cl->undertime_.tv_sec = 0;
1189 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1190 ifd->cutoff_ = cl->borrow_->depth_;
1197 m = _rmc_getq(cl);
1200 if (qempty(cl->q_))
1203 ifd->active_[cpri] = cl->peer_;
1205 ifd->class_[ifd->qi_] = cl;
1212 m = _rmc_pollq(cl);
1213 ifd->pollcache_ = cl;
1264 rm_class_t *cl, *cl0, *borrowed;
1271 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1274 cl0 = cl;
1279 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1326 while (cl != NULL) {
1327 TS_DELTA(&ifd->ifnow_, &cl->last_, idle);
1333 cl->avgidle_ = cl->maxidle_;
1337 pkt_time = pktlen * (int64_t)cl->ps_per_byte_;
1340 pkt_time = pktlen * cl->ps_per_byte_ / 1000;
1344 avgidle = cl->avgidle_;
1346 cl->avgidle_ = avgidle;
1350 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1356 if (avgidle < cl->minidle_)
1357 avgidle = cl->avgidle_ = cl->minidle_;
1362 TS_ADD_DELTA(nowp, tidle, &cl->undertime_);
1363 ++cl->stats_.over;
1365 cl->avgidle_ =
1366 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1367 cl->undertime_.tv_sec = 0;
1368 if (cl->sleeping_) {
1369 CALLOUT_STOP(&cl->callout_);
1370 cl->sleeping_ = 0;
1375 if (borrows != cl)
1376 ++cl->stats_.borrows;
1380 cl->last_ = ifd->ifnow_;
1381 cl->last_pkttime_ = pkt_time;
1384 if (cl->parent_ == NULL && cl != cl0) {
1386 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1390 cl = cl->parent_;
1396 cl = ifd->class_[ifd->qo_];
1399 if ((qlen(cl->q_) <= 0) || TS_LT(nowp, &borrowed->undertime_)) {
1407 if ((qlen(cl->q_) <= 1) || TS_LT(&now, &borrowed->undertime_)) {
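
Lines 1326-1381 apply the classic CBQ estimator: idle is the difference between the actual and the expected packet spacing, smoothed by an exponentially weighted moving average. With RM_FILTER_GAIN as the shift (5 in the stock ALTQ header, i.e. a weight of 1/32), the update around line 1344 amounts to (sketch):

    /*
     * avgidle <- avgidle + idle - avgidle/2^g: an EWMA of idle kept
     * pre-scaled by 2^RM_FILTER_GAIN for integer arithmetic.
     * avgidle <= 0 means the class is overlimit.
     */
    idle -= pkt_time;                       /* actual minus expected gap */
    avgidle = cl->avgidle_;
    avgidle += idle - (avgidle >> RM_FILTER_GAIN);
    cl->avgidle_ = avgidle;
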
1431 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1440 rmc_drop_action(struct rm_class *cl)
1442 struct rm_ifdat *ifd = cl->ifdat_;
1444 ASSERT(qlen(cl->q_) > 0);
1445 _rmc_dropq(cl);
1446 if (qempty(cl->q_))
1447 ifd->na_[cl->pri_]--;
1451 rmc_dropall(struct rm_class *cl)
1453 struct rm_ifdat *ifd = cl->ifdat_;
1455 if (!qempty(cl->q_)) {
1456 _flushq(cl->q_);
1458 ifd->na_[cl->pri_]--;
1479 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1490 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1495 cl->stats_.overactions++;
1497 TS_DELTA(&borrow->undertime_, &cl->overtime_, ndelay);
1499 TS_DELTA(&cl->undertime_, &cl->overtime_, ndelay);
1501 ndelay += cl->offtime_;
1504 if (!cl->sleeping_) {
1505 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1511 extradelay = cl->offtime_;
1522 extradelay -= cl->last_pkttime_;
1525 TS_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1529 cl->sleeping_ = 1;
1530 cl->stats_.delays++;
1542 t = tshzto(&cl->undertime_);
1545 t = tshzto(&cl->undertime_) + 1;
1549 CALLOUT_RESET(&cl->callout_, t,
1550 (timeout_t *)rmc_restart, (void *)cl);
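
Condensed, the suspension logic works like this: an overlimit class sleeps until undertime_, padded by offtime_ less the transmission time of the packet just sent, so consecutive overlimit events do not overshoot. A sketch of lines 1504-1530, with the conditionals that fell between the matches assumed:

    if (!cl->sleeping_) {
            CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
            extradelay = cl->offtime_;
            extradelay -= cl->last_pkttime_;        /* credit last packet */
            if (extradelay > 0) {
                    TS_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
                    ndelay += extradelay;
            }
            cl->sleeping_ = 1;
            cl->stats_.delays++;
            /* ... arm the callout for tshzto(&cl->undertime_) ticks ... */
    }
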
1572 rmc_restart(struct rm_class *cl)
1574 struct rm_ifdat *ifd = cl->ifdat_;
1578 if (cl->sleeping_) {
1579 cl->sleeping_ = 0;
1580 cl->undertime_.tv_sec = 0;
1583 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1592 * rmc_root_overlimit(struct rm_class *cl) - This the generic overlimit
1599 rmc_root_overlimit(struct rm_class *cl,
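
Only the signature of rmc_root_overlimit matches here. The root class has nowhere to borrow from, so in stock ALTQ its overlimit action is just a sanity check; presumably (sketch):

    static void
    rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
    {
            /* the root class must never go overlimit */
            panic("rmc_root_overlimit");
    }
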
1612 _rmc_addq(rm_class_t *cl, mbuf_t *m)
1615 if (q_is_rio(cl->q_))
1616 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1619 if (q_is_red(cl->q_))
1620 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1623 if (cl->flags_ & RMCF_CLEARDSCP)
1624 write_dsfield(m, cl->pktattr_, 0);
1626 _addq(cl->q_, m);
1632 _rmc_dropq(rm_class_t *cl)
1636 if ((m = _getq(cl->q_)) != NULL)
1641 _rmc_getq(rm_class_t *cl)
1644 if (q_is_rio(cl->q_))
1645 return rio_getq((rio_t *)cl->red_, cl->q_);
1648 if (q_is_red(cl->q_))
1649 return red_getq(cl->red_, cl->q_);
1651 return _getq(cl->q_);
1655 _rmc_pollq(rm_class_t *cl)
1657 return qhead(cl->q_);
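
Taken together, _rmc_addq/_rmc_dropq/_rmc_getq/_rmc_pollq form the single dispatch point between a class and its queue discipline; the RED/RIO branches above sit inside ALTQ_RED/ALTQ_RIO conditionals that contain no 'cl' and are therefore invisible in this listing. With them restored, the enqueue helper reads (sketch):

    static int
    _rmc_addq(rm_class_t *cl, mbuf_t *m)
    {
    #ifdef ALTQ_RIO
            if (q_is_rio(cl->q_))
                    return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
    #endif
    #ifdef ALTQ_RED
            if (q_is_red(cl->q_))
                    return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
    #endif
            if (cl->flags_ & RMCF_CLEARDSCP)
                    write_dsfield(m, cl->pktattr_, 0);

            _addq(cl->q_, m);
            return (0);
    }

Note that cl->red_ does double duty: the cast at line 302 stores the RIO state in the same pointer, with qtype(cl->q_) recording which discipline is active.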