Lines Matching defs:ifd
82 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
141 * 'pri' on the interface given by 'ifd'.
198 rmc_newclass(int pri, struct rm_ifdat *ifd, uint64_t psecPerByte,
244 cl->ifdat_ = ifd;
315 if ((peer = ifd->active_[pri]) != NULL) {
318 while (peer->peer_ != ifd->active_[pri])
322 ifd->active_[pri] = cl;
341 if (ifd->wrr_) {
342 ifd->num_[pri]++;
343 ifd->alloc_[pri] += cl->allotment_;
344 rmc_wrr_set_weights(ifd);
354 struct rm_ifdat *ifd;
358 ifd = cl->ifdat_;
391 if (ifd->wrr_) {
392 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
393 rmc_wrr_set_weights(ifd);
409 rmc_wrr_set_weights(struct rm_ifdat *ifd)
419 if (ifd->num_[i] == 0)
420 ifd->M_[i] = 0;
422 ifd->M_[i] = ifd->alloc_[i] /
423 (ifd->num_[i] * ifd->maxpkt_);
431 if (ifd->active_[i] != NULL) {
432 clh = cl = ifd->active_[i];
435 if (ifd->M_[i] == 0)
439 ifd->M_[i];
447 rmc_get_weight(struct rm_ifdat *ifd, int pri)
450 return (ifd->M_[pri]);
545 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
592 if ((p = ifd->active_[cl->pri_]) != NULL) {
602 if (ifd->active_[cl->pri_] == cl)
603 ifd->active_[cl->pri_] = cl->peer_;
606 ifd->active_[cl->pri_] = NULL;
613 if (ifd->wrr_) {
614 ifd->alloc_[cl->pri_] -= cl->allotment_;
615 ifd->num_[cl->pri_]--;
616 rmc_wrr_set_weights(ifd);
625 rmc_depth_recompute(ifd->root_);
651 * associated with the output portion of interface 'ifp'. 'ifd' is
665 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, uint64_t psecPerByte,
683 (void)memset((char *)ifd, 0, sizeof (*ifd));
684 ifd->ifq_ = ifq;
685 ifd->restart = restart;
686 ifd->maxqueued_ = maxqueued;
687 ifd->ps_per_byte_ = psecPerByte;
688 ifd->maxpkt_ = mtu;
689 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
690 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
692 ifd->maxiftime_ = mtu * psecPerByte / 1000 / 1000 * 16;
694 ifd->maxiftime_ /= 4;
697 reset_cutoff(ifd);
698 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
704 ifd->alloc_[i] = 0;
705 ifd->M_[i] = 0;
706 ifd->num_[i] = 0;
707 ifd->na_[i] = 0;
708 ifd->active_[i] = NULL;
714 ifd->qi_ = 0;
715 ifd->qo_ = 0;
717 ifd->class_[i] = NULL;
718 ifd->curlen_[i] = 0;
719 ifd->borrowed_[i] = NULL;
725 if ((ifd->root_ = rmc_newclass(0, ifd,
733 ifd->root_->depth_ = 0;
753 struct rm_ifdat *ifd = cl->ifdat_;
758 if (ifd->cutoff_ > 0) {
760 if (ifd->cutoff_ > cl->depth_)
761 ifd->cutoff_ = cl->depth_;
774 borrow->depth_ < ifd->cutoff_) {
776 ifd->cutoff_ = borrow->depth_;
777 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
784 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
786 ifd->cutoff_ = cl->borrow_->depth_;
800 ifd->na_[cpri]++;
813 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timespec *now) - Check all
818 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timespec *now)
824 if ((bp = ifd->active_[i]) != NULL) {
828 ifd->cutoff_ = p->depth_;
836 reset_cutoff(ifd);
881 struct rm_ifdat *ifd = cl->ifdat_;
884 ifd->borrowed_[ifd->qi_] = NULL;
905 (cl->depth_ > ifd->cutoff_)) {
925 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
941 ifd->borrowed_[ifd->qi_] = cl;
963 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
977 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
978 cl = ifd->pollcache_;
980 if (ifd->efficient_) {
986 ifd->pollcache_ = NULL;
991 ifd->pollcache_ = NULL;
992 ifd->borrowed_[ifd->qi_] = NULL;
998 if (ifd->na_[cpri] == 0)
1011 cl = ifd->active_[cpri];
1025 ifd->borrowed_[ifd->qi_] = NULL;
1034 } while (cl != ifd->active_[cpri]);
1049 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1050 ifd->cutoff_++;
1051 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1060 reset_cutoff(ifd);
1061 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1063 if (!ifd->efficient_ || first == NULL)
1074 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1075 ifd->cutoff_ = cl->borrow_->depth_;
1086 ifd->na_[cpri]--;
1095 ifd->active_[cl->pri_] = cl->peer_;
1097 ifd->active_[cl->pri_] = cl;
1099 ifd->class_[ifd->qi_] = cl;
1100 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1101 ifd->now_[ifd->qi_] = now;
1102 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1103 ifd->queued_++;
1107 ifd->pollcache_ = cl;
1119 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1132 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1133 cl = ifd->pollcache_;
1135 ifd->pollcache_ = NULL;
1139 ifd->pollcache_ = NULL;
1140 ifd->borrowed_[ifd->qi_] = NULL;
1146 if (ifd->na_[cpri] == 0)
1148 cl = ifd->active_[cpri];
1159 } while (cl != ifd->active_[cpri]);
1167 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1168 ifd->cutoff_++;
1177 reset_cutoff(ifd);
1178 if (!ifd->efficient_ || first == NULL)
1189 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1190 ifd->cutoff_ = cl->borrow_->depth_;
1201 ifd->na_[cpri]--;
1203 ifd->active_[cpri] = cl->peer_;
1205 ifd->class_[ifd->qi_] = cl;
1206 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1207 ifd->now_[ifd->qi_] = now;
1208 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1209 ifd->queued_++;
1213 ifd->pollcache_ = cl;
1220 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timespec *now) - this function
1233 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1235 if (ifd->queued_ >= ifd->maxqueued_)
1237 else if (ifd->wrr_)
1238 return (_rmc_wrr_dequeue_next(ifd, mode));
1240 return (_rmc_prr_dequeue_next(ifd, mode));
1259 rmc_update_class_util(struct rm_ifdat *ifd)
1271 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1275 pktlen = (int64_t)ifd->curlen_[ifd->qo_];
1276 borrowed = ifd->borrowed_[ifd->qo_];
1289 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1291 nowp = &ifd->now_[ifd->qo_];
1294 pkt_time = (int64_t)ifd->curlen_[ifd->qo_] * (int64_t)ifd->ps_per_byte_;
1297 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1299 if (ifd->ifq_->altq_ifp->if_type == IFT_PPP) {
1300 if (TS_LT(nowp, &ifd->ifnow_)) {
1309 TS_DELTA(&ifd->ifnow_, nowp, iftime);
1310 if (iftime+pkt_time < ifd->maxiftime_) {
1311 TS_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1313 TS_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1316 TS_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1319 if (TS_LT(nowp, &ifd->ifnow_)) {
1320 TS_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1322 TS_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1327 TS_DELTA(&ifd->ifnow_, &cl->last_, idle);
1380 cl->last_ = ifd->ifnow_;
1396 cl = ifd->class_[ifd->qo_];
1397 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1400 rmc_tl_satisfied(ifd, nowp);
1401 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1403 ifd->cutoff_ = borrowed->depth_;
1408 reset_cutoff(ifd);
1410 rmc_tl_satisfied(ifd, &now);
1412 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1414 ifd->cutoff_ = borrowed->depth_;
1423 ifd->borrowed_[ifd->qo_] = NULL;
1424 ifd->class_[ifd->qo_] = NULL;
1425 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1426 ifd->queued_--;
1442 struct rm_ifdat *ifd = cl->ifdat_;
1447 ifd->na_[cl->pri_]--;
1453 struct rm_ifdat *ifd = cl->ifdat_;
1458 ifd->na_[cl->pri_]--;
1574 struct rm_ifdat *ifd = cl->ifdat_;
1582 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1584 (ifd->restart)(ifd->ifq_);