
Lines Matching refs:cc

118 #define BUCKET(cc, rel, abs)						\
121 ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))] \
122 : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE] \
124 ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE] \
125 : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
127 #define MOVEBUCKET(cc, wheel, time) \
128 CIRCQ_APPEND(&(cc)->cc_todo, \
129 &(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
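
The BUCKET() and MOVEBUCKET() macros implement a four-level hashed timing wheel: rel is the number of ticks until expiry, abs the absolute expiry tick, and the entry lands on the shallowest wheel whose span covers rel, masked by the matching bits of abs. The guarding condition lines do not contain "cc" and are filtered out of the listing above; the sketch below reconstructs the selection logic as standalone C. WHEELBITS, WHEELSIZE and the bucket_index() helper are illustrative assumptions, not necessarily the file's exact definitions.

    #include <stdio.h>

    /* Four cascaded wheels of WHEELSIZE buckets; constants assumed. */
    #define WHEELBITS  8
    #define WHEELSIZE  (1 << WHEELBITS)
    #define WHEELMASK  (WHEELSIZE - 1)
    #define MASKWHEEL(w, t)  (((t) >> ((w) * WHEELBITS)) & WHEELMASK)

    /* Index into a flat array of 4*WHEELSIZE buckets: pick the shallowest
     * wheel whose span covers the relative timeout, then mask the matching
     * bits of the absolute expiry time. */
    static int
    bucket_index(int rel, int abs)
    {
    	if (rel <= (1 << (2 * WHEELBITS)))
    		return (rel <= (1 << WHEELBITS))
    		    ? MASKWHEEL(0, abs)
    		    : MASKWHEEL(1, abs) + WHEELSIZE;
    	return (rel <= (1 << (3 * WHEELBITS)))
    	    ? MASKWHEEL(2, abs) + 2 * WHEELSIZE
    	    : MASKWHEEL(3, abs) + 3 * WHEELSIZE;
    }

    int
    main(void)
    {
    	/* 10 ticks out lands on wheel 0; 100000 ticks out on wheel 2. */
    	printf("%d\n", bucket_index(10, 1000 + 10));
    	printf("%d\n", bucket_index(100000, 1000 + 100000));
    	return 0;
    }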
262 struct callout_cpu *cc;
266 cc = c->c_cpu;
267 lock = cc->cc_lock;
269 if (__predict_true(cc == c->c_cpu))
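
Lines 262-269 are the body of a lock-then-revalidate loop: the callout's owning CPU, and therefore the lock protecting it, can change while the caller holds no lock, so after acquiring cc_lock the code re-reads c->c_cpu and retries if the callout migrated in the meantime. A minimal userspace sketch of the same pattern, using pthreads and hypothetical stand-in types:

    #include <pthread.h>
    #include <stdatomic.h>

    /* Hypothetical stand-ins for the kernel types. */
    struct cpu_state {
    	pthread_mutex_t lock;
    };
    struct timer {
    	_Atomic(struct cpu_state *) cpu;   /* may change while unlocked */
    };

    /* Lock the per-CPU state a timer currently belongs to.  The owner can
     * change between reading t->cpu and acquiring the lock, so re-check
     * after acquisition and retry on a race -- the same shape as the
     * callout lock routine above. */
    static struct cpu_state *
    timer_lock(struct timer *t)
    {
    	for (;;) {
    		struct cpu_state *cs = atomic_load(&t->cpu);
    		pthread_mutex_lock(&cs->lock);
    		if (atomic_load(&t->cpu) == cs)    /* still the owner? */
    			return cs;
    		pthread_mutex_unlock(&cs->lock);   /* migrated: retry */
    	}
    }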
279 callout_running_somewhere_else(callout_impl_t *c, struct callout_cpu *cc)
281 KASSERT(c->c_cpu == cc);
283 return cc->cc_active == c && cc->cc_lwp != curlwp;
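
callout_running_somewhere_else() answers one question: is this callout's handler executing right now on a thread other than the caller? cc_active is the callout currently being dispatched and cc_lwp the dispatching LWP. A pthread equivalent, with assumed field names:

    #include <pthread.h>
    #include <stdbool.h>

    struct timer;                      /* opaque for this sketch */
    struct cpu_state {
    	struct timer *active;          /* callout currently being invoked */
    	pthread_t     dispatcher;      /* thread invoking it */
    };

    /* Mirror of callout_running_somewhere_else(): the handler is in
     * flight and the caller is not the thread running it, so the caller
     * must wait rather than assume the handler has finished. */
    static bool
    timer_running_elsewhere(const struct timer *t, const struct cpu_state *cs)
    {
    	return cs->active == t &&
    	    !pthread_equal(cs->dispatcher, pthread_self());
    }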
295 struct callout_cpu *cc;
300 cc = &callout_cpu0;
301 cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
302 CIRCQ_INIT(&cc->cc_todo);
304 CIRCQ_INIT(&cc->cc_wheel[b]);
305 curcpu()->ci_data.cpu_callout = cc;
318 struct callout_cpu *cc;
321 if ((cc = ci->ci_data.cpu_callout) == NULL) {
322 cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
323 cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
324 CIRCQ_INIT(&cc->cc_todo);
326 CIRCQ_INIT(&cc->cc_wheel[b]);
335 sleepq_init(&cc->cc_sleepq);
337 snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
339 evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
340 NULL, "callout", cc->cc_name1);
342 snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
344 evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
345 NULL, "callout", cc->cc_name2);
347 cc->cc_cpu = ci;
348 ci->ci_data.cpu_callout = cc;
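
Lines 295-348 set up per-CPU callout state twice: callout_startup() wires up the statically allocated callout_cpu0 before dynamic allocation is available, and callout_init_cpu() allocates and initializes state for each additional CPU, including the "late/%u" and "wait/%u" event counters. A sketch of the common initialization steps; the struct layout and circq_init() are assumptions standing in for the kernel's CIRCQ machinery:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    #define WHEELSIZE 256              /* assumed, as above */
    #define BUCKETS   (4 * WHEELSIZE)

    struct circq { struct circq *next, *prev; };

    static void
    circq_init(struct circq *q)
    {
    	q->next = q->prev = q;     /* empty circular queue points at itself */
    }

    /* Hypothetical per-CPU timer state, mirroring struct callout_cpu. */
    struct timer_cpu {
    	pthread_mutex_t lock;
    	struct circq    todo;              /* expired, awaiting dispatch */
    	struct circq    wheel[BUCKETS];    /* pending, hashed by expiry */
    	char            name_late[16];
    	char            name_wait[16];
    };

    static struct timer_cpu *
    timer_cpu_init(unsigned cpu_index)
    {
    	struct timer_cpu *tc = calloc(1, sizeof(*tc));
    	if (tc == NULL)
    		return NULL;
    	pthread_mutex_init(&tc->lock, NULL);
    	circq_init(&tc->todo);
    	for (int b = 0; b < BUCKETS; b++)
    		circq_init(&tc->wheel[b]);
    	/* Per-CPU event-counter names, as in callout_init_cpu(). */
    	snprintf(tc->name_late, sizeof(tc->name_late), "late/%u", cpu_index);
    	snprintf(tc->name_wait, sizeof(tc->name_wait), "wait/%u", cpu_index);
    	return tc;
    }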
361 struct callout_cpu *cc;
367 cc = curcpu()->ci_data.cpu_callout;
370 if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
372 c->c_cpu = cc;
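
The initialization path at lines 361-372 decides where a new callout lives: CALLOUT_MPSAFE callouts start on the initializing CPU's callout_cpu, while everything else is pinned to callout_cpu0 so non-MP-safe handlers stay serialized on one CPU. A sketch of that choice; timer_cpu0 and current_cpu_timers() are hypothetical stand-ins:

    #include <stdbool.h>

    struct timer_cpu;                            /* opaque for this sketch */
    extern struct timer_cpu timer_cpu0;          /* shared, serialized state */
    struct timer_cpu *current_cpu_timers(void);  /* hypothetical accessor */

    struct timer { struct timer_cpu *cpu; unsigned flags; };
    #define TIMER_MPSAFE 0x01

    /* MP-safe timers start on the initializing CPU; legacy handlers are
     * funneled through the global CPU-0 instance. */
    static void
    timer_init(struct timer *t, unsigned flags)
    {
    	struct timer_cpu *tc = current_cpu_timers();

    	t->flags = flags;
    	if ((flags & TIMER_MPSAFE) != 0 && tc != NULL)
    		t->cpu = tc;
    	else
    		t->cpu = &timer_cpu0;
    }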
418 struct callout_cpu *cc, *occ;
448 cc = curcpu()->ci_data.cpu_callout;
449 if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
450 !mutex_tryenter(cc->cc_lock)) {
457 c->c_cpu = cc;
458 c->c_time = to_ticks + cc->cc_ticks;
460 CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
461 mutex_spin_exit(cc->cc_lock);
464 occ->cc_cpu, cc->cc_cpu);
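
The scheduling path at lines 418-464 migrates an unbound callout to the CPU doing the scheduling, but only opportunistically: it uses mutex_tryenter() on the local CPU's lock, since unconditionally holding two per-CPU locks could deadlock against a scheduler taking them in the opposite order. A sketch of that trylock-or-stay decision, with simplified list handling and assumed field names:

    #include <pthread.h>
    #include <stdbool.h>

    struct circq { struct circq *next, *prev; };

    static void
    circq_insert(struct circq *item, struct circq *head)
    {
    	item->prev = head->prev;           /* insert at the tail */
    	item->next = head;
    	head->prev->next = item;
    	head->prev = item;
    }

    struct timer_cpu {
    	pthread_mutex_t lock;
    	struct circq    todo;
    	int             ticks;
    };

    struct timer {
    	struct circq      link;
    	struct timer_cpu *cpu;
    	int               expire;
    	bool              bound;           /* pinned to its CPU */
    };

    /* Caller holds old->lock.  Move the timer to the current CPU's queue
     * only if that CPU's lock can be taken without blocking; otherwise
     * leave it where it is, as the code above does. */
    static void
    timer_schedule(struct timer *t, struct timer_cpu *old,
        struct timer_cpu *cur, int ticks_from_now)
    {
    	struct timer_cpu *tc = old;

    	if (!t->bound && cur != old &&
    	    pthread_mutex_trylock(&cur->lock) == 0) {
    		t->cpu = cur;              /* migrate to the scheduling CPU */
    		tc = cur;
    	}
    	t->expire = tc->ticks + ticks_from_now;
    	circq_insert(&t->link, &tc->todo);
    	if (tc == cur && cur != old)
    		pthread_mutex_unlock(&cur->lock);
    	pthread_mutex_unlock(&old->lock);
    }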
586 struct callout_cpu *cc;
603 cc = c->c_cpu;
604 if (__predict_true(!callout_running_somewhere_else(c, cc)))
621 cc->cc_nwait++;
622 cc->cc_ev_block.ev_count++;
623 nlocks = sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
624 sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
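
The halt path at lines 586-624 must not return while the handler is running elsewhere, so it bumps cc_nwait and sleeps on the per-CPU sleep queue until the dispatcher finishes the handler and wakes the waiters. A condition-variable rendering of the same wait, with assumed field names; the caller holds tc->lock, which pthread_cond_wait() releases and retakes while sleeping:

    #include <pthread.h>

    struct timer;                       /* opaque for this sketch */

    struct timer_cpu {
    	pthread_mutex_t lock;
    	pthread_cond_t  wait;           /* stands in for cc_sleepq */
    	struct timer   *active;         /* callout currently being invoked */
    	pthread_t       dispatcher;
    	unsigned        nwait;          /* threads blocked waiting here */
    };

    /* Wait out a handler already in flight on another thread, as the
     * halt routine above does via the sleep queue. */
    static void
    timer_halt_wait(struct timer *t, struct timer_cpu *tc)
    {
    	while (tc->active == t &&
    	    !pthread_equal(tc->dispatcher, pthread_self())) {
    		tc->nwait++;
    		pthread_cond_wait(&tc->wait, &tc->lock);
    	}
    }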
662 struct callout_cpu *cc;
671 cc = ci->ci_data.cpu_callout;
673 if (c->c_cpu != cc) {
681 c->c_cpu = cc;
789 struct callout_cpu *cc;
792 cc = curcpu()->ci_data.cpu_callout;
793 mutex_spin_enter(cc->cc_lock);
795 ticks = ++cc->cc_ticks;
797 MOVEBUCKET(cc, 0, ticks);
799 MOVEBUCKET(cc, 1, ticks);
801 MOVEBUCKET(cc, 2, ticks);
803 MOVEBUCKET(cc, 3, ticks);
807 needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
808 mutex_spin_exit(cc->cc_lock);
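
The hardclock path at lines 789-808 advances cc_ticks and moves this tick's wheel-0 bucket onto cc_todo on every tick; the wheel-1, 2 and 3 MOVEBUCKET calls run only when the wheel below has wrapped to zero, which is why they appear here without their guarding if-lines (the conditions don't mention cc). The cascade below restores the guards as commonly written for hashed timing wheels; constants are assumed as in the earlier sketch:

    #define WHEELBITS 8
    #define WHEELSIZE (1 << WHEELBITS)
    #define WHEELMASK (WHEELSIZE - 1)
    #define MASKWHEEL(w, t) (((t) >> ((w) * WHEELBITS)) & WHEELMASK)

    /* Stand-in: splice wheel bucket (wheel, MASKWHEEL(wheel, t)) onto
     * the to-do list; the real work is MOVEBUCKET()/CIRCQ_APPEND(). */
    static void
    move_bucket(int wheel, int t)
    {
    	(void)wheel; (void)t;
    }

    /* Per-tick work: always drain this tick's wheel-0 bucket, and
     * cascade a higher wheel each time the wheel below it wraps. */
    static void
    timer_tick(int *ticks_p)
    {
    	int ticks = ++*ticks_p;

    	move_bucket(0, ticks);
    	if (MASKWHEEL(0, ticks) == 0) {
    		move_bucket(1, ticks);
    		if (MASKWHEEL(1, ticks) == 0) {
    			move_bucket(2, ticks);
    			if (MASKWHEEL(2, ticks) == 0)
    				move_bucket(3, ticks);
    		}
    	}
    }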
824 struct callout_cpu *cc;
833 cc = l->l_cpu->ci_data.cpu_callout;
835 mutex_spin_enter(cc->cc_lock);
836 cc->cc_lwp = l;
837 while (!CIRCQ_EMPTY(&cc->cc_todo)) {
838 c = CIRCQ_FIRST(&cc->cc_todo);
841 KASSERT(c->c_cpu == cc);
847 ticks = cc->cc_ticks;
850 CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
854 cc->cc_ev_late.ev_count++;
861 cc->cc_active = c;
864 mutex_spin_exit(cc->cc_lock);
877 mutex_spin_enter(cc->cc_lock);
884 cc->cc_active = NULL;
885 if ((count = cc->cc_nwait) != 0) {
886 cc->cc_nwait = 0;
888 sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
889 mutex_spin_enter(cc->cc_lock);
892 cc->cc_lwp = NULL;
893 mutex_spin_exit(cc->cc_lock);
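
The dispatch loop at lines 824-893 drains cc_todo under the lock: entries whose time has not arrived are re-hashed onto a wheel with BUCKET(), late ones bump the "late" counter, and due ones are marked cc_active and invoked with the lock dropped, after which any waiters blocked in the halt path are woken. A compact sketch of that loop, using a plain singly linked to-do list instead of CIRCQ and the assumed field names from the sketches above:

    #include <pthread.h>
    #include <stddef.h>

    struct timer_cpu;

    struct timer {
    	struct timer    *next;             /* stands in for the CIRCQ link */
    	int              expire;
    	void           (*func)(void *);
    	void            *arg;
    };

    struct timer_cpu {
    	pthread_mutex_t  lock;
    	pthread_cond_t   wait;
    	struct timer    *todo;
    	struct timer    *active;
    	unsigned         nwait;
    	int              ticks;
    };

    /* Stand-in: re-hash a not-yet-due timer via the BUCKET() logic. */
    static void
    requeue(struct timer_cpu *tc, struct timer *t, int delta)
    {
    	(void)tc; (void)t; (void)delta;
    }

    /* Run handlers unlocked so they may reschedule or sleep, then wake
     * anyone blocked waiting for this handler to finish. */
    static void
    timer_softclock(struct timer_cpu *tc)
    {
    	pthread_mutex_lock(&tc->lock);
    	while (tc->todo != NULL) {
    		struct timer *t = tc->todo;
    		tc->todo = t->next;

    		int delta = t->expire - tc->ticks;
    		if (delta > 0) {
    			requeue(tc, t, delta);  /* not due: back on a wheel */
    			continue;
    		}
    		tc->active = t;
    		pthread_mutex_unlock(&tc->lock);
    		(*t->func)(t->arg);             /* run unlocked */
    		pthread_mutex_lock(&tc->lock);
    		tc->active = NULL;
    		if (tc->nwait != 0) {           /* release halt waiters */
    			tc->nwait = 0;
    			pthread_cond_broadcast(&tc->wait);
    		}
    	}
    	pthread_mutex_unlock(&tc->lock);
    }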
899 db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *kbucket,
917 b = (bucket - cc->cc_wheel);
921 c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
931 struct callout_cpu *cc;
948 sizeof(cc), (char *)&cc);
949 db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
950 db_show_callout_bucket(&ccb, &cc->cc_todo, &ccb.cc_todo);
956 sizeof(cc), (char *)&cc);
957 db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
958 db_show_callout_bucket(&ccb, &cc->cc_wheel[b],
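
The debugger code at lines 931-958 never dereferences target pointers directly: db_read_bytes() copies first the pointer and then the whole struct into a local buffer (ccb), while address arithmetic such as &cc->cc_wheel[b] is still done on the remote pointer so printed addresses refer to the target image, not the copy. The same pattern in miniature; read_remote() and show_bucket() are stand-ins, with memcpy in place of a fault-protected debugger read:

    #include <string.h>

    struct queue { struct queue *next, *prev; };
    struct timer_cpu { struct queue todo; /* ... */ };

    /* Stand-in for db_read_bytes(): copy from the target image. */
    static void
    read_remote(const void *remote, size_t len, void *local)
    {
    	memcpy(local, remote, len);
    }

    /* Stand-in display routine: prints the target address remote_q and
     * the field contents from the readable copy local_q; elided here. */
    static void
    show_bucket(struct timer_cpu *local, struct queue *remote_q,
        struct queue *local_q)
    {
    	(void)local; (void)remote_q; (void)local_q;
    }

    /* Copy the whole struct before touching fields, but keep doing
     * address arithmetic on the remote pointer. */
    static void
    show_timer_cpu(struct timer_cpu *remote_tc)
    {
    	struct timer_cpu local;

    	read_remote(remote_tc, sizeof(local), &local);
    	show_bucket(&local, &remote_tc->todo, &local.todo);
    }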