Home | Sort by: relevance | last modified time | path
    Searched refs:ci_schedstate (Results 1 - 25 of 36) sorted by relevance

1 2

  /src/sys/kern/
kern_cpu.c 231 if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
235 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
239 cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
241 (ci->ci_schedstate.spc_lastmod >> 32);
322 spc = &ci->ci_schedstate;
329 mspc = &target_ci->ci_schedstate;
355 mspc = &mci->ci_schedstate;
394 spc = &ci->ci_schedstate;
410 spc = &ci->ci_schedstate;
427 if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0
    [all...]
subr_cpu.c 275 ci->ci_schedstate.spc_flags |=
297 ci->ci_schedstate.spc_flags &=
320 ci->ci_schedstate.spc_flags &=
373 ci3->ci_schedstate.spc_flags |= SPCF_CORE1ST;
379 if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) == 0) {
385 if ((ci2->ci_schedstate.spc_flags &
394 if ((ci3->ci_schedstate.spc_flags & SPCF_PACKAGE1ST) != 0) {
398 ci3->ci_schedstate.spc_flags |= SPCF_PACKAGE1ST;
410 if ((ci2->ci_schedstate.spc_flags & SPCF_PACKAGE1ST)
445 ci->ci_schedstate.spc_flags |= SPCF_1STCLASS
    [all...]
kern_runq.c 143 spc = &ci->ci_schedstate;
194 spc = &ci->ci_schedstate;
195 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
265 spc = &l->l_cpu->ci_schedstate;
326 spc = &ci->ci_schedstate;
420 KASSERT(lwp_locked(l, ci->ci_schedstate.spc_mutex));
455 const struct schedstate_percpu *spc = &ci->ci_schedstate;
476 struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
503 bestspc = &bestci->ci_schedstate;
523 curspc = &curci->ci_schedstate;
    [all...]
kern_idle.c 54 spc = &ci->ci_schedstate;
94 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
kern_clock.c 325 if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
327 ci->ci_schedstate.spc_schedticks = hardscheddiv;
330 if ((--ci->ci_schedstate.spc_ticks) <= 0)
414 struct schedstate_percpu *spc = &ci->ci_schedstate;
kern_synch.c 297 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
322 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
620 KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
631 spc = &ci->ci_schedstate;
895 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
908 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
917 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
951 oldlock = lwp_setlock(l, l->l_cpu->ci_schedstate.spc_mutex);
1063 spc = &ci->ci_schedstate;
1092 spc = &ci->ci_schedstate;
    [all...]
sched_m2.c 280 struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;
298 struct schedstate_percpu *spc = &ci->ci_schedstate;
subr_interrupt.c 71 spc = &ci->ci_schedstate;
96 spc = &ci->ci_schedstate;
sys_pset.c 224 spc = &ci->ci_schedstate;
322 ispc = &ici->ci_schedstate;
kern_sleepq.c 140 spc = &ci->ci_schedstate;
170 spc = &ci->ci_schedstate;
kern_kthread.c 116 lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
sched_4bsd.c 104 struct schedstate_percpu *spc = &ci->ci_schedstate;
init_main.c 729 ci->ci_schedstate.spc_lastmod = time_second;
817 curcpu()->ci_schedstate.spc_flags |= SPCF_RUNNING;
kern_lwp.c 400 l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
837 KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
1295 lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_lwplock);
1403 tspc = &tci->ci_schedstate;
  /src/sys/arch/sparc/sparc/
timer_sun4.c 137 if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
timer_sun4m.c 215 if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) {
timer_msiiep.c 288 if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
  /src/sys/rump/librump/rumpkern/
scheduler.c 174 ci->ci_schedstate.spc_mutex =
176 ci->ci_schedstate.spc_flags = SPCF_RUNNING;
381 l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
405 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
  /src/sys/sys/
lwp.h 411 mutex_spin_enter(ci->ci_schedstate.spc_mutex);
417 mutex_spin_exit(ci->ci_schedstate.spc_mutex);
423 struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
424 struct schedstate_percpu *spc2 = &ci2->ci_schedstate;
cpu_data.h 200 #define ci_schedstate ci_data.cpu_schedstate macro
  /src/sys/miscfs/procfs/
procfs_linux.c 299 curcpu()->ci_schedstate.spc_cp_time[CP_USER],
300 curcpu()->ci_schedstate.spc_cp_time[CP_NICE],
301 curcpu()->ci_schedstate.spc_cp_time[CP_SYS] /*+ [CP_INTR]*/,
302 curcpu()->ci_schedstate.spc_cp_time[CP_IDLE]);
319 CPUNAME->ci_schedstate.spc_cp_time[CP_USER],
320 CPUNAME->ci_schedstate.spc_cp_time[CP_NICE],
321 CPUNAME->ci_schedstate.spc_cp_time[CP_SYS],
322 CPUNAME->ci_schedstate.spc_cp_time[CP_IDLE]);
590 idle = curcpu()->ci_schedstate.spc_cp_time[CP_IDLE];
  /src/sys/arch/macppc/dev/
lightbar.c 279 sc->sc_cpu[0]->ci_schedstate.spc_cp_time,
287 sc->sc_cpu[1]->ci_schedstate.spc_cp_time,
  /src/sys/arch/alpha/pci/
pci_machdep.c 287 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0 ||
302 if (ci->ci_schedstate.spc_flags & SPCF_NOINTR) {
440 (current_ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
  /src/sys/arch/sparc64/sparc64/
clock.c 738 if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
740 ci->ci_schedstate.spc_schedticks = statscheddiv;
  /src/sys/arch/x86/x86/
intr.c 566 if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
586 if ((ci->ci_schedstate.spc_flags &
1687 if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
1707 if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
1772 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
1960 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
2223 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {

Completed in 24 milliseconds

1 2