Home | Sort by: relevance | last modified time | path
    Searched refs:l_cpu (Results 1 - 25 of 75) sorted by relevance

1 2 3

  /src/sys/sys/
userret.h 54 KASSERTMSG(l->l_cpu->ci_biglock_count == 0, "kernel_lock leaked");
56 exception = l->l_cpu->ci_want_resched | (l->l_flag & LW_USERRET);
  /src/sys/rump/librump/rumpkern/arch/x86/
rump_x86_cpu.c 66 return curlwp->l_cpu;
  /src/sys/arch/powerpc/include/
userret.h 67 || l->l_cpu->ci_data.cpu_pcu_curlwp[PCU_FPU] == l,
69 tf, tf->tf_srr1, l->l_cpu->ci_data.cpu_pcu_curlwp[PCU_FPU], l);
  /src/sys/kern/
kern_idle.c 94 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
96 spc_lock(l->l_cpu);
116 if (ci != lwp0.l_cpu) {
kern_synch.c 297 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
300 spc_lock(l->l_cpu);
322 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
325 spc_lock(l->l_cpu);
342 needed = l->l_cpu->ci_want_resched;
433 spc_lock(l->l_cpu);
566 KASSERT(newl->l_cpu == ci);
601 * NOTE: l->l_cpu is not changed in this routine, because an LWP never
602 * changes its own l_cpu (that would screw up curcpu on many ports and could
603 * cause all kinds of other evil stuff). l_cpu is always changed by some
    [all...]
kern_runq.c 149 if (ci == lwp0.l_cpu) {
183 * l_cpu.
193 ci = l->l_cpu;
195 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
265 spc = &l->l_cpu->ci_schedstate;
418 struct cpu_info *ci = l->l_cpu;
577 ci = l->l_cpu;
592 if (sched_migratable(l, curlwp->l_cpu) && eprio >
593 curlwp->l_cpu->ci_schedstate.spc_maxpriority) {
594 return curlwp->l_cpu;
    [all...]
kern_lwp.c 175 * lock and matches lwp::l_cpu.
180 * This is a per-CPU lock and matches lwp::l_cpu.
278 .l_cpu = LWP0_CPU_INFO,
399 l->l_cpu = curcpu();
400 l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
418 * The value of l->l_cpu must still be valid at this point.
420 KASSERT(l->l_cpu != NULL);
835 KASSERT(l2->l_cpu != NULL);
837 KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
1239 spc_lock(l->l_cpu);
    [all...]
kern_softint.c 736 l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
743 spc_lock(l->l_cpu);
814 "onproc=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
815 "curlwp=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
816 "pinned=%p => l_stat=%d l_flag=%08x l_cpu=%d\n",
818 onproc->l_flag, cpu_index(onproc->l_cpu), curlwp,
820 cpu_index(curlwp->l_cpu), pinned, pinned->l_stat,
821 pinned->l_flag, cpu_index(pinned->l_cpu));
864 spc_lock(l->l_cpu);
kern_sleepq.c 139 ci = l->l_cpu;
168 l->l_cpu = sched_takecpu(l);
169 ci = l->l_cpu;
390 spc_lock(l->l_cpu);
kern_kthread.c 115 if (ci != l->l_cpu) {
118 l->l_cpu = ci;
  /src/sys/rump/include/machine/
cpu.h 81 #define curcpu() (curlwp->l_cpu)
  /src/sys/arch/arm/arm/
arm_machdep.c 272 if (l->l_cpu != curcpu()) {
274 intr_ipi_send(l->l_cpu->ci_kcpuset, IPI_AST);
294 idepth = l->l_cpu->ci_intr_depth;
296 cpl = l->l_cpu->ci_cpl;
  /src/sys/rump/librump/rumpkern/
scheduler.c 199 struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(l->l_cpu);
209 struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(l->l_cpu);
380 l->l_cpu = l->l_target_cpu = ci;
405 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
451 rump_softint_run(l->l_cpu);
462 ci = l->l_cpu;
lwproc.c 353 l->l_cpu = &rump_bootcpu;
473 newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
  /src/sys/arch/mips/mips/
mips_dsp.c 96 l->l_cpu->ci_ev_dsp_saves.ev_count++;
142 l->l_cpu->ci_ev_dsp_loads.ev_count++;
mips_fpu.c 98 l->l_cpu->ci_ev_fpu_saves.ev_count++;
220 l->l_cpu->ci_ev_fpu_loads.ev_count++;
mips_softint.c 62 struct cpu_info * const ci = l->l_cpu;
  /src/sys/arch/or1k/include/
cpu.h 78 return curlwp->l_cpu;
  /src/sys/ddb/
db_proc.c 200 if (l.l_cpu != NULL) {
202 &l.l_cpu->ci_data.cpu_index,
345 if (l.l_cpu != NULL) {
347 &l.l_cpu->ci_data.cpu_index,
  /src/sys/arch/riscv/riscv/
softint_machdep.c 73 lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
  /src/sys/arch/aarch64/aarch64/
cpu_machdep.c 100 lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
283 KASSERT(l->l_cpu == curcpu());
295 if (l->l_cpu != curcpu()) {
297 intr_ipi_send(l->l_cpu->ci_kcpuset, IPI_AST);
  /src/sys/arch/sparc/include/
cpu.h 484 (l)->l_cpu->ci_want_ast = 1; \
487 if ((l)->l_cpu->ci_cpuid != cpu_number()) \
488 XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid); \
  /src/sys/arch/powerpc/powerpc/
softint_machdep.c 109 struct cpu_info * const ci = l->l_cpu;
  /src/sys/arch/alpha/include/
cpu.h 179 #define curcpu() curlwp->l_cpu
  /src/sys/arch/powerpc/ibm4xx/
ibm4xx_machdep.c 262 KASSERT(lwp0.l_cpu == curcpu());
355 KASSERT(lwp0.l_cpu != NULL);

Completed in 25 milliseconds

1 2 3