Searched refs:LP_INTR (Results 1 - 18 of 18), sorted by relevance

  /src/sys/kern/
kern_synch.c 193 KASSERT((l->l_pflag & LP_INTR) == 0);
219 KASSERT((l->l_pflag & LP_INTR) == 0);
400 if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
475 (l->l_flag & LW_IDLE) != 0 || (l->l_pflag & LP_INTR) != 0 ||
643 if ((l->l_pflag & LP_INTR) != 0) {
678 KASSERT((l->l_pflag & LP_INTR) == 0);
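The kern_synch.c assertions all enforce one invariant: a soft-interrupt LWP must never enter the voluntary sleep or yield paths, since softint(9) handlers may take locks but may not sleep (the kern_condvar.c hit below asserts the same on the condition-variable path). A minimal sketch of the test being asserted; the helper name lwp_is_softint() is hypothetical, while LP_INTR and l_pflag come from src/sys/sys/lwp.h:

    #include <sys/types.h>
    #include <sys/lwp.h>	/* struct lwp, LP_INTR */

    /* Hypothetical helper: true iff this LWP runs soft interrupts. */
    static inline bool
    lwp_is_softint(const struct lwp *l)
    {
            return (l->l_pflag & LP_INTR) != 0;
    }

    /* The sleep/yield paths then assert !lwp_is_softint(l). */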
subr_cpu.c 137 return (curlwp->l_pflag & LP_INTR) != 0;
148 * - Softints (LP_INTR) never migrate between CPUs.
154 * We combine the LP_INTR, LP_BOUND, and l_nopreempt test into
162 return __predict_true(((pflag & (LP_INTR|LP_BOUND)) | nopreempt)
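The subr_cpu.c hits implement the migration rule stated in the comments above: softints never move between CPUs, and the test folds LP_INTR, LP_BOUND, and l_nopreempt into a single branch. A sketch of that combined test, reconstructed from line 162 above; the wrapper name is assumed:

    /* Hypothetical wrapper around the combined test from line 162:
     * an LWP may migrate only if it is not a softint (LP_INTR),
     * not bound to a CPU (LP_BOUND), and not inside a
     * no-preemption section (l_nopreempt != 0). */
    static inline bool
    lwp_migratable_p(const struct lwp *l)
    {
            const int pflag = l->l_pflag;
            const int nopreempt = l->l_nopreempt;

            /* Zero iff all three conditions are clear. */
            return __predict_true(((pflag & (LP_INTR | LP_BOUND)) |
                nopreempt) == 0);
    }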
kern_kthread.c 128 l->l_pflag |= LP_INTR;
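kern_kthread.c is where the flag gets set: soft-interrupt threads are created through kthread_create(9), which marks them before they first run. A sketch of that path, assuming the KTHREAD_INTR creation flag described in kthread(9):

    /* Inside kthread_create(): interrupt-service threads get
     * LP_INTR set at creation time.  KTHREAD_INTR per kthread(9);
     * surrounding code elided. */
    if ((flag & KTHREAD_INTR) != 0)
            l->l_pflag |= LP_INTR;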
kern_clock.c 506 if (CLKF_INTR(frame) || (curlwp->l_pflag & LP_INTR) != 0) {
kern_condvar.c 133 KASSERT((l->l_pflag & LP_INTR) == 0 || panicstr != NULL);
sys_pset.c 393 if (t->l_pflag & (LP_BOUND | LP_INTR))
kern_cpu.c 344 if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
kern_resource.c 480 (l->l_pflag & (LP_INTR | LP_TIMEINTR)) != LP_INTR) {
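The kern_resource.c test is a two-bit trick: the expression is false only for a softint LWP whose time is not being accounted, so CPU time is charged for ordinary LWPs and for softints marked LP_TIMEINTR. Spelled out below; the meaning of LP_TIMEINTR is inferred from this use:

    const int f = l->l_pflag & (LP_INTR | LP_TIMEINTR);

    /* f == 0                    ordinary LWP: charge time
     * f == LP_INTR              softint, unaccounted: skip
     * f == LP_INTR|LP_TIMEINTR  softint being timed: charge */
    if (f != LP_INTR) {
            /* ... charge CPU time to this LWP ... */
    }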
kern_softint.c 650 KASSERT((l->l_pflag & LP_INTR) != 0);
subr_lockdebug.c 638 if ((l->l_pflag & LP_INTR) == 0) {
kern_runq.c 347 } else if (pri >= sched_kpreempt_pri && (l->l_pflag & LP_INTR) == 0) {
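The kern_runq.c hit shows the other side of preemption: when a high-priority LWP becomes runnable, the scheduler requests a kernel preemption only if the LWP currently on the CPU is not a softint, since softints must run to completion. A sketch of that decision; the enclosing function and the preemption mechanism are assumed:

    /* l is the LWP currently running on the target CPU. */
    if (pri >= sched_kpreempt_pri && (l->l_pflag & LP_INTR) == 0) {
            /* request kernel preemption of l's CPU, e.g. via
             * an IPI/AST; the mechanism is elided here */
    }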
  /src/sys/rump/librump/rumpkern/
threads.c 201 l->l_pflag |= LP_INTR;
scheduler.c 450 if ((l->l_pflag & LP_INTR) == 0)
  /src/sys/sys/
lwp.h 285 #define LP_INTR 0x00000040 /* Soft interrupt handler */
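lwp.h carries the definition itself: LP_INTR is a bit in l_pflag, the LWP-private flag word, which is normally written only by the owning LWP and can therefore be tested on curlwp without locking. The canonical test is the one-liner from subr_cpu.c line 137 above:

    #include <sys/types.h>
    #include <sys/lwp.h>

    /* True iff the calling LWP is a soft-interrupt handler. */
    bool
    cpu_softintr_p(void)
    {
            return (curlwp->l_pflag & LP_INTR) != 0;
    }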
  /src/sys/arch/amd64/amd64/
trap.c 446 if (cpu_intr_p() || (l->l_pflag & LP_INTR) != 0) {
  /src/sys/arch/x86/x86/
fpu.c 521 while ((l->l_pflag & LP_INTR) && (l->l_switchto != NULL))
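The x86 fpu.c hit walks a chain of nested soft interrupts: a softint LWP records the LWP it interrupted in l_switchto, so following that chain from curlwp finds the underlying LWP whose FPU state is actually live. A sketch of the walk from line 521 above; surrounding context assumed:

    /* If curlwp is a softint, the FPU context belongs to the LWP
     * it interrupted, possibly several softint levels down. */
    struct lwp *l = curlwp;

    while ((l->l_pflag & LP_INTR) && (l->l_switchto != NULL))
            l = l->l_switchto;
    /* l now names the LWP that owns the live FPU state. */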
  /src/sys/arch/i386/i386/
trap.c 567 if (cpu_intr_p() || (l->l_pflag & LP_INTR) != 0) {
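The amd64 and i386 trap.c hits apply the same rule: a page fault taken in hard-interrupt context (cpu_intr_p()) or while running a softint LWP (LP_INTR) cannot be handed to the pager, so it must take the fatal-fault path. The shared pattern, with the consequence sketched as a comment:

    if (cpu_intr_p() || (l->l_pflag & LP_INTR) != 0) {
            /* No VM fault servicing from (soft) interrupt
             * context; the real code branches to the fatal
             * kernel-trap handling here. */
    }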
  /src/sys/arch/powerpc/booke/
e500_intr.c 516 KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE);
