/*	$NetBSD: kern_idle.c,v 1.36 2024/03/01 04:32:38 mrg Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.36 2024/03/01 04:32:38 mrg Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */

void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	KASSERT(l->l_blcnt == 0);
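	/* The idle LWP must not be holding the kernel big-lock. */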

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
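	/*
	 * Idle LWPs are a special case: they spend their whole life in
	 * the LSIDL state and are never placed on a run queue.  The
	 * assertion after mi_switch() below depends on this.
	 */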
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();

	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);
		KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d",
		    l, l->l_nopreempt);

		sched_idle();
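		/*
		 * If nothing is runnable: let UVM use the idle cycles
		 * (unless this CPU is marked offline), re-check, and
		 * then idle the CPU in MD code until something happens,
		 * e.g. an interrupt arrives.  If we wake and still find
		 * nothing runnable and no reschedule pending, loop
		 * again without switching.
		 */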
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
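		/*
		 * Something is runnable (or a reschedule was requested):
		 * lock this LWP and the CPU's run queue, then call
		 * mi_switch() to hand the CPU to the next LWP.
		 */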
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
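	/*
	 * Each CPU gets exactly one idle LWP, created bound to that CPU
	 * at the lowest scheduling priority and named after the CPU
	 * index (e.g. "idle/0").
	 */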
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}