/*	$NetBSD: kern_idle.c,v 1.34.20.1 2024/09/11 10:09:19 martin Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.34.20.1 2024/09/11 10:09:19 martin Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */

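/*
 * idle_loop:
 *
 *	Body of each CPU's idle LWP.  Runs whenever the CPU has nothing
 *	else to do and never returns.
 */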
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();
	KERNEL_UNLOCK_ALL(l, NULL);

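	/*
	 * Main idle loop: alternate between waiting for work and handing
	 * the CPU to runnable LWPs via mi_switch().
	 */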
	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);
		KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d",
		    l, l->l_nopreempt);

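		/*
		 * Run the scheduler's idle-time hook.  If nothing is
		 * runnable afterwards, spend the idle time in uvm_idle()
		 * (unless this CPU is offline), then wait in cpu_idle()
		 * until there is work or a reschedule is requested.
		 */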
		sched_idle();
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
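		/*
		 * Something is runnable (or a reschedule was requested),
		 * so switch away.  mi_switch() returns here the next time
		 * this idle LWP is chosen to run.
		 */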
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

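/*
 * create_idle_lwp:
 *
 *	Create the idle LWP for the given CPU.  There is exactly one
 *	idle LWP per CPU, and failure to create it is fatal.
 */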
int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}