/*	$NetBSD: kern_idle.c,v 1.34 2020/09/05 16:30:12 riastradh Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.34 2020/09/05 16:30:12 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */

void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
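	/* Announce this CPU in the global set of running CPUs. */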
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
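	/* An idle LWP never sits on a run queue; it idles in LSIDL. */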
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();
	KERNEL_UNLOCK_ALL(l, NULL);

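	/*
	 * Main idle loop, never exited.  Each iteration either pauses
	 * the CPU until there is work, or switches away to a runnable
	 * LWP; mi_switch() returns here once this CPU next selects the
	 * idle LWP again.
	 */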
	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

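		/*
		 * Let the scheduler do idle-time work (e.g. pulling
		 * jobs from busier CPUs).  If nothing is runnable
		 * afterwards, give UVM a chance to use the idle cycles
		 * (skipped when this CPU is offline), then pause the
		 * CPU in cpu_idle() and re-check on wakeup.
		 */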
		sched_idle();
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
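		/*
		 * There is work for this CPU, or a reschedule was
		 * requested: lock the LWP and the run queue, then
		 * switch away.
		 */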
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
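	/*
	 * Create the per-CPU idle kthread.  KTHREAD_IDLE keeps it off
	 * the run queue: it only ever runs when nothing else can.
	 */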
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
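	/* Flag the new thread as an idle LWP, under its lock. */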
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}