kern_idle.c revision 1.25.48.1
/*	$NetBSD: kern_idle.c,v 1.25.48.1 2020/04/08 14:08:51 martin Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.25.48.1 2020/04/08 14:08:51 martin Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */
#include <uvm/uvm_extern.h>

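/*
 * idle_loop: the body of each CPU's idle LWP.  It runs whenever no other
 * LWP is runnable on this CPU, repeatedly checking the run queue and
 * letting the machine-dependent cpu_idle() wait for work.
 */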
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

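	/*
	 * Initial setup: mark this CPU as running in kcpuset_running and
	 * in the per-CPU scheduler state, then record this LWP's start
	 * time and set it to LSIDL before dropping its lock.
	 */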
	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();
	KERNEL_UNLOCK_ALL(l, NULL);

	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

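		/*
		 * Let the scheduler do its idle-time processing, then check
		 * whether any LWP is runnable on this CPU.  If not, give the
		 * spare cycles to UVM (unless the CPU is offline) and wait
		 * for work in the machine-dependent cpu_idle(), re-checking
		 * the run queue around each step.
		 */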
		sched_idle();
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
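		/*
		 * Work is available (or a reschedule was requested), so
		 * lock the LWP and the run queue and switch away from the
		 * idle LWP via mi_switch().
		 */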
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

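/*
 * create_idle_lwp: create the idle LWP for the given CPU and record it
 * in ci->ci_data.cpu_idlelwp.  Panics if the kthread cannot be created.
 */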
int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}