/*	$NetBSD: kern_idle.c,v 1.14 2008/04/24 13:56:30 ad Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.14 2008/04/24 13:56:30 ad Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#if MAXCPUS > 32
#error fix this code
#endif

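/*
 * Bitmask with one bit per CPU (by cpu_index()), set while that CPU's
 * idle loop has nothing to run.  It is updated with atomic operations
 * and read without locks, so it may be momentarily stale; see the
 * comment above idle_pick().
 */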
static volatile uint32_t *idle_cpus;

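/*
 * idle_loop: the body of each CPU's idle LWP.  While the CPU has
 * nothing to run, it advertises itself in idle_cpus, zeroes free
 * pages for UVM and/or halts in cpu_idle(), and calls mi_switch()
 * as soon as work becomes available.
 */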
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct lwp *l = curlwp;
	unsigned mask = (1 << cpu_index(ci));
	bool set = false;
	int s;

	/* Update start time for this thread. */
	lwp_lock(l);
	binuptime(&l->l_stime);
	lwp_unlock(l);

	/* Tell the scheduler that this CPU is up and running. */
	s = splsched();
	ci->ci_schedstate.spc_flags |= SPCF_RUNNING;
	splx(s);

	/* The idle loop runs without the big kernel lock. */
	KERNEL_UNLOCK_ALL(l, NULL);
	l->l_stat = LSONPROC;
	while (1 /* CONSTCOND */) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

		if (uvm.page_idle_zero) {
			/*
			 * Zero out free pages while idle, but stop as
			 * soon as there is something to run.
			 */
			if (sched_curcpu_runnable_p()) {
				goto schedule;
			}
			if (!set) {
				/* Advertise this CPU as idle first. */
				set = true;
				atomic_or_32(idle_cpus, mask);
			}
			uvm_pageidlezero();
		}
		if (!sched_curcpu_runnable_p()) {
			if (!set) {
				set = true;
				atomic_or_32(idle_cpus, mask);
			}
			/* Nothing to run: halt until an interrupt. */
			cpu_idle();
			if (!sched_curcpu_runnable_p() &&
			    !ci->ci_want_resched) {
				continue;
			}
		}
schedule:
		/*
		 * There is work for this CPU (or a reschedule was
		 * requested): clear our idle bit and let mi_switch()
		 * pick the next LWP to run.
		 */
		if (set) {
			set = false;
			atomic_and_32(idle_cpus, ~mask);
		}
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSONPROC);
	}
}

/*
 * Find an idle CPU and remove it from the idle bitmask.  The bitmask
 * is not always accurate, but it is "good enough" for the purpose of
 * finding a CPU to run a job on.
 */
struct cpu_info *
idle_pick(void)
{
	uint32_t mask;
	u_int index;

	do {
		/* Snapshot the bitmask; give up if no CPU is idle. */
		if ((mask = *idle_cpus) == 0)
			return NULL;
		index = ffs(mask) - 1;
		/* Claim the lowest idle CPU; retry if we lost a race. */
	} while (atomic_cas_32(idle_cpus, mask, mask ^ (1 << index)) != mask);

	return cpu_lookup_byindex(index);
}
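
/*
 * Hypothetical usage sketch (no such caller exists in this file):
 * code dispatching a newly runnable job could claim an idle CPU with
 * idle_pick() and then kick it via cpu_need_resched(), for example:
 *
 *	struct cpu_info *ci;
 *
 *	if ((ci = idle_pick()) != NULL)
 *		cpu_need_resched(ci, 0);
 */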

int
create_idle_lwp(struct cpu_info *ci)
{
	static bool again;
	uintptr_t addr;
	lwp_t *l;
	int error;

	if (!again) {
		/*
		 * First call: allocate the idle CPU bitmask.  Use
		 * kmem_zalloc() so it starts out zeroed (no CPU is
		 * spuriously marked idle), and pad the allocation so
		 * the bitmask can be aligned to a coherency unit.
		 */
		again = true;
		addr = (uintptr_t)kmem_zalloc(coherency_unit * 2, KM_SLEEP);
		addr = roundup(addr, coherency_unit);
		idle_cpus = (uint32_t *)addr;
	}

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	/* Create this CPU's idle LWP at the lowest (idle) priority. */
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	lwp_unlock(l);
	l->l_cpu = ci;
	ci->ci_data.cpu_idlelwp = l;

	return error;
}
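
/*
 * For orientation (this caller lives elsewhere): create_idle_lwp()
 * is expected to be called once for each CPU as it is attached, in
 * NetBSD from mi_cpu_attach() in kern_cpu.c, before that CPU is
 * allowed to schedule other LWPs.
 */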