/*	$NetBSD: kern_idle.c,v 1.13.2.1 2008/05/18 12:35:08 yamt Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.13.2.1 2008/05/18 12:35:08 yamt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#if MAXCPUS > 32
#error fix this code
#endif

/* Bitmask of idle CPUs, one bit per cpu_index(). */
static volatile uint32_t *idle_cpus;

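/*
 * idle_loop: the body of each CPU's idle LWP.  Zero free pages while
 * there is nothing to run, advertise the CPU in the idle bitmask, and
 * switch away as soon as work becomes available.
 */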
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct lwp *l = curlwp;
	uint32_t mask = 1 << cpu_index(ci);
	bool set = false;
	int s;

	/* Update start time for this thread. */
	lwp_lock(l);
	binuptime(&l->l_stime);
	lwp_unlock(l);

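	/* Mark this CPU's scheduler state as up and running. */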
	s = splsched();
	ci->ci_schedstate.spc_flags |= SPCF_RUNNING;
	splx(s);

	KERNEL_UNLOCK_ALL(l, NULL);
	l->l_stat = LSONPROC;
	while (1 /* CONSTCOND */) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

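		/*
		 * While the page daemon wants free pages zeroed,
		 * advertise this CPU as idle and zero pages, bailing
		 * out as soon as something becomes runnable.
		 */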
		if (uvm.page_idle_zero) {
			if (sched_curcpu_runnable_p()) {
				goto schedule;
			}
			if (!set) {
				set = true;
				atomic_or_32(idle_cpus, mask);
			}
			uvm_pageidlezero();
		}
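		/*
		 * Still nothing to run: mark this CPU idle in the
		 * bitmask and enter the machine-dependent cpu_idle().
		 */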
		if (!sched_curcpu_runnable_p()) {
			if (!set) {
				set = true;
				atomic_or_32(idle_cpus, mask);
			}
			cpu_idle();
			if (!sched_curcpu_runnable_p() &&
			    !ci->ci_want_resched) {
				continue;
			}
		}
schedule:
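		/*
		 * Something to run (or a resched request): clear our
		 * idle bit and switch away.
		 */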
		if (set) {
			set = false;
			atomic_and_32(idle_cpus, ~mask);
		}
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSONPROC);
	}
}

/*
 * Find an idle CPU and remove it from the idle bitmask.  The bitmask
 * is not always accurate but is "good enough" for the purpose of finding
 * a CPU to run a job on.
 */
struct cpu_info *
idle_pick(void)
{
	uint32_t mask;
	u_int index;

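	/*
	 * Compare-and-swap loop: retry until the bit for the chosen
	 * CPU is cleared without the mask changing underneath us.
	 */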
	do {
		if ((mask = *idle_cpus) == 0)
			return NULL;
		index = ffs(mask) - 1;
	} while (atomic_cas_32(idle_cpus, mask, mask ^ (1 << index)) != mask);

	return cpu_lookup_byindex(index);
}

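/*
 * Create the idle LWP for the given CPU and record it in
 * ci->ci_data.cpu_idlelwp.
 */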
int
create_idle_lwp(struct cpu_info *ci)
{
	static bool again;
	uintptr_t addr;
	lwp_t *l;
	int error;

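	/*
	 * On the first call, allocate the global idle CPU bitmask.
	 * Over-allocate and round up so it sits in its own coherency
	 * unit and avoids false sharing.
	 */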
	if (!again) {
		again = true;
		addr = (uintptr_t)kmem_alloc(coherency_unit * 2, KM_SLEEP);
		addr = roundup(addr, coherency_unit);
		idle_cpus = (uint32_t *)addr;
	}

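	/* Create the idle kthread for this CPU and flag it as an idle LWP. */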
	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	lwp_unlock(l);
	l->l_cpu = ci;
	ci->ci_data.cpu_idlelwp = l;

	return error;
}