/*	$NetBSD: kern_idle.c,v 1.17 2008/05/24 12:59:06 ad Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.17 2008/05/24 12:59:06 ad Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#if MAXCPUS > 32
#error fix this code
#endif
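
/*
 * Bitmask of CPUs currently running their idle loop.  It is updated
 * without locking, so it is only a hint; see idle_pick() below.
 */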
static volatile uint32_t *idle_cpus;
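
/*
 * idle_loop: the main loop of each CPU's idle LWP.  While the CPU is
 * idle it zeroes free pages for UVM and/or sleeps in cpu_idle(); as
 * soon as another LWP becomes runnable it switches away via mi_switch().
 */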
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct lwp *l = curlwp;
	uint32_t mask = 1 << cpu_index(ci);
	bool set = false;
	int s;

	ci->ci_data.cpu_onproc = l;

	/* Update start time for this thread. */
	lwp_lock(l);
	binuptime(&l->l_stime);
	lwp_unlock(l);

	s = splsched();
	ci->ci_schedstate.spc_flags |= SPCF_RUNNING;
	splx(s);

	KERNEL_UNLOCK_ALL(l, NULL);
	l->l_stat = LSONPROC;
	while (1 /* CONSTCOND */) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

		if (uvm.page_idle_zero) {
			if (sched_curcpu_runnable_p()) {
				goto schedule;
			}
			/* Advertise this CPU as idle before zeroing pages. */
			if (!set) {
				set = true;
				atomic_or_32(idle_cpus, mask);
			}
			uvm_pageidlezero();
		}
		if (!sched_curcpu_runnable_p()) {
			/* Nothing to do: mark the CPU idle and sleep. */
			if (!set) {
				set = true;
				atomic_or_32(idle_cpus, mask);
			}
			cpu_idle();
			if (!sched_curcpu_runnable_p() &&
			    !ci->ci_want_resched) {
				continue;
			}
		}
schedule:
		/* There is work: clear our bit so idle_pick() skips us. */
		if (set) {
			set = false;
			atomic_and_32(idle_cpus, ~mask);
		}
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSONPROC);
	}
}

/*
 * Find an idle CPU and remove it from the idle bitmask.  The bitmask
 * is not always accurate, but it is "good enough" for the purpose of
 * finding a CPU to run a job on.
 */
struct cpu_info *
idle_pick(void)
{
	uint32_t mask;
	u_int index;

	do {
		if ((mask = *idle_cpus) == 0)
			return NULL;
		index = ffs(mask) - 1;
	} while (atomic_cas_32(idle_cpus, mask, mask ^ (1 << index)) != mask);

	return cpu_lookup_byindex(index);
}
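
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * code dispatching a job to another CPU could combine idle_pick() with
 * cpu_need_resched() roughly as follows.
 *
 *	struct cpu_info *ci;
 *
 *	if ((ci = idle_pick()) != NULL) {
 *		// ci was probably idle a moment ago: queue the job on
 *		// it, then kick it so it leaves cpu_idle() promptly.
 *		cpu_need_resched(ci, 0);
 *	}
 *
 * Because the bitmask is only a hint, a caller must tolerate ci having
 * already picked up other work by the time the job arrives.
 */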

/*
 * create_idle_lwp: create and attach the idle LWP for CPU "ci".
 * Called once for each CPU during attach.
 */
int
create_idle_lwp(struct cpu_info *ci)
{
	static bool again;
	uintptr_t addr;
	lwp_t *l;
	int error;

	/*
	 * On the first call, allocate the idle bitmask, rounded up to
	 * a coherency_unit boundary to avoid false sharing.
	 */
	if (!again) {
		again = true;
		addr = (uintptr_t)kmem_alloc(coherency_unit * 2, KM_SLEEP);
		addr = roundup(addr, coherency_unit);
		idle_cpus = (uint32_t *)addr;
	}

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	lwp_unlock(l);
	l->l_cpu = ci;
	ci->ci_data.cpu_idlelwp = l;

	return error;
}