/*	$NetBSD: kern_idle.c,v 1.26 2019/11/23 19:42:52 ad Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.26 2019/11/23 19:42:52 ad Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_pageidlezero */
#include <uvm/uvm_extern.h>
44
/*
 * idle_loop: body of each CPU's idle LWP.
 *
 * Marks this CPU as running, finishes initializing the idle LWP's
 * scheduling state, then loops forever: while nothing is runnable,
 * zero free pages and idle the CPU; as soon as work appears, yield
 * via mi_switch().  Never returns.
 *
 * => dummy: unused kthread argument (the CPU is taken from curcpu()).
 */
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	/* Announce that this CPU is up in the set of running CPUs. */
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	spc = &ci->ci_schedstate;
	ci->ci_data.cpu_onproc = l;

	/* Update start time for this thread. */
	lwp_lock(l);
	KASSERT(lwp_locked(l, spc->spc_lwplock));
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	l->l_stat = LSONPROC;
	l->l_pflag |= LP_RUNNING;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority. This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();
	KERNEL_UNLOCK_ALL(l, NULL);

	for (;;) {
		/* Sanity checks: no locks held, and we really are the
		 * idle LWP on this CPU. */
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

		sched_idle();
		if (!sched_curcpu_runnable_p()) {
			/*
			 * Nothing runnable: zero free pages, unless this
			 * CPU has been taken offline.
			 */
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_pageidlezero();
			}
			/*
			 * Re-check after zeroing (it can take a while),
			 * and again after cpu_idle() returns — the MD
			 * idle hook presumably blocks until the next
			 * interrupt.  If there is still no work and no
			 * resched request, go around without switching.
			 */
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
		/* Work is available: switch away so it can run. */
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSONPROC);
	}
}
101
102 int
103 create_idle_lwp(struct cpu_info *ci)
104 {
105 lwp_t *l;
106 int error;
107
108 KASSERT(ci->ci_data.cpu_idlelwp == NULL);
109 error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
110 ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
111 if (error != 0)
112 panic("create_idle_lwp: error %d", error);
113 lwp_lock(l);
114 l->l_flag |= LW_IDLE;
115 lwp_unlock(l);
116 ci->ci_data.cpu_idlelwp = l;
117
118 return error;
119 }
120