/*	$NetBSD: kern_idle.c,v 1.36 2024/03/01 04:32:38 mrg Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.36 2024/03/01 04:32:38 mrg Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */

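/*
 * idle_loop() is the body of each CPU's idle LWP: it runs whenever the
 * CPU has nothing else to do, giving spare cycles to the scheduler and
 * UVM and otherwise pausing the CPU via the MD cpu_idle() hook.
 * create_idle_lwp() creates one such LWP for each CPU.
 */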
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	KASSERT(l->l_blcnt == 0);

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();

	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);
		KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d",
		    l, l->l_nopreempt);

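		/*
		 * Give the scheduler a chance to do idle-time work,
		 * then, if nothing is runnable on this CPU, let UVM
		 * use the spare cycles (unless the CPU is offline) and
		 * call the MD cpu_idle() to pause the processor.  Only
		 * fall through to mi_switch() once there is a reason
		 * to run something.
		 */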
		sched_idle();
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
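		/*
		 * Something is runnable (or a reschedule was requested):
		 * lock ourselves and this CPU's run queue, then let
		 * mi_switch() hand the CPU to whatever the scheduler
		 * picks.  We resume here the next time the CPU idles.
		 */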
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
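	/*
	 * Create the idle LWP as an MP-safe kernel thread bound to this
	 * CPU.  KTHREAD_IDLE means it is not set running here: it only
	 * runs when the CPU has nothing else to do.
	 */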
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}