/*	$NetBSD: kern_kthread.c,v 1.15.18.4 2008/01/21 09:46:05 yamt Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.15.18.4 2008/01/21 09:46:05 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

/*
 * Note that stdarg.h and the ANSI-style va_start() macro are used for
 * both ANSI and traditional C compilers.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

/*
 * Fork a kernel thread. Any process can request this to be done.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg,
    lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	bool inmem;
	int error;
	va_list ap;

	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0)
		return ENOMEM;
	error = lwp_create(&lwp0, &proc0, uaddr, inmem, LWP_DETACHED, NULL,
	    0, func, arg, &l, SCHED_FIFO);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		return error;
	}
	uvm_lwp_hold(l);
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		if (l->l_name == NULL) {
			lwp_exit(l);
			return ENOMEM;
		}
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if ((flag & KTHREAD_INTR) != 0) {
		KASSERT((flag & KTHREAD_MPSAFE) != 0);
	}

	if (pri == PRI_NONE) {
		/* Minimum kernel priority level. */
		pri = PRI_KTHREAD;
	}
	mutex_enter(&proc0.p_smutex);
	lwp_lock(l);
	l->l_priority = pri;
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
			lwp_lock(l);
		}
		l->l_flag |= LW_BOUND;
		l->l_cpu = ci;
	}
	if ((flag & KTHREAD_INTR) != 0)
		l->l_pflag |= LP_INTR;
	if ((flag & KTHREAD_MPSAFE) == 0)
		l->l_pflag &= ~LP_MPSAFE;

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	if ((flag & KTHREAD_IDLE) == 0) {
		l->l_stat = LSRUN;
		sched_enqueue(l, false);
		lwp_unlock(l);
	} else
		lwp_unlock_to(l, &ci->ci_schedstate.spc_lwplock);

	/*
	 * The LWP is not created suspended or stopped and cannot be set
	 * into those states later, so must be considered runnable.
	 */
	proc0.p_nrlwps++;
	mutex_exit(&proc0.p_smutex);

	/* All done! */
	if (lp != NULL)
		*lp = l;

	return (0);
}
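
/*
 * Example of typical use (illustrative sketch only; "example_thread",
 * "sc" and its members are hypothetical caller-side names, not part of
 * this file):
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *	    example_thread, sc, &sc->sc_lwp, "examplethr");
 *	if (error != 0)
 *		printf("example: unable to create kthread (%d)\n", error);
 *
 * A NULL cpu_info pointer leaves the new thread unbound; passing a
 * non-NULL "ci" binds it to that CPU, as the code above shows.
 */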

/*
 * Cause a kernel thread to exit. Assumes the exiting thread is the
 * current context.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* And exit.. */
	lwp_exit(l);

	/*
	 * XXX Fool the compiler. Making lwp_exit() __noreturn__ is a can
	 * XXX of worms right now.
	 */
	for (;;)
		;
}
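
/*
 * Illustrative sketch of a kthread main routine that terminates itself
 * with kthread_exit() ("example_softc", "sc_dying" and "example_work"
 * are hypothetical names, not part of this file):
 *
 *	static void
 *	example_thread(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		while (!sc->sc_dying)
 *			example_work(sc);
 *
 *		kthread_exit(0);
 *	}
 *
 * A non-zero exit code only produces the warning printed above; it is
 * not reported anywhere else.
 */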

/*
 * Destroy an inactive kthread. The kthread must be in the LSIDL state.
 */
void
kthread_destroy(lwp_t *l)
{

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT(l->l_stat == LSIDL);

	lwp_exit(l);
}
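
/*
 * Illustrative sketch of kthread_destroy() use (hypothetical names, not
 * part of this file): a kthread created with KTHREAD_IDLE is not set
 * running by kthread_create() above and so remains in LSIDL, which lets
 * the caller tear it down before it has ever run:
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_IDLE | KTHREAD_MPSAFE,
 *	    ci, example_thread, NULL, &l, "example");
 *	...
 *	if (error == 0 && no_longer_needed)
 *		kthread_destroy(l);
 */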