/*	$NetBSD: sys_lwp.c,v 1.1.2.1 2006/10/21 14:26:41 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.1.2.1 2006/10/21 14:26:41 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
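
/*
 * Create a new LWP in the calling process from the ucontext supplied by
 * the caller.  The new LWP's ID is copied out to userspace; unless
 * LWP_SUSPENDED was requested, the LWP is placed on the run queue.
 */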
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int error, lid;

	mutex_enter(&p->p_smutex);
	if (p->p_flag & (P_SA | P_WEXIT)) {
		mutex_exit(&p->p_smutex);
		return EINVAL;
	}
	/* XXXAD p->p_flag |= P_NOSA; */
	mutex_exit(&p->p_smutex);

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc,
	    l->l_proc->p_emul->e_sa->sae_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	lid = l2->l_lid;
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		p->p_nrlwps++;
		l2->l_stat = LSRUN;
		mutex_enter(&sched_mutex);
		lwp_swaplock_linked(l2, &sched_mutex);
		setrunqueue(l2);
	} else
		l2->l_stat = LSSUSPENDED;
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		/* XXX We should destroy the LWP. */
		return (error);
	}

	return (0);
}
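
/*
 * Terminate the calling LWP.  Does not return to the caller.
 */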
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}
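
/*
 * Return the LWP ID of the calling LWP.
 */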
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}
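
/*
 * Return the per-LWP private data pointer of the calling LWP.
 */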
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	mb_read();
	*retval = (uintptr_t) l->l_private;

	return (0);
}
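
/*
 * Set the per-LWP private data pointer of the calling LWP.
 */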
int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);
	mb_write();

	return (0);
}
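
/*
 * Suspend execution of the target LWP.  Fails with EDEADLK if the caller
 * would be suspending the last runnable LWP in the process (itself).
 */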
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);

	if (p->p_flag & P_SA) {
		mutex_exit(&p->p_smutex);
		return EINVAL;
	}

	if ((t = lwp_byid(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return (ESRCH);
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.
	 */
	if (t == l && l->l_stat == LSONPROC && p->p_nrlwps == 1) {
		lwp_unlock(l);
		mutex_exit(&p->p_smutex);
		return (EDEADLK);
	}

	/*
	 * Suspend the LWP.  If it's running on a different CPU, we need to
	 * wait for it to be preempted, at which point it will put itself to
	 * sleep.  If we are not suspending ourselves, the LWP is returned
	 * unlocked.
	 */
	error = lwp_halt(l, t, LSSUSPENDED);
	mutex_exit(&p->p_smutex);

	/*
	 * If we suspended ourselves, we need to sleep now.
	 */
	if (t == l && !error) {
		lwp_lock(l);
		l->l_nvcsw++;
		mi_switch(t, NULL);
	}

	return (error);
}
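
/*
 * Resume execution of a suspended LWP in the calling process.  Resuming
 * an LWP that is not suspended, or the caller itself, is a no-op.
 */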
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(&p->p_smutex);

	if (p->p_flag & P_SA)
		error = EINVAL;
	else if ((t = lwp_byid(p, SCARG(uap, target))) == NULL)
		error = ESRCH;
	else if (t == l || t->l_stat != LSSUSPENDED)
		lwp_unlock(t);
	else
		lwp_continue(t);

	mutex_exit(&p->p_smutex);

	return (error);
}
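
/*
 * Wake the target LWP from an interruptible sleep.  Fails with ENODEV if
 * the LWP is not sleeping, or EBUSY if the sleep is not interruptible.
 */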
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(&p->p_smutex);

	if ((t = lwp_byid(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto bad;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto bad;
	}

	/*
	 * Tell ltsleep to wakeup.  setrunnable() will release the mutex.
	 */
	t->l_flag |= L_CANCELLED;
	setrunnable(t);
	mutex_exit(&p->p_smutex);
	return 0;

 bad:
	/* Unlock the target LWP, which was locked by lwp_byid(). */
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);
	return error;
}
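
/*
 * Wait for an LWP in the calling process to exit and, if requested,
 * copy the ID of the departed LWP out to the caller.
 */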
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(&p->p_smutex);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}