/*	$NetBSD: linux_sched.c,v 1.19 2005/06/22 15:10:51 manu Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; by Matthias Scheler.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux compatibility module.  Try to deal with scheduler related syscalls.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.19 2005/06/22 15:10:51 manu Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>

#include <machine/cpu.h>

#include <compat/linux/common/linux_types.h>
#include <compat/linux/common/linux_signal.h>
#include <compat/linux/common/linux_emuldata.h>

#include <compat/linux/linux_syscallargs.h>

#include <compat/linux/common/linux_sched.h>
int
linux_sys_clone(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct linux_sys_clone_args /* {
                syscallarg(int) flags;
                syscallarg(void *) stack;
#ifdef __amd64__
                syscallarg(void *) parent_tidptr;
                syscallarg(void *) child_tidptr;
#endif
        } */ *uap = v;
        int flags, sig;
        int error;
#ifdef __amd64__
        struct linux_emuldata *led;
#endif

        /*
         * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
         */
        if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
                return (EINVAL);

        /*
         * A thread group implies shared signal handlers, and shared
         * signal handlers imply shared VM.  This matches what the
         * Linux kernel does.
         */
        if (SCARG(uap, flags) & LINUX_CLONE_THREAD
            && (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
                return (EINVAL);
        if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
            && (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
                return (EINVAL);

        flags = 0;

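        /*
         * Translate the Linux clone(2) sharing flags into their
         * native fork1(9) counterparts.
         */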
        if (SCARG(uap, flags) & LINUX_CLONE_VM)
                flags |= FORK_SHAREVM;
        if (SCARG(uap, flags) & LINUX_CLONE_FS)
                flags |= FORK_SHARECWD;
        if (SCARG(uap, flags) & LINUX_CLONE_FILES)
                flags |= FORK_SHAREFILES;
        if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
                flags |= FORK_SHARESIGS;
        if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
                flags |= FORK_PPWAIT;

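        /*
         * The low byte of the clone flags holds the signal that is
         * delivered to the parent when the child exits; convert it
         * to a native signal number.
         */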
        sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
        if (sig < 0 || sig >= LINUX__NSIG)
                return (EINVAL);
        sig = linux_to_native_signo[sig];

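        /*
         * On amd64 we also honour the TID bookkeeping flags used by
         * NPTL, recording the user-space TID pointers in the
         * per-process emulation data.
         */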
#ifdef __amd64__
        led = (struct linux_emuldata *)l->l_proc->p_emuldata;

        if (SCARG(uap, flags) & LINUX_CLONE_PARENT_SETTID) {
                if (SCARG(uap, parent_tidptr) == NULL) {
                        printf("linux_sys_clone: NULL parent_tidptr\n");
                        return EINVAL;
                }

                if ((error = copyout(&l->l_proc->p_pid,
                    SCARG(uap, parent_tidptr),
                    sizeof(l->l_proc->p_pid))) != 0)
                        return error;
        }

        /* CLONE_CHILD_CLEARTID: TID cleared in the child on exit() */
        if (SCARG(uap, flags) & LINUX_CLONE_CHILD_CLEARTID)
                led->child_clear_tid = SCARG(uap, child_tidptr);
        else
                led->child_clear_tid = NULL;

        /* CLONE_CHILD_SETTID: TID set in the child on clone() */
        if (SCARG(uap, flags) & LINUX_CLONE_CHILD_SETTID)
                led->child_set_tid = SCARG(uap, child_tidptr);
        else
                led->child_set_tid = NULL;
#endif
        /*
         * Note that Linux does not provide a portable way of specifying
         * the stack area; the caller must know whether the stack grows up
         * or down.  So we pass a stack size of 0, which makes the code
         * that would perform this adjustment a no-op.
         */
        if ((error = fork1(l, flags, sig, SCARG(uap, stack), 0,
            NULL, NULL, retval, NULL)) != 0)
                return error;

        return 0;
}

int
linux_sys_sched_setparam(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{
        struct linux_sys_sched_setparam_args /* {
                syscallarg(linux_pid_t) pid;
                syscallarg(const struct linux_sched_param *) sp;
        } */ *uap = v;
        struct proc *cp = cl->l_proc;
        int error;
        struct linux_sched_param lp;
        struct proc *p;

        /*
         * We only check for valid parameters and return afterwards.
         */

        if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
                return EINVAL;

        error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
        if (error)
                return error;

        if (SCARG(uap, pid) != 0) {
                struct pcred *pc = cp->p_cred;

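                /*
                 * The caller may act on the target if it is the same
                 * process, the superuser, or shares a real or effective
                 * uid with it (in any combination).
                 */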
                if ((p = pfind(SCARG(uap, pid))) == NULL)
                        return ESRCH;
                if (!(cp == p ||
                      pc->pc_ucred->cr_uid == 0 ||
                      pc->p_ruid == p->p_cred->p_ruid ||
                      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
                      pc->p_ruid == p->p_ucred->cr_uid ||
                      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
                        return EPERM;
        }

        return 0;
}

int
linux_sys_sched_getparam(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{
        struct linux_sys_sched_getparam_args /* {
                syscallarg(linux_pid_t) pid;
                syscallarg(struct linux_sched_param *) sp;
        } */ *uap = v;
        struct proc *cp = cl->l_proc;
        struct proc *p;
        struct linux_sched_param lp;

        /*
         * We only check for valid parameters and return a dummy
         * priority afterwards.
         */
        if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
                return EINVAL;

        if (SCARG(uap, pid) != 0) {
                struct pcred *pc = cp->p_cred;

                if ((p = pfind(SCARG(uap, pid))) == NULL)
                        return ESRCH;
                if (!(cp == p ||
                      pc->pc_ucred->cr_uid == 0 ||
                      pc->p_ruid == p->p_cred->p_ruid ||
                      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
                      pc->p_ruid == p->p_ucred->cr_uid ||
                      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
                        return EPERM;
        }

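        /* The only priority we report is 0, as used by SCHED_OTHER. */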
        lp.sched_priority = 0;
        return copyout(&lp, SCARG(uap, sp), sizeof(lp));
}

int
linux_sys_sched_setscheduler(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{
        struct linux_sys_sched_setscheduler_args /* {
                syscallarg(linux_pid_t) pid;
                syscallarg(int) policy;
                syscallarg(const struct linux_sched_param *) sp;
        } */ *uap = v;
        struct proc *cp = cl->l_proc;
        int error;
        struct linux_sched_param lp;
        struct proc *p;

        /*
         * We only check for valid parameters and return afterwards.
         */

        if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
                return EINVAL;

        error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
        if (error)
                return error;

        if (SCARG(uap, pid) != 0) {
                struct pcred *pc = cp->p_cred;

                if ((p = pfind(SCARG(uap, pid))) == NULL)
                        return ESRCH;
                if (!(cp == p ||
                      pc->pc_ucred->cr_uid == 0 ||
                      pc->p_ruid == p->p_cred->p_ruid ||
                      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
                      pc->p_ruid == p->p_ucred->cr_uid ||
                      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
                        return EPERM;
        }

        /*
         * We can't emulate anything but the default scheduling policy.
         */
        if (SCARG(uap, policy) != LINUX_SCHED_OTHER || lp.sched_priority != 0)
                return EINVAL;

        return 0;
}

int
linux_sys_sched_getscheduler(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{
        struct linux_sys_sched_getscheduler_args /* {
                syscallarg(linux_pid_t) pid;
        } */ *uap = v;
        struct proc *cp = cl->l_proc;
        struct proc *p;

        *retval = -1;
        /*
         * We only check for valid parameters and return afterwards.
         */

        if (SCARG(uap, pid) != 0) {
                struct pcred *pc = cp->p_cred;

                if ((p = pfind(SCARG(uap, pid))) == NULL)
                        return ESRCH;
                if (!(cp == p ||
                      pc->pc_ucred->cr_uid == 0 ||
                      pc->p_ruid == p->p_cred->p_ruid ||
                      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
                      pc->p_ruid == p->p_ucred->cr_uid ||
                      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
                        return EPERM;
        }

        /*
         * We can't emulate anything but the default scheduling policy.
         */
        *retval = LINUX_SCHED_OTHER;
        return 0;
}

int
linux_sys_sched_yield(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{

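        /* Linux sched_yield(2) maps directly onto the native yield(9). */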
        yield();
        return 0;
}

int
linux_sys_sched_get_priority_max(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{
        struct linux_sys_sched_get_priority_max_args /* {
                syscallarg(int) policy;
        } */ *uap = v;

        /*
         * We can't emulate anything but the default scheduling policy.
         */
        if (SCARG(uap, policy) != LINUX_SCHED_OTHER) {
                *retval = -1;
                return EINVAL;
        }

        *retval = 0;
        return 0;
}

int
linux_sys_sched_get_priority_min(cl, v, retval)
        struct lwp *cl;
        void *v;
        register_t *retval;
{
        struct linux_sys_sched_get_priority_min_args /* {
                syscallarg(int) policy;
        } */ *uap = v;

        /*
         * We can't emulate anything but the default scheduling policy.
         */
        if (SCARG(uap, policy) != LINUX_SCHED_OTHER) {
                *retval = -1;
                return EINVAL;
        }

        *retval = 0;
        return 0;
}

#ifndef __m68k__
/* Present on everything but m68k */
int
linux_sys_exit_group(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct linux_sys_exit_group_args /* {
                syscallarg(int) error_code;
        } */ *uap = v;

        /*
         * XXX The calling thread is supposed to kill all threads
         * in the same thread group (i.e. all threads created
         * via clone(2) with the CLONE_THREAD flag set).  This does
         * not appear to be used yet, so thread group handling is
         * currently not implemented.
         */

        exit1(l, W_EXITCODE(SCARG(uap, error_code), 0));
        /* NOTREACHED */
        return 0;
}
#endif /* !__m68k__ */

#ifdef __amd64__
int
linux_sys_set_tid_address(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct linux_sys_set_tid_address_args /* {
                syscallarg(int *) tid;
        } */ *uap = v;
        struct linux_emuldata *led;

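        /*
         * Remember where to clear the TID when the process exits
         * (the CLONE_CHILD_CLEARTID protocol) and return the PID,
         * which stands in for the Linux TID.
         */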
        led = (struct linux_emuldata *)l->l_proc->p_emuldata;
        led->clear_tid = SCARG(uap, tid);

        *retval = l->l_proc->p_pid;

        return 0;
}
#endif /* __amd64__ */