/* $NetBSD: linux_sched.c,v 1.42.6.1 2007/10/23 20:17:00 ad Exp $ */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; by Matthias Scheler.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux compatibility module.  Try to deal with scheduler-related syscalls.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.42.6.1 2007/10/23 20:17:00 ad Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>
#include <sys/kauth.h>
#include <sys/ptrace.h>

#include <sys/cpu.h>

#include <compat/linux/common/linux_types.h>
#include <compat/linux/common/linux_signal.h>
#include <compat/linux/common/linux_machdep.h>	/* For LINUX_NPTL */
#include <compat/linux/common/linux_emuldata.h>

#include <compat/linux/linux_syscallargs.h>

#include <compat/linux/common/linux_sched.h>

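/*
 * linux_sys_clone: implement Linux clone(2) on top of fork1(),
 * translating the CLONE_* sharing flags into their native FORK_*
 * equivalents.
 */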
int
linux_sys_clone(struct lwp *l, void *v, register_t *retval)
{
	struct linux_sys_clone_args /* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
#ifdef LINUX_NPTL
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) child_tidptr;
#endif
	} */ *uap = v;
	int flags, sig;
	int error;
#ifdef LINUX_NPTL
	struct linux_emuldata *led;
#endif

	/*
	 * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
	 */
	if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
		return (EINVAL);

	/*
	 * A thread group implies shared signal handlers, and shared
	 * signal handlers imply a shared VM.  This matches what the
	 * Linux kernel enforces.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD
	    && (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
		return (EINVAL);
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
	    && (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
		return (EINVAL);

	flags = 0;

	if (SCARG(uap, flags) & LINUX_CLONE_VM)
		flags |= FORK_SHAREVM;
	if (SCARG(uap, flags) & LINUX_CLONE_FS)
		flags |= FORK_SHARECWD;
	if (SCARG(uap, flags) & LINUX_CLONE_FILES)
		flags |= FORK_SHAREFILES;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
		flags |= FORK_SHARESIGS;
	if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
		flags |= FORK_PPWAIT;

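	/*
	 * The low byte of the clone flags names the signal sent to the
	 * parent when the child terminates (a plain fork passes SIGCHLD
	 * there); translate it to the native signal number.
	 */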
	sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
	if (sig < 0 || sig >= LINUX__NSIG)
		return (EINVAL);
	sig = linux_to_native_signo[sig];

#ifdef LINUX_NPTL
	led = (struct linux_emuldata *)l->l_proc->p_emuldata;

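	/*
	 * Stash the NPTL TID pointers and the clone flags in the
	 * emulation data; the emulation's fork and exit hooks use them
	 * to implement CLONE_PARENT_SETTID, CLONE_CHILD_SETTID and
	 * CLONE_CHILD_CLEARTID.
	 */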
	led->parent_tidptr = SCARG(uap, parent_tidptr);
	led->child_tidptr = SCARG(uap, child_tidptr);
	led->clone_flags = SCARG(uap, flags);
#endif /* LINUX_NPTL */

	/*
	 * Note that Linux does not provide a portable way of specifying
	 * the stack area; the caller must know if the stack grows up
	 * or down.  So, we pass a stack size of 0, so that the code
	 * that makes this adjustment is a noop.
	 */
	if ((error = fork1(l, flags, sig, SCARG(uap, stack), 0,
	    NULL, NULL, retval, NULL)) != 0)
		return error;

	return 0;
}

int
linux_sys_sched_setparam(struct lwp *cl, void *v, register_t *retval)
{
	struct linux_sys_sched_setparam_args /* {
		syscallarg(linux_pid_t) pid;
		syscallarg(const struct linux_sched_param *) sp;
	} */ *uap = v;
	int error;
	struct linux_sched_param lp;
	struct proc *p;

	/*
	 * We only check for valid parameters and return afterwards.
	 */

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		return error;

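	/*
	 * Permission check, roughly mirroring Linux: the caller may act
	 * on the target if it is the same process, has superuser
	 * privileges, or shares a real or effective user ID with it.
	 * The same check is repeated in the other sched_*() entry points
	 * below.
	 */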
	if (SCARG(uap, pid) != 0) {
		kauth_cred_t pc = cl->l_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cl->l_proc == p ||
		      kauth_authorize_generic(pc, KAUTH_GENERIC_ISSUSER, NULL) == 0 ||
		      kauth_cred_getuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_getuid(pc) == kauth_cred_geteuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_geteuid(p->p_cred)))
			return EPERM;
	}

	return 0;
}

int
linux_sys_sched_getparam(struct lwp *cl, void *v, register_t *retval)
{
	struct linux_sys_sched_getparam_args /* {
		syscallarg(linux_pid_t) pid;
		syscallarg(struct linux_sched_param *) sp;
	} */ *uap = v;
	struct proc *p;
	struct linux_sched_param lp;

	/*
	 * We only check for valid parameters and return a dummy
	 * priority afterwards.
	 */
	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
		return EINVAL;

	if (SCARG(uap, pid) != 0) {
		kauth_cred_t pc = cl->l_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cl->l_proc == p ||
		      kauth_authorize_generic(pc, KAUTH_GENERIC_ISSUSER, NULL) == 0 ||
		      kauth_cred_getuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_getuid(pc) == kauth_cred_geteuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_geteuid(p->p_cred)))
			return EPERM;
	}

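	/* Only the static SCHED_OTHER priority 0 is supported, so report that. */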
	lp.sched_priority = 0;
	return copyout(&lp, SCARG(uap, sp), sizeof(lp));
}

int
linux_sys_sched_setscheduler(struct lwp *cl, void *v,
    register_t *retval)
{
	struct linux_sys_sched_setscheduler_args /* {
		syscallarg(linux_pid_t) pid;
		syscallarg(int) policy;
		syscallarg(const struct linux_sched_param *) sp;
	} */ *uap = v;
	int error;
	struct linux_sched_param lp;
	struct proc *p;

	/*
	 * We only check for valid parameters and return afterwards.
	 */

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		return error;

	if (SCARG(uap, pid) != 0) {
		kauth_cred_t pc = cl->l_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cl->l_proc == p ||
		      kauth_authorize_generic(pc, KAUTH_GENERIC_ISSUSER, NULL) == 0 ||
		      kauth_cred_getuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_getuid(pc) == kauth_cred_geteuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_geteuid(p->p_cred)))
			return EPERM;
	}

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	if (SCARG(uap, policy) != LINUX_SCHED_OTHER || lp.sched_priority != 0)
		return EINVAL;

	return 0;
}

int
linux_sys_sched_getscheduler(struct lwp *cl, void *v, register_t *retval)
{
	struct linux_sys_sched_getscheduler_args /* {
		syscallarg(linux_pid_t) pid;
	} */ *uap = v;
	struct proc *p;

	*retval = -1;
	/*
	 * We only check for valid parameters and return afterwards.
	 */

	if (SCARG(uap, pid) != 0) {
		kauth_cred_t pc = cl->l_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cl->l_proc == p ||
		      kauth_authorize_generic(pc, KAUTH_GENERIC_ISSUSER, NULL) == 0 ||
		      kauth_cred_getuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_getuid(p->p_cred) ||
		      kauth_cred_getuid(pc) == kauth_cred_geteuid(p->p_cred) ||
		      kauth_cred_geteuid(pc) == kauth_cred_geteuid(p->p_cred)))
			return EPERM;
	}

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	*retval = LINUX_SCHED_OTHER;
	return 0;
}

int
linux_sys_sched_yield(struct lwp *cl, void *v, register_t *retval)
{

	yield();
	return 0;
}

int
linux_sys_sched_get_priority_max(struct lwp *cl, void *v,
    register_t *retval)
{
	struct linux_sys_sched_get_priority_max_args /* {
		syscallarg(int) policy;
	} */ *uap = v;

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	if (SCARG(uap, policy) != LINUX_SCHED_OTHER) {
		*retval = -1;
		return EINVAL;
	}

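	/* For SCHED_OTHER, Linux reports a priority range of [0, 0]. */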
	*retval = 0;
	return 0;
}

int
linux_sys_sched_get_priority_min(struct lwp *cl, void *v,
    register_t *retval)
{
	struct linux_sys_sched_get_priority_min_args /* {
		syscallarg(int) policy;
	} */ *uap = v;

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	if (SCARG(uap, policy) != LINUX_SCHED_OTHER) {
		*retval = -1;
		return EINVAL;
	}

	*retval = 0;
	return 0;
}

#ifndef __m68k__
/* Present on everything but m68k */
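/*
 * exit_group(2) terminates every thread in the calling thread group,
 * not just the calling thread.
 */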
int
linux_sys_exit_group(struct lwp *l, void *v, register_t *retval)
{
#ifdef LINUX_NPTL
	struct linux_sys_exit_group_args /* {
		syscallarg(int) error_code;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct linux_emuldata *led = p->p_emuldata;
	struct linux_emuldata *e;

	if (led->s->flags & LINUX_LES_USE_NPTL) {

#ifdef DEBUG_LINUX
		printf("%s:%d, led->s->refs = %d\n", __func__, __LINE__,
		    led->s->refs);
#endif

		/*
		 * The calling thread is supposed to kill all threads
		 * in the same thread group (i.e. all threads created
		 * via clone(2) with the CLONE_THREAD flag set).
		 *
		 * If there is only one thread, things are quite simple.
		 */
		if (led->s->refs == 1)
			return sys_exit(l, v, retval);

#ifdef DEBUG_LINUX
		printf("%s:%d\n", __func__, __LINE__);
#endif

		led->s->flags |= LINUX_LES_INEXITGROUP;
		led->s->xstat = W_EXITCODE(SCARG(uap, error_code), 0);

		/*
		 * Kill all threads in the group.  The emulation exit hook
		 * takes care of hiding the zombies and reporting the exit
		 * code properly.
		 */
		mutex_enter(&proclist_mutex);
		LIST_FOREACH(e, &led->s->threads, threads) {
			if (e->proc == p)
				continue;

#ifdef DEBUG_LINUX
			printf("%s: kill PID %d\n", __func__, e->proc->p_pid);
#endif
			psignal(e->proc, SIGKILL);
		}

		/* Now, kill ourselves */
		psignal(p, SIGKILL);
		mutex_exit(&proclist_mutex);

		return 0;

	}
#endif /* LINUX_NPTL */

	return sys_exit(l, v, retval);
}
#endif /* !__m68k__ */

#ifdef LINUX_NPTL
int
linux_sys_set_tid_address(struct lwp *l, void *v, register_t *retval)
{
	struct linux_sys_set_tid_address_args /* {
		syscallarg(int *) tid;
	} */ *uap = v;
	struct linux_emuldata *led;

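	/*
	 * Remember where the thread's TID lives in userland; on thread
	 * exit the emulation is expected to clear it and wake any futex
	 * waiters on that address, which is how NPTL implements
	 * pthread_join().
	 */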
	led = (struct linux_emuldata *)l->l_proc->p_emuldata;
	led->clear_tid = SCARG(uap, tid);

	led->s->flags |= LINUX_LES_USE_NPTL;

	*retval = l->l_proc->p_pid;

	return 0;
}

/* ARGSUSED1 */
int
linux_sys_gettid(struct lwp *l, void *v, register_t *retval)
{
	/*
	 * Each Linux thread is a separate NetBSD process in this
	 * emulation, so its PID doubles as the Linux thread ID.
	 */
	*retval = l->l_proc->p_pid;
	return 0;
}

#ifdef LINUX_NPTL
/* ARGSUSED1 */
int
linux_sys_getpid(struct lwp *l, void *v, register_t *retval)
{
	struct linux_emuldata *led = l->l_proc->p_emuldata;

	if (led->s->flags & LINUX_LES_USE_NPTL) {
		/*
		 * Linux getpid() returns the thread group ID, i.e. the
		 * PID of the thread group leader.
		 */
		*retval = led->s->group_pid;
	} else {
		*retval = l->l_proc->p_pid;
	}

	return 0;
}

/* ARGSUSED1 */
int
linux_sys_getppid(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p = l->l_proc;
	struct linux_emuldata *led = p->p_emuldata;
	struct proc *glp;
	struct proc *pp;

	if (led->s->flags & LINUX_LES_USE_NPTL) {

		/* Find the thread group leader's parent */
		if ((glp = pfind(led->s->group_pid)) == NULL) {
			/* Maybe panic... */
			printf("linux_sys_getppid: missing group leader PID"
			    " %d\n", led->s->group_pid);
			return ESRCH;
		}
		pp = glp->p_pptr;

		/* If this is a Linux process too, return thread group PID */
		if (pp->p_emul == p->p_emul) {
			struct linux_emuldata *pled;

			pled = pp->p_emuldata;
			*retval = pled->s->group_pid;
		} else {
			*retval = pp->p_pid;
		}

	} else {
		*retval = p->p_pptr->p_pid;
	}

	return 0;
}
#endif /* LINUX_NPTL */

int
linux_sys_sched_getaffinity(struct lwp *l, void *v, register_t *retval)
{
	struct linux_sys_sched_getaffinity_args /* {
		syscallarg(pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */ *uap = v;
	int error;
	int ret;
	char *data;
	int *retp;

	if (SCARG(uap, mask) == NULL)
		return EINVAL;

	if (SCARG(uap, len) < sizeof(int))
		return EINVAL;

	if (pfind(SCARG(uap, pid)) == NULL)
		return ESRCH;

	/*
	 * Report all existing CPUs as available to the process.  The
	 * result is a bit mask with the first CPU in the least
	 * significant bit; e.g. with ncpu == 2 this yields 0x3.
	 */
	ret = (1 << ncpu) - 1;
	data = malloc(SCARG(uap, len), M_TEMP, M_WAITOK|M_ZERO);
	retp = (int *)&data[SCARG(uap, len) - sizeof(ret)];
	*retp = ret;

	error = copyout(data, SCARG(uap, mask), SCARG(uap, len));
	free(data, M_TEMP);

	return error;
}

int
linux_sys_sched_setaffinity(struct lwp *l, void *v, register_t *retval)
{
	struct linux_sys_sched_setaffinity_args /* {
		syscallarg(pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */ *uap = v;

	if (pfind(SCARG(uap, pid)) == NULL)
		return ESRCH;

	/* The affinity request is accepted but ignored. */
#ifdef DEBUG_LINUX
	printf("linux_sys_sched_setaffinity\n");
#endif
	return 0;
}
#endif /* LINUX_NPTL */