/*	$NetBSD: sys_lwp.c,v 1.89 2023/10/15 10:29:24 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.89 2023/10/15 10:29:24 riastradh Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwpctl.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pserialize.h>
#include <sys/ptrace.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/syscallargs.h>
#include <sys/systm.h>
#include <sys/types.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

static const stack_t lwp_ss_init = SS_INIT;

/*
 * Parked LWPs get no priority boost on awakening as they blocked on
 * user space objects.  Maybe revisit?
 */
syncobj_t lwp_park_syncobj = {
	.sobj_name	= "lwp_park",
	.sobj_flag	= SOBJ_SLEEPQ_NULL,
	.sobj_boostpri	= PRI_USER,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

static void
mi_startlwp(void *arg)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	(p->p_emul->e_startlwp)(arg);

	/* If the process is traced, report lwp creation to a debugger */
	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
	    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
		/* Paranoid check */
		mutex_enter(&proc_lock);
		if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
		    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
			mutex_exit(&proc_lock);
			return;
		}

		mutex_enter(p->p_lock);
		eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
	}
}

int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2,
    const sigset_t *sigmask, const stack_t *sigstk)
{
	struct proc *p = l->l_proc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
	    mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	return 0;
}
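
/*
 * Illustrative sketch (not part of the kernel): roughly how user space
 * reaches do_lwp_create() through _lwp_create(2).  The caller builds a
 * ucontext with _lwp_makecontext(3) and hands it in; sys__lwp_create()
 * below validates the machine context before the new LWP is started.
 * STACKSIZE, start() and arg are hypothetical names used only for the
 * example; error handling is abbreviated.
 *
 *	static void
 *	start(void *arg)
 *	{
 *		...
 *		_lwp_exit();
 *	}
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *	void *stack = malloc(STACKSIZE);
 *
 *	getcontext(&uc);
 *	_lwp_makecontext(&uc, start, arg, NULL, stack, STACKSIZE);
 *	if (_lwp_create(&uc, 0, &lid) == -1)
 *		err(1, "_lwp_create");
 */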

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc;
	lwp_t *l2;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
	    &newuc->uc_sigmask : &l->l_sigmask;
	error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask,
	    &SS_INIT);
	if (error)
		goto fail;

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid));
	if (error == 0) {
		lwp_start(l2, SCARG(uap, flags));
		return 0;
	}
	lwp_exit(l2);
 fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}
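
/*
 * Illustrative sketch (not part of the kernel): the suspend/continue pair as
 * seen from user space.  One LWP stops a sibling with _lwp_suspend(2) and
 * later resumes it with _lwp_continue(2); suspending the only runnable LWP
 * in the process fails with EDEADLK, per the check above.  "lid" is a
 * hypothetical identifier obtained earlier, e.g. from _lwp_create(2).
 *
 *	lwpid_t lid = ...;
 *
 *	if (_lwp_suspend(lid) == -1)
 *		err(1, "_lwp_suspend");
 *	...
 *	if (_lwp_continue(lid) == -1)
 *		err(1, "_lwp_continue");
 */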

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(&proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		t = proc_find_lwp(p, target);
		KASSERT(t == NULL || t->l_lid == target);
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

int
lwp_unpark(const lwpid_t *tp, const u_int ntargets)
{
	u_int target;
	kmutex_t *mp;
	int error, s;
	proc_t *p;
	lwp_t *t;

	p = curproc;
	error = 0;

	s = pserialize_read_enter();
	for (target = 0; target < ntargets; target++) {
		t = proc_find_lwp_unlocked(p, tp[target]);
		if (__predict_false(t == NULL)) {
			error = ESRCH;
			continue;
		}

		KASSERT(lwp_locked(t, NULL));

		if (__predict_true(t->l_syncobj == &lwp_park_syncobj)) {
			/* As expected it's parked, so wake it up. */
			mp = t->l_mutex;
			sleepq_remove(NULL, t, true);
			mutex_spin_exit(mp);
		} else if (__predict_false(t->l_stat == LSZOMB)) {
			lwp_unlock(t);
			error = ESRCH;
		} else {
			/*
			 * It hasn't parked yet because the wakeup side won
			 * the race, or something else has happened to make
			 * the thread not park.  Why doesn't really matter.
			 * Set the operation pending, so that the next call
			 * to _lwp_park() in the LWP returns early.  If it
			 * turns out to be a spurious wakeup, no harm done.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}
	}
	pserialize_read_exit(s);

	return error;
}
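
/*
 * Illustrative sketch (not part of the kernel): the park/unpark handshake as
 * a user-level library such as libpthread might use it to build a wait/wakeup
 * primitive.  The waiter publishes itself, re-checks the condition and only
 * then parks; the waker sets the condition and unparks.  If the unpark wins
 * the race, LW_UNPARKED is left pending and the park returns early (EALREADY),
 * so the wakeup is never lost.  "condition", "waiter" and "hint" are
 * hypothetical names used only for the example.
 *
 *	waiter = _lwp_self();
 *	while (!condition) {
 *		if (_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, hint, NULL) == -1 &&
 *		    errno != EALREADY && errno != EINTR)
 *			err(1, "_lwp_park");
 *	}
 *
 *	condition = 1;
 *	if (_lwp_unpark(waiter, hint) == -1 && errno != ESRCH)
 *		err(1, "_lwp_unpark");
 */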

int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts)
{
	int timo, error;
	struct timespec start;
	lwp_t *l;
	bool timeremain = !(flags & TIMER_ABSTIME) && ts;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo,
		    timeremain ? &start : NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	l = curlwp;
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		return EALREADY;
	}
	sleepq_enqueue(NULL, l, "parked", &lwp_park_syncobj, true);
	error = sleepq_block(timo, true, &lwp_park_syncobj, 0);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		if (timeremain)
			memset(ts, 0, sizeof(*ts));
		break;
	case ERESTART:
		error = EINTR;
		/*FALLTHROUGH*/
	default:
		if (timeremain)
			clock_timeleft(clock_id, ts, &start);
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t)		clock_id;
		syscallarg(int)			flags;
		syscallarg(struct timespec *)	ts;
		syscallarg(lwpid_t)		unpark;
		syscallarg(const void *)	hint;
		syscallarg(const void *)	unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(&SCARG(uap, unpark), 1);
		if (error != 0)
			return error;
	}

	error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp);
	if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
		(void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
	return error;
}

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(&SCARG(uap, target), 1);
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	lwpid_t targets[32], *tp;
	int error;
	u_int ntargets;
	size_t sz;

	ntargets = SCARG(uap, ntargets);
	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(lwpid_t) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else
		tp = kmem_alloc(sz, KM_SLEEP);
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}
	error = lwp_unpark(tp, ntargets);
	if (tp != targets)
		kmem_free(tp, sz);
	return error;
}

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	size_t len;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, t->l_name, sizeof(name));
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	len = uimin(SCARG(uap, len), sizeof(name));

	return copyoutstr(name, SCARG(uap, name), len, NULL);
}
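
/*
 * Illustrative sketch (not part of the kernel): user space obtains the
 * per-LWP communication area with _lwp_ctl(2) and then simply reads the
 * fields the kernel keeps up to date, for example the CPU the LWP last ran
 * on.  Field names follow <sys/lwpctl.h>; error handling is abbreviated.
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == -1)
 *		err(1, "_lwp_ctl");
 *	printf("running on cpu %d\n", lc->lc_curcpu);
 */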

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}