/*	$NetBSD: uipc_sem.c,v 1.21 2007/06/15 18:27:13 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2002 Alfred Perlstein <alfred@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.21 2007/06/15 18:27:13 ad Exp $");

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/ksem.h>
#include <sys/syscall.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>

#include <sys/mount.h>

#include <sys/syscallargs.h>

#ifndef SEM_MAX
#define	SEM_MAX		30
#endif

#define	SEM_MAX_NAMELEN	14
#define	SEM_VALUE_MAX	(~0U)
#define	SEM_HASHTBL_SIZE 13

#define	SEM_TO_ID(x)	(((x)->ks_id))
#define	SEM_HASH(id)	((id) % SEM_HASHTBL_SIZE)

MALLOC_DEFINE(M_SEM, "p1003_1b_sem", "p1003_1b semaphores");

/*
 * Note: to read the ks_name member, you need either the ks_interlock
 * or ksem_mutex.  To write the ks_name member, you need both.  Make
 * sure the lock order is ksem_mutex -> ks_interlock.
 */
struct ksem {
	LIST_ENTRY(ksem) ks_entry;	/* global list entry */
	LIST_ENTRY(ksem) ks_hash;	/* hash list entry */
	kmutex_t ks_interlock;		/* lock on this ksem */
	kcondvar_t ks_cv;		/* condition variable */
	unsigned int ks_ref;		/* number of references */
	char *ks_name;			/* if named, this is the name */
	size_t ks_namelen;		/* length of name */
	mode_t ks_mode;			/* protection bits */
	uid_t ks_uid;			/* creator uid */
	gid_t ks_gid;			/* creator gid */
	unsigned int ks_value;		/* current value */
	unsigned int ks_waiters;	/* number of waiters */
	semid_t ks_id;			/* unique identifier */
};
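
/*
 * Example of the locking rule above in practice (a sketch, modelled on
 * sys__ksem_unlink() further down): code that writes ks_name takes
 * ksem_mutex first and only then the per-semaphore interlock:
 *
 *	mutex_enter(&ksem_mutex);
 *	ks = ksem_lookup_byname(name);	// returns with ks_interlock held
 *	ks->ks_name = NULL;		// both locks held: write is safe
 *	mutex_exit(&ksem_mutex);
 *	...
 *	mutex_exit(&ks->ks_interlock);	// or ksem_free() releases it
 *
 * A reader holding only one of the two locks may still look at ks_name,
 * e.g. ksem_lookup_byname() compares names under ksem_mutex alone.
 */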

struct ksem_ref {
	LIST_ENTRY(ksem_ref) ksr_list;
	struct ksem *ksr_ksem;
};

struct ksem_proc {
	krwlock_t kp_lock;
	LIST_HEAD(, ksem_ref) kp_ksems;
};

LIST_HEAD(ksem_list, ksem);

/*
 * ksem_mutex protects ksem_head and nsems.  Only named semaphores go
 * onto ksem_head.
 */
static kmutex_t ksem_mutex;
static struct ksem_list ksem_head = LIST_HEAD_INITIALIZER(&ksem_head);
static struct ksem_list ksem_hash[SEM_HASHTBL_SIZE];
static int nsems = 0;

/*
 * ksem_counter is the last assigned semid_t.  It needs to be COMPAT_NETBSD32
 * friendly, even though semid_t itself is defined as uintptr_t.
 */
static uint32_t ksem_counter = 1;

static specificdata_key_t ksem_specificdata_key;

static void
ksem_free(struct ksem *ks)
{

	KASSERT(mutex_owned(&ks->ks_interlock));

	/*
	 * If the ksem is anonymous (or has been unlinked), then
	 * this is the end of its life.
	 */
	if (ks->ks_name == NULL) {
		mutex_exit(&ks->ks_interlock);
		mutex_destroy(&ks->ks_interlock);
		cv_destroy(&ks->ks_cv);

		mutex_enter(&ksem_mutex);
		nsems--;
		LIST_REMOVE(ks, ks_hash);
		mutex_exit(&ksem_mutex);

		kmem_free(ks, sizeof(*ks));
		return;
	}
	mutex_exit(&ks->ks_interlock);
}

static inline void
ksem_addref(struct ksem *ks)
{

	KASSERT(mutex_owned(&ks->ks_interlock));
	ks->ks_ref++;
	KASSERT(ks->ks_ref != 0);
}

static inline void
ksem_delref(struct ksem *ks)
{

	KASSERT(mutex_owned(&ks->ks_interlock));
	KASSERT(ks->ks_ref != 0);
	if (--ks->ks_ref == 0) {
		ksem_free(ks);
		return;
	}
	mutex_exit(&ks->ks_interlock);
}
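
/*
 * Sketch of the reference counting convention, mirroring the callers in
 * this file: both ksem_addref() and ksem_delref() are entered with
 * ks_interlock held, and ksem_delref() always returns with the interlock
 * released, having destroyed the semaphore if the last reference went away:
 *
 *	mutex_enter(&ks->ks_interlock);
 *	ksem_addref(ks);		// interlock still held afterwards
 *	mutex_exit(&ks->ks_interlock);
 *	...
 *	mutex_enter(&ks->ks_interlock);
 *	ksem_delref(ks);		// interlock released; ks may be gone
 */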

static struct ksem_proc *
ksem_proc_alloc(void)
{
	struct ksem_proc *kp;

	kp = kmem_alloc(sizeof(*kp), KM_SLEEP);
	rw_init(&kp->kp_lock);
	LIST_INIT(&kp->kp_ksems);

	return (kp);
}

static void
ksem_proc_dtor(void *arg)
{
	struct ksem_proc *kp = arg;
	struct ksem_ref *ksr;

	rw_enter(&kp->kp_lock, RW_WRITER);

	while ((ksr = LIST_FIRST(&kp->kp_ksems)) != NULL) {
		LIST_REMOVE(ksr, ksr_list);
		mutex_enter(&ksr->ksr_ksem->ks_interlock);
		ksem_delref(ksr->ksr_ksem);
		kmem_free(ksr, sizeof(*ksr));
	}

	rw_exit(&kp->kp_lock);
	rw_destroy(&kp->kp_lock);
	kmem_free(kp, sizeof(*kp));
}

static void
ksem_add_proc(struct proc *p, struct ksem *ks)
{
	struct ksem_proc *kp;
	struct ksem_ref *ksr;

	kp = proc_getspecific(p, ksem_specificdata_key);
	if (kp == NULL) {
		kp = ksem_proc_alloc();
		proc_setspecific(p, ksem_specificdata_key, kp);
	}

	ksr = kmem_alloc(sizeof(*ksr), KM_SLEEP);
	ksr->ksr_ksem = ks;

	rw_enter(&kp->kp_lock, RW_WRITER);
	LIST_INSERT_HEAD(&kp->kp_ksems, ksr, ksr_list);
	rw_exit(&kp->kp_lock);
}

/* We MUST have a write lock on the ksem_proc list! */
static struct ksem_ref *
ksem_drop_proc(struct ksem_proc *kp, struct ksem *ks)
{
	struct ksem_ref *ksr;

	KASSERT(mutex_owned(&ks->ks_interlock));
	LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
		if (ksr->ksr_ksem == ks) {
			ksem_delref(ks);
			LIST_REMOVE(ksr, ksr_list);
			return (ksr);
		}
	}
#ifdef DIAGNOSTIC
	panic("ksem_drop_proc: ksem_proc %p ksem %p", kp, ks);
#endif
	return (NULL);
}

static int
ksem_perm(struct lwp *l, struct ksem *ks)
{
	kauth_cred_t uc;

	KASSERT(mutex_owned(&ks->ks_interlock));
	uc = l->l_cred;
	if ((kauth_cred_geteuid(uc) == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) ||
	    (kauth_cred_getegid(uc) == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) ||
	    (ks->ks_mode & S_IWOTH) != 0 ||
	    kauth_authorize_generic(uc, KAUTH_GENERIC_ISSUSER, NULL) == 0)
		return (0);
	return (EPERM);
}
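
/*
 * Worked example of the check above (numbers purely illustrative): for a
 * semaphore created with mode 0620 by uid 100 / gid 10, a caller with
 * euid 100 passes via S_IWUSR, a caller with egid 10 passes via S_IWGRP,
 * the superuser always passes via kauth(9), and everyone else gets EPERM.
 * Only the write bits are consulted; there is no separate read check.
 */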

static struct ksem *
ksem_lookup_byid(semid_t id)
{
	struct ksem *ks;

	KASSERT(mutex_owned(&ksem_mutex));
	LIST_FOREACH(ks, &ksem_hash[SEM_HASH(id)], ks_hash) {
		if (ks->ks_id == id)
			return ks;
	}
	return NULL;
}

static struct ksem *
ksem_lookup_byname(const char *name)
{
	struct ksem *ks;

	KASSERT(mutex_owned(&ksem_mutex));
	LIST_FOREACH(ks, &ksem_head, ks_entry) {
		if (strcmp(ks->ks_name, name) == 0) {
			mutex_enter(&ks->ks_interlock);
			return (ks);
		}
	}
	return (NULL);
}

static int
ksem_create(struct lwp *l, const char *name, struct ksem **ksret,
    mode_t mode, unsigned int value)
{
	struct ksem *ret;
	kauth_cred_t uc;
	size_t len;

	uc = l->l_cred;
	if (value > SEM_VALUE_MAX)
		return (EINVAL);
	ret = kmem_zalloc(sizeof(*ret), KM_SLEEP);
	if (name != NULL) {
		len = strlen(name);
		if (len > SEM_MAX_NAMELEN) {
			kmem_free(ret, sizeof(*ret));
			return (ENAMETOOLONG);
		}
		/*
		 * The name must start with a '/' and contain no further
		 * '/' characters, e.g. "/mysem" is valid, "mysem" and
		 * "/a/b" are not.
		 */
		if (*name != '/' || len < 2 || strchr(name + 1, '/') != NULL) {
			kmem_free(ret, sizeof(*ret));
			return (EINVAL);
		}
		ret->ks_namelen = len + 1;
		ret->ks_name = kmem_alloc(ret->ks_namelen, KM_SLEEP);
		strlcpy(ret->ks_name, name, len + 1);
	} else
		ret->ks_name = NULL;
	ret->ks_mode = mode;
	ret->ks_value = value;
	ret->ks_ref = 1;
	ret->ks_waiters = 0;
	ret->ks_uid = kauth_cred_geteuid(uc);
	ret->ks_gid = kauth_cred_getegid(uc);
	mutex_init(&ret->ks_interlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&ret->ks_cv, "psem");

	mutex_enter(&ksem_mutex);
	if (nsems >= SEM_MAX) {
		mutex_exit(&ksem_mutex);
		if (ret->ks_name != NULL)
			kmem_free(ret->ks_name, ret->ks_namelen);
		kmem_free(ret, sizeof(*ret));
		return (ENFILE);
	}
	nsems++;
	while (ksem_lookup_byid(ksem_counter) != NULL) {
		ksem_counter++;
		/* 0 is a special value for libpthread */
		if (ksem_counter == 0)
			ksem_counter++;
	}
	ret->ks_id = ksem_counter;
	LIST_INSERT_HEAD(&ksem_hash[SEM_HASH(ret->ks_id)], ret, ks_hash);
	mutex_exit(&ksem_mutex);

	*ksret = ret;
	return (0);
}

int
sys__ksem_init(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_init_args /* {
		unsigned int value;
		semid_t *idp;
	} */ *uap = v;

	return do_ksem_init(l, SCARG(uap, value), SCARG(uap, idp), copyout);
}

int
do_ksem_init(struct lwp *l, unsigned int value, semid_t *idp,
    copyout_t docopyout)
{
	struct ksem *ks;
	semid_t id;
	int error;

	/* Note the mode does not matter for anonymous semaphores. */
	error = ksem_create(l, NULL, &ks, 0, value);
	if (error)
		return (error);
	id = SEM_TO_ID(ks);
	error = (*docopyout)(&id, idp, sizeof(id));
	if (error) {
		mutex_enter(&ks->ks_interlock);
		ksem_delref(ks);
		return (error);
	}

	ksem_add_proc(l->l_proc, ks);

	return (0);
}
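
/*
 * Userland note: these syscalls are not normally called directly; the
 * sem_init(3)/sem_wait(3)/sem_post(3) wrappers in libpthread sit on top
 * of them.  A minimal user-level sketch (assuming the standard POSIX
 * semaphore API) that would end up in do_ksem_init() and friends:
 *
 *	sem_t sem;
 *
 *	if (sem_init(&sem, 0, 1) == -1)		// -> _ksem_init()
 *		err(1, "sem_init");
 *	sem_wait(&sem);				// -> _ksem_wait()
 *	// ...critical section...
 *	sem_post(&sem);				// -> _ksem_post()
 *	sem_destroy(&sem);			// -> _ksem_destroy()
 */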

int
sys__ksem_open(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_open_args /* {
		const char *name;
		int oflag;
		mode_t mode;
		unsigned int value;
		semid_t *idp;
	} */ *uap = v;

	return do_ksem_open(l, SCARG(uap, name), SCARG(uap, oflag),
	    SCARG(uap, mode), SCARG(uap, value), SCARG(uap, idp), copyout);
}

int
do_ksem_open(struct lwp *l, const char *semname, int oflag, mode_t mode,
    unsigned int value, semid_t *idp, copyout_t docopyout)
{
	char name[SEM_MAX_NAMELEN + 1];
	size_t done;
	int error;
	struct ksem *ksnew, *ks;
	semid_t id;

	error = copyinstr(semname, name, sizeof(name), &done);
	if (error)
		return (error);

	ksnew = NULL;
	mutex_enter(&ksem_mutex);
	ks = ksem_lookup_byname(name);

	/* Found one? */
	if (ks != NULL) {
		/* Check for exclusive create. */
		if (oflag & O_EXCL) {
			mutex_exit(&ks->ks_interlock);
			mutex_exit(&ksem_mutex);
			return (EEXIST);
		}
 found_one:
		/*
		 * Verify permissions.  If we can access it, add
		 * this process's reference.
		 */
		KASSERT(mutex_owned(&ks->ks_interlock));
		error = ksem_perm(l, ks);
		if (error == 0)
			ksem_addref(ks);
		mutex_exit(&ks->ks_interlock);
		mutex_exit(&ksem_mutex);
		if (error)
			return (error);

		id = SEM_TO_ID(ks);
		error = (*docopyout)(&id, idp, sizeof(id));
		if (error) {
			mutex_enter(&ks->ks_interlock);
			ksem_delref(ks);
			return (error);
		}

		ksem_add_proc(l->l_proc, ks);

		return (0);
	}

	/*
	 * The semaphore does not exist; if the caller did not ask for
	 * creation (O_CREAT), fail with ENOENT.
	 */
	if ((oflag & O_CREAT) == 0) {
		mutex_exit(&ksem_mutex);
		return (ENOENT);
	}

	/*
	 * We may block during creation, so drop the lock.
	 */
	mutex_exit(&ksem_mutex);
	error = ksem_create(l, name, &ksnew, mode, value);
	if (error != 0)
		return (error);

	id = SEM_TO_ID(ksnew);
	error = (*docopyout)(&id, idp, sizeof(id));
	if (error) {
		kmem_free(ksnew->ks_name, ksnew->ks_namelen);
		ksnew->ks_name = NULL;

		mutex_enter(&ksnew->ks_interlock);
		ksem_delref(ksnew);
		return (error);
	}

	/*
	 * We need to make sure we haven't lost a race while
	 * allocating during creation.
	 */
	mutex_enter(&ksem_mutex);
	if ((ks = ksem_lookup_byname(name)) != NULL) {
		if (oflag & O_EXCL) {
			mutex_exit(&ks->ks_interlock);
			mutex_exit(&ksem_mutex);

			kmem_free(ksnew->ks_name, ksnew->ks_namelen);
			ksnew->ks_name = NULL;

			mutex_enter(&ksnew->ks_interlock);
			ksem_delref(ksnew);
			return (EEXIST);
		}
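		/*
		 * XXX The losing ksnew is not released here: it stays on
		 * XXX the hash table with one reference and continues to
		 * XXX count against SEM_MAX.
		 */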
		goto found_one;
	} else {
		/* ksnew already has its initial reference. */
		LIST_INSERT_HEAD(&ksem_head, ksnew, ks_entry);
		mutex_exit(&ksem_mutex);

		ksem_add_proc(l->l_proc, ksnew);
	}
	return (error);
}
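
/*
 * Userland sketch of the named-semaphore path above (assuming the
 * standard POSIX sem_open(3) API layered over these syscalls;
 * illustration only):
 *
 *	sem_t *sem;
 *
 *	sem = sem_open("/mysem", O_CREAT | O_EXCL, 0600, 1);
 *	if (sem == SEM_FAILED)		// EEXIST, EPERM, ENFILE, ...
 *		err(1, "sem_open");
 *	sem_wait(sem);			// see ksem_wait() below
 *	// ...critical section...
 *	sem_post(sem);			// see sys__ksem_post() below
 *	sem_close(sem);			// drops this process's reference
 *	sem_unlink("/mysem");		// removes the name
 */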

/* We must have a read lock on the ksem_proc list! */
static struct ksem *
ksem_lookup_proc(struct ksem_proc *kp, semid_t id)
{
	struct ksem_ref *ksr;

	LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
		if (id == SEM_TO_ID(ksr->ksr_ksem)) {
			mutex_enter(&ksr->ksr_ksem->ks_interlock);
			return (ksr->ksr_ksem);
		}
	}

	return (NULL);
}

int
sys__ksem_unlink(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_unlink_args /* {
		const char *name;
	} */ *uap = v;
	char name[SEM_MAX_NAMELEN + 1], *cp;
	size_t done, len;
	struct ksem *ks;
	int error;

	error = copyinstr(SCARG(uap, name), name, sizeof(name), &done);
	if (error)
		return error;

	mutex_enter(&ksem_mutex);
	ks = ksem_lookup_byname(name);
	if (ks == NULL) {
		mutex_exit(&ksem_mutex);
		return (ENOENT);
	}

	KASSERT(mutex_owned(&ks->ks_interlock));

	LIST_REMOVE(ks, ks_entry);
	cp = ks->ks_name;
	len = ks->ks_namelen;
	ks->ks_name = NULL;

	mutex_exit(&ksem_mutex);

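	/*
	 * A named semaphore that no process has open sits on the global
	 * list with a reference count of zero; in that case unlinking it
	 * is what finally frees it.  Otherwise the now-anonymous ksem is
	 * destroyed by the last ksem_delref().
	 */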
	if (ks->ks_ref == 0)
		ksem_free(ks);
	else
		mutex_exit(&ks->ks_interlock);

	kmem_free(cp, len);

	return (0);
}

int
sys__ksem_close(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_close_args /* {
		semid_t id;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem_ref *ksr;
	struct ksem *ks;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_WRITER);

	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	if (ks == NULL) {
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	KASSERT(mutex_owned(&ks->ks_interlock));
	if (ks->ks_name == NULL) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	ksr = ksem_drop_proc(kp, ks);
	rw_exit(&kp->kp_lock);
	kmem_free(ksr, sizeof(*ksr));

	return (0);
}

int
sys__ksem_post(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_post_args /* {
		semid_t id;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem *ks;
	int error;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	KASSERT(mutex_owned(&ks->ks_interlock));
	if (ks->ks_value == SEM_VALUE_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	++ks->ks_value;
	if (ks->ks_waiters)
		cv_broadcast(&ks->ks_cv);
	error = 0;
 out:
	mutex_exit(&ks->ks_interlock);
	return (error);
}

static int
ksem_wait(struct lwp *l, semid_t id, int tryflag)
{
	struct ksem_proc *kp;
	struct ksem *ks;
	int error;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, id);
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	KASSERT(mutex_owned(&ks->ks_interlock));
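	/*
	 * Take an extra reference so the semaphore cannot be freed while
	 * we sleep, even if another thread closes or destroys it.  Note
	 * that cv_wait_sig() releases and re-acquires ks_interlock around
	 * the sleep.
	 */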
	ksem_addref(ks);
	while (ks->ks_value == 0) {
		ks->ks_waiters++;
		if (tryflag)
			error = EAGAIN;
		else
			error = cv_wait_sig(&ks->ks_cv, &ks->ks_interlock);
		ks->ks_waiters--;
		if (error)
			goto out;
	}
	ks->ks_value--;
	error = 0;
 out:
	ksem_delref(ks);
	return (error);
}

int
sys__ksem_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_wait_args /* {
		semid_t id;
	} */ *uap = v;

	return ksem_wait(l, SCARG(uap, id), 0);
}

int
sys__ksem_trywait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_trywait_args /* {
		semid_t id;
	} */ *uap = v;

	return ksem_wait(l, SCARG(uap, id), 1);
}

int
sys__ksem_getvalue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_getvalue_args /* {
		semid_t id;
		unsigned int *value;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem *ks;
	unsigned int val;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	KASSERT(mutex_owned(&ks->ks_interlock));
	val = ks->ks_value;
	mutex_exit(&ks->ks_interlock);

	return (copyout(&val, SCARG(uap, value), sizeof(val)));
}

int
sys__ksem_destroy(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_destroy_args /* {
		semid_t id;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem_ref *ksr;
	struct ksem *ks;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_WRITER);

	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	if (ks == NULL) {
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	KASSERT(mutex_owned(&ks->ks_interlock));

	/*
	 * XXX This misses named semaphores which have been unlink'd,
	 * XXX but since behavior of destroying a named semaphore is
	 * XXX undefined, this is technically allowed.
	 */
	if (ks->ks_name != NULL) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	if (ks->ks_waiters) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EBUSY);
	}

	ksr = ksem_drop_proc(kp, ks);
	rw_exit(&kp->kp_lock);
	kmem_free(ksr, sizeof(*ksr));

	return (0);
}

static void
ksem_forkhook(struct proc *p2, struct proc *p1)
{
	struct ksem_proc *kp1, *kp2;
	struct ksem_ref *ksr, *ksr1;

	kp1 = proc_getspecific(p1, ksem_specificdata_key);
	if (kp1 == NULL)
		return;

	kp2 = ksem_proc_alloc();

	rw_enter(&kp1->kp_lock, RW_READER);

	if (!LIST_EMPTY(&kp1->kp_ksems)) {
		LIST_FOREACH(ksr, &kp1->kp_ksems, ksr_list) {
			ksr1 = kmem_alloc(sizeof(*ksr), KM_SLEEP);
			ksr1->ksr_ksem = ksr->ksr_ksem;
			mutex_enter(&ksr->ksr_ksem->ks_interlock);
			ksem_addref(ksr->ksr_ksem);
			mutex_exit(&ksr->ksr_ksem->ks_interlock);
			LIST_INSERT_HEAD(&kp2->kp_ksems, ksr1, ksr_list);
		}
	}

	rw_exit(&kp1->kp_lock);
	proc_setspecific(p2, ksem_specificdata_key, kp2);
}

static void
ksem_exechook(struct proc *p, void *arg)
{
	struct ksem_proc *kp;

	kp = proc_getspecific(p, ksem_specificdata_key);
	if (kp != NULL) {
		proc_setspecific(p, ksem_specificdata_key, NULL);
		ksem_proc_dtor(kp);
	}
}
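
/*
 * Taken together, the fork and exec hooks above give sem_open(3)-style
 * semantics: each semaphore referenced by the parent gains a reference
 * for the child across fork(), while exec() drops all of the process's
 * references.
 */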

void
ksem_init(void)
{
	int i, error;

	mutex_init(&ksem_mutex, MUTEX_DEFAULT, IPL_NONE);
	exechook_establish(ksem_exechook, NULL);
	forkhook_establish(ksem_forkhook);

	for (i = 0; i < SEM_HASHTBL_SIZE; i++)
		LIST_INIT(&ksem_hash[i]);

	error = proc_specific_key_create(&ksem_specificdata_key,
	    ksem_proc_dtor);
	KASSERT(error == 0);
}