/*	$NetBSD: uipc_sem.c,v 1.15.4.2 2006/11/18 21:39:23 ad Exp $	*/

/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2002 Alfred Perlstein <alfred (at) FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.15.4.2 2006/11/18 21:39:23 ad Exp $");

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/ksem.h>
#include <sys/sa.h>
#include <sys/syscall.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>

#include <sys/mount.h>

#include <sys/syscallargs.h>

#ifndef SEM_MAX
#define	SEM_MAX			30
#endif

#define	SEM_MAX_NAMELEN		14
#define	SEM_VALUE_MAX		(~0U)
#define	SEM_HASHTBL_SIZE	13

#define	SEM_TO_ID(x)		(((x)->ks_id))
#define	SEM_HASH(id)		((id) % SEM_HASHTBL_SIZE)

MALLOC_DEFINE(M_SEM, "p1003_1b_sem", "p1003_1b semaphores");
/*
 * Note: to read the ks_name member, you need either the ks_interlock
 * or the global ksem_mutex.  To write the ks_name member, you need both.
 * Make sure the lock order is ksem_mutex -> ks_interlock.
 */
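/*
 * For illustration only (not taken from the original sources): when both
 * locks are needed, e.g. to change ks_name, the expected sequence is
 *
 *	mutex_enter(&ksem_mutex);
 *	mutex_enter(&ks->ks_interlock);
 *	...write ks->ks_name...
 *	mutex_exit(&ks->ks_interlock);
 *	mutex_exit(&ksem_mutex);
 */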
struct ksem {
	LIST_ENTRY(ksem) ks_entry;	/* global list entry */
	LIST_ENTRY(ksem) ks_hash;	/* hash list entry */
	kmutex_t ks_interlock;		/* lock on this ksem */
	char *ks_name;			/* if named, this is the name */
	unsigned int ks_ref;		/* number of references */
	mode_t ks_mode;			/* protection bits */
	uid_t ks_uid;			/* creator uid */
	gid_t ks_gid;			/* creator gid */
	unsigned int ks_value;		/* current value */
	unsigned int ks_waiters;	/* number of waiters */
	semid_t ks_id;			/* unique identifier */
};

struct ksem_ref {
	LIST_ENTRY(ksem_ref) ksr_list;
	struct ksem *ksr_ksem;
};

struct ksem_proc {
	krwlock_t kp_lock;
	LIST_HEAD(, ksem_ref) kp_ksems;
};

LIST_HEAD(ksem_list, ksem);

/*
 * ksem_mutex protects ksem_head and nsems.  Only named semaphores go
 * onto ksem_head.
 */
static kmutex_t ksem_mutex;
static struct ksem_list ksem_head = LIST_HEAD_INITIALIZER(&ksem_head);
static struct ksem_list ksem_hash[SEM_HASHTBL_SIZE];
static int nsems = 0;

/*
 * ksem_counter is the last assigned semid_t.  It needs to be COMPAT_NETBSD32
 * friendly, even though semid_t itself is defined as uintptr_t.
 */
static uint32_t ksem_counter = 1;

static specificdata_key_t ksem_specificdata_key;

static void
ksem_free(struct ksem *ks)
{

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));

	/*
	 * If the ksem is anonymous (or has been unlinked), then
	 * this is the end of its life.
	 */
	if (ks->ks_name == NULL) {
		mutex_exit(&ks->ks_interlock);

		mutex_enter(&ksem_mutex);
		nsems--;
		LIST_REMOVE(ks, ks_hash);
		mutex_exit(&ksem_mutex);

		free(ks, M_SEM);
		return;
	}
	mutex_exit(&ks->ks_interlock);
}

static inline void
ksem_addref(struct ksem *ks)
{

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	ks->ks_ref++;
	KASSERT(ks->ks_ref != 0);	/* XXX KDASSERT */
}

static inline void
ksem_delref(struct ksem *ks)
{

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	KASSERT(ks->ks_ref != 0);	/* XXX KDASSERT */
	if (--ks->ks_ref == 0) {
		ksem_free(ks);
		return;
	}
	mutex_exit(&ks->ks_interlock);
}
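
/*
 * Reference counting protocol: callers enter ks_interlock and then call
 * ksem_addref() or ksem_delref().  ksem_addref() returns with the
 * interlock still held; ksem_delref() always releases it, freeing the
 * ksem when the last reference goes away.  A typical caller:
 *
 *	mutex_enter(&ks->ks_interlock);
 *	ksem_delref(ks);	<- interlock released, ks possibly freed
 */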

static struct ksem_proc *
ksem_proc_alloc(void)
{
	struct ksem_proc *kp;

	kp = malloc(sizeof(*kp), M_SEM, M_WAITOK);
	rw_init(&kp->kp_lock);
	LIST_INIT(&kp->kp_ksems);

	return (kp);
}

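/*
 * Destructor for the per-process ksem state: drops every semaphore
 * reference the process still holds and frees the tracking structures.
 * Used as the specificdata destructor and by the exec hook below.
 */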
static void
ksem_proc_dtor(void *arg)
{
	struct ksem_proc *kp = arg;
	struct ksem_ref *ksr;

	rw_enter(&kp->kp_lock, RW_WRITER);

	while ((ksr = LIST_FIRST(&kp->kp_ksems)) != NULL) {
		LIST_REMOVE(ksr, ksr_list);
		mutex_enter(&ksr->ksr_ksem->ks_interlock);
		/* ksem_delref() releases the interlock. */
		ksem_delref(ksr->ksr_ksem);
		free(ksr, M_SEM);
	}

	rw_exit(&kp->kp_lock);
	free(kp, M_SEM);
}

static void
ksem_add_proc(struct proc *p, struct ksem *ks)
{
	struct ksem_proc *kp;
	struct ksem_ref *ksr;

	kp = proc_getspecific(p, ksem_specificdata_key);
	if (kp == NULL) {
		kp = ksem_proc_alloc();
		proc_setspecific(p, ksem_specificdata_key, kp);
	}

	ksr = malloc(sizeof(*ksr), M_SEM, M_WAITOK);
	ksr->ksr_ksem = ks;

	rw_enter(&kp->kp_lock, RW_WRITER);
	LIST_INSERT_HEAD(&kp->kp_ksems, ksr, ksr_list);
	rw_exit(&kp->kp_lock);
}

/* We MUST have a write lock on the ksem_proc list! */
static struct ksem_ref *
ksem_drop_proc(struct ksem_proc *kp, struct ksem *ks)
{
	struct ksem_ref *ksr;

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
		if (ksr->ksr_ksem == ks) {
			ksem_delref(ks);
			LIST_REMOVE(ksr, ksr_list);
			return (ksr);
		}
	}
#ifdef DIAGNOSTIC
	panic("ksem_drop_proc: ksem_proc %p ksem %p", kp, ks);
#endif
	return (NULL);
}

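/*
 * Check whether the calling lwp may access the semaphore: the creator's
 * uid/gid is compared against the caller's effective ids together with
 * the S_IW* mode bits, and the super-user is always allowed.
 */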
static int
ksem_perm(struct lwp *l, struct ksem *ks)
{
	kauth_cred_t uc;

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	uc = l->l_cred;
	if ((kauth_cred_geteuid(uc) == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) ||
	    (kauth_cred_getegid(uc) == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) ||
	    (ks->ks_mode & S_IWOTH) != 0 ||
	    kauth_authorize_generic(uc, KAUTH_GENERIC_ISSUSER, &l->l_acflag) == 0)
		return (0);
	return (EPERM);
}

static struct ksem *
ksem_lookup_byid(semid_t id)
{
	struct ksem *ks;

	LOCK_ASSERT(mutex_owned(&ksem_mutex));
	LIST_FOREACH(ks, &ksem_hash[SEM_HASH(id)], ks_hash) {
		if (ks->ks_id == id)
			return ks;
	}
	return NULL;
}

static struct ksem *
ksem_lookup_byname(const char *name)
{
	struct ksem *ks;

	LOCK_ASSERT(mutex_owned(&ksem_mutex));
	LIST_FOREACH(ks, &ksem_head, ks_entry) {
		if (strcmp(ks->ks_name, name) == 0) {
			mutex_enter(&ks->ks_interlock);
			return (ks);
		}
	}
	return (NULL);
}

static int
ksem_create(struct lwp *l, const char *name, struct ksem **ksret,
    mode_t mode, unsigned int value)
{
	struct ksem *ret;
	kauth_cred_t uc;
	size_t len;

	uc = l->l_cred;
	if (value > SEM_VALUE_MAX)
		return (EINVAL);
	ret = malloc(sizeof(*ret), M_SEM, M_WAITOK | M_ZERO);
	if (name != NULL) {
		len = strlen(name);
		if (len > SEM_MAX_NAMELEN) {
			free(ret, M_SEM);
			return (ENAMETOOLONG);
		}
		/* Name must start with a '/' and contain no other '/'. */
		if (*name != '/' || len < 2 || strchr(name + 1, '/') != NULL) {
			free(ret, M_SEM);
			return (EINVAL);
		}
		ret->ks_name = malloc(len + 1, M_SEM, M_WAITOK);
		strlcpy(ret->ks_name, name, len + 1);
	} else
		ret->ks_name = NULL;
	ret->ks_mode = mode;
	ret->ks_value = value;
	ret->ks_ref = 1;
	ret->ks_waiters = 0;
	ret->ks_uid = kauth_cred_geteuid(uc);
	ret->ks_gid = kauth_cred_getegid(uc);
	mutex_init(&ret->ks_interlock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ksem_mutex);
	if (nsems >= SEM_MAX) {
		mutex_exit(&ksem_mutex);
		if (ret->ks_name != NULL)
			free(ret->ks_name, M_SEM);
		free(ret, M_SEM);
		return (ENFILE);
	}
	nsems++;
	while (ksem_lookup_byid(ksem_counter) != NULL) {
		ksem_counter++;
		/* 0 is a special value for libpthread */
		if (ksem_counter == 0)
			ksem_counter++;
	}
	ret->ks_id = ksem_counter;
	LIST_INSERT_HEAD(&ksem_hash[SEM_HASH(ret->ks_id)], ret, ks_hash);
	mutex_exit(&ksem_mutex);

	*ksret = ret;
	return (0);
}

int
sys__ksem_init(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_init_args /* {
		unsigned int value;
		semid_t *idp;
	} */ *uap = v;

	return do_ksem_init(l, SCARG(uap, value), SCARG(uap, idp), copyout);
}

int
do_ksem_init(struct lwp *l, unsigned int value, semid_t *idp,
    copyout_t docopyout)
{
	struct ksem *ks;
	semid_t id;
	int error;

	/* Note the mode does not matter for anonymous semaphores. */
	error = ksem_create(l, NULL, &ks, 0, value);
	if (error)
		return (error);
	id = SEM_TO_ID(ks);
	error = (*docopyout)(&id, idp, sizeof(id));
	if (error) {
		mutex_enter(&ks->ks_interlock);
		ksem_delref(ks);
		return (error);
	}

	ksem_add_proc(l->l_proc, ks);

	return (0);
}
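
/*
 * For reference, userland is expected to reach do_ksem_init() through
 * the _ksem_init() stub, roughly as in the following sketch (the calling
 * code shown here is illustrative and not part of this file):
 *
 *	semid_t id;
 *
 *	if (_ksem_init(1, &id) == -1)
 *		err(1, "_ksem_init");
 *	_ksem_wait(id);
 *	_ksem_post(id);
 *	_ksem_destroy(id);
 */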

int
sys__ksem_open(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_open_args /* {
		const char *name;
		int oflag;
		mode_t mode;
		unsigned int value;
		semid_t *idp;
	} */ *uap = v;

	return do_ksem_open(l, SCARG(uap, name), SCARG(uap, oflag),
	    SCARG(uap, mode), SCARG(uap, value), SCARG(uap, idp), copyout);
}

int
do_ksem_open(struct lwp *l, const char *semname, int oflag, mode_t mode,
    unsigned int value, semid_t *idp, copyout_t docopyout)
{
	char name[SEM_MAX_NAMELEN + 1];
	size_t done;
	int error;
	struct ksem *ksnew, *ks;
	semid_t id;

	error = copyinstr(semname, name, sizeof(name), &done);
	if (error)
		return (error);

	ksnew = NULL;
	mutex_enter(&ksem_mutex);
	ks = ksem_lookup_byname(name);

	/* Found one? */
	if (ks != NULL) {
		/* Check for exclusive create. */
		if (oflag & O_EXCL) {
			mutex_exit(&ks->ks_interlock);
			mutex_exit(&ksem_mutex);
			return (EEXIST);
		}
 found_one:
		/*
		 * Verify permissions.  If we can access it, add
		 * this process's reference.
		 */
		LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
		error = ksem_perm(l, ks);
		if (error == 0)
			ksem_addref(ks);
		mutex_exit(&ks->ks_interlock);
		mutex_exit(&ksem_mutex);
		if (error)
			return (error);

		id = SEM_TO_ID(ks);
		error = (*docopyout)(&id, idp, sizeof(id));
		if (error) {
			mutex_enter(&ks->ks_interlock);
			ksem_delref(ks);
			return (error);
		}

		ksem_add_proc(l->l_proc, ks);

		return (0);
	}

	/*
	 * Didn't ask for creation?  Error out.
	 */
	if ((oflag & O_CREAT) == 0) {
		mutex_exit(&ksem_mutex);
		return (ENOENT);
	}

	/*
	 * We may block during creation, so drop the lock.
	 */
	mutex_exit(&ksem_mutex);
	error = ksem_create(l, name, &ksnew, mode, value);
	if (error != 0)
		return (error);

	id = SEM_TO_ID(ksnew);
	error = (*docopyout)(&id, idp, sizeof(id));
	if (error) {
		free(ksnew->ks_name, M_SEM);
		ksnew->ks_name = NULL;

		mutex_enter(&ksnew->ks_interlock);
		ksem_delref(ksnew);
		return (error);
	}

	/*
	 * Check that we have not lost a race with another thread that
	 * created the same name while we were blocked creating ours.
	 */
	mutex_enter(&ksem_mutex);
	if ((ks = ksem_lookup_byname(name)) != NULL) {
		if (oflag & O_EXCL) {
			mutex_exit(&ks->ks_interlock);
			mutex_exit(&ksem_mutex);

			free(ksnew->ks_name, M_SEM);
			ksnew->ks_name = NULL;

			mutex_enter(&ksnew->ks_interlock);
			ksem_delref(ksnew);
			return (EEXIST);
		}
		goto found_one;
	} else {
		/* ksnew already has its initial reference. */
		LIST_INSERT_HEAD(&ksem_head, ksnew, ks_entry);
		mutex_exit(&ksem_mutex);

		ksem_add_proc(l->l_proc, ksnew);
	}
	return (error);
}
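
/*
 * For reference, the named-semaphore path above corresponds to a
 * userland sequence roughly like the following sketch (illustrative
 * only; the real sem_open(3) wrappers may differ):
 *
 *	semid_t id;
 *
 *	if (_ksem_open("/mysem", O_CREAT | O_EXCL, 0600, 1, &id) == -1)
 *		err(1, "_ksem_open");
 *	...
 *	_ksem_close(id);
 *	_ksem_unlink("/mysem");
 */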

/* We must have a read lock on the ksem_proc list! */
static struct ksem *
ksem_lookup_proc(struct ksem_proc *kp, semid_t id)
{
	struct ksem_ref *ksr;

	LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
		if (id == SEM_TO_ID(ksr->ksr_ksem)) {
			mutex_enter(&ksr->ksr_ksem->ks_interlock);
			return (ksr->ksr_ksem);
		}
	}

	return (NULL);
}

int
sys__ksem_unlink(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_unlink_args /* {
		const char *name;
	} */ *uap = v;
	char name[SEM_MAX_NAMELEN + 1], *cp;
	size_t done;
	struct ksem *ks;
	int error;

	error = copyinstr(SCARG(uap, name), name, sizeof(name), &done);
	if (error)
		return error;

	mutex_enter(&ksem_mutex);
	ks = ksem_lookup_byname(name);
	if (ks == NULL) {
		mutex_exit(&ksem_mutex);
		return (ENOENT);
	}

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));

	LIST_REMOVE(ks, ks_entry);
	cp = ks->ks_name;
	ks->ks_name = NULL;

	mutex_exit(&ksem_mutex);

	if (ks->ks_ref == 0)
		ksem_free(ks);
	else
		mutex_exit(&ks->ks_interlock);

	free(cp, M_SEM);

	return (0);
}

int
sys__ksem_close(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_close_args /* {
		semid_t id;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem_ref *ksr;
	struct ksem *ks;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_WRITER);

	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	if (ks == NULL) {
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	if (ks->ks_name == NULL) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	ksr = ksem_drop_proc(kp, ks);
	rw_exit(&kp->kp_lock);
	free(ksr, M_SEM);

	return (0);
}

int
sys__ksem_post(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_post_args /* {
		semid_t id;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem *ks;
	int error;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	if (ks->ks_value == SEM_VALUE_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	++ks->ks_value;
	if (ks->ks_waiters)
		wakeup(ks);
	error = 0;
 out:
	mutex_exit(&ks->ks_interlock);
	return (error);
}

static int
ksem_wait(struct lwp *l, semid_t id, int tryflag)
{
	struct ksem_proc *kp;
	struct ksem *ks;
	int error;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, id);
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	ksem_addref(ks);
	while (ks->ks_value == 0) {
		ks->ks_waiters++;
		error = tryflag ? EAGAIN : mtsleep(ks, PCATCH, "psem", 0,
		    &ks->ks_interlock);
		ks->ks_waiters--;
		if (error)
			goto out;
	}
	ks->ks_value--;
	error = 0;
 out:
	ksem_delref(ks);
	return (error);
}
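
/*
 * Note: ksem_wait() sleeps on the ksem's address with ks_interlock held
 * (mtsleep() drops and reacquires it around the sleep), and
 * sys__ksem_post() issues the matching wakeup() whenever ks_waiters is
 * non-zero.  ks_waiters is only manipulated with ks_interlock held.
 */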

int
sys__ksem_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_wait_args /* {
		semid_t id;
	} */ *uap = v;

	return ksem_wait(l, SCARG(uap, id), 0);
}

int
sys__ksem_trywait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_trywait_args /* {
		semid_t id;
	} */ *uap = v;

	return ksem_wait(l, SCARG(uap, id), 1);
}

int
sys__ksem_getvalue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_getvalue_args /* {
		semid_t id;
		unsigned int *value;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem *ks;
	unsigned int val;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));
	val = ks->ks_value;
	mutex_exit(&ks->ks_interlock);

	return (copyout(&val, SCARG(uap, value), sizeof(val)));
}

int
sys__ksem_destroy(struct lwp *l, void *v, register_t *retval)
{
	struct sys__ksem_destroy_args /* {
		semid_t id;
	} */ *uap = v;
	struct ksem_proc *kp;
	struct ksem_ref *ksr;
	struct ksem *ks;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_WRITER);

	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	if (ks == NULL) {
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	LOCK_ASSERT(mutex_owned(&ks->ks_interlock));

	/*
	 * XXX This misses named semaphores which have been unlink'd,
	 * XXX but since behavior of destroying a named semaphore is
	 * XXX undefined, this is technically allowed.
	 */
	if (ks->ks_name != NULL) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	if (ks->ks_waiters) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EBUSY);
	}

	ksr = ksem_drop_proc(kp, ks);
	rw_exit(&kp->kp_lock);
	free(ksr, M_SEM);

	return (0);
}

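/*
 * Fork hook: the child inherits a reference to each semaphore the
 * parent currently has open.
 */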
static void
ksem_forkhook(struct proc *p2, struct proc *p1)
{
	struct ksem_proc *kp1, *kp2;
	struct ksem_ref *ksr, *ksr1;

	kp1 = proc_getspecific(p1, ksem_specificdata_key);
	if (kp1 == NULL)
		return;

	kp2 = ksem_proc_alloc();

	rw_enter(&kp1->kp_lock, RW_READER);

	if (!LIST_EMPTY(&kp1->kp_ksems)) {
		LIST_FOREACH(ksr, &kp1->kp_ksems, ksr_list) {
			ksr1 = malloc(sizeof(*ksr), M_SEM, M_WAITOK);
			ksr1->ksr_ksem = ksr->ksr_ksem;
			mutex_enter(&ksr->ksr_ksem->ks_interlock);
			ksem_addref(ksr->ksr_ksem);
			mutex_exit(&ksr->ksr_ksem->ks_interlock);
			LIST_INSERT_HEAD(&kp2->kp_ksems, ksr1, ksr_list);
		}
	}

	rw_exit(&kp1->kp_lock);
	proc_setspecific(p2, ksem_specificdata_key, kp2);
}

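/*
 * Exec hook: drop all of the process's semaphore references when it
 * execs a new image.
 */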
static void
ksem_exechook(struct proc *p, void *arg)
{
	struct ksem_proc *kp;

	kp = proc_getspecific(p, ksem_specificdata_key);
	if (kp != NULL) {
		proc_setspecific(p, ksem_specificdata_key, NULL);
		ksem_proc_dtor(kp);
	}
}

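/*
 * One-time initialization: set up ksem_mutex, the name hash table, the
 * fork and exec hooks, and the per-process specificdata key.
 */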
void
ksem_init(void)
{
	int i, error;

	mutex_init(&ksem_mutex, MUTEX_DEFAULT, IPL_NONE);
	exechook_establish(ksem_exechook, NULL);
	forkhook_establish(ksem_forkhook);

	for (i = 0; i < SEM_HASHTBL_SIZE; i++)
		LIST_INIT(&ksem_hash[i]);

	error = proc_specific_key_create(&ksem_specificdata_key,
	    ksem_proc_dtor);
	KASSERT(error == 0);
}