/*	$NetBSD: uipc_sem.c,v 1.29 2008/11/14 15:49:21 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2002 Alfred Perlstein <alfred (at) FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.29 2008/11/14 15:49:21 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/ksem.h>
#include <sys/syscall.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/syscallvar.h>

#define SEM_MAX_NAMELEN		14
#define SEM_VALUE_MAX		(~0U)
#define SEM_HASHTBL_SIZE	13

#define SEM_TO_ID(x)	(((x)->ks_id))
#define SEM_HASH(id)	((id) % SEM_HASHTBL_SIZE)

MODULE(MODULE_CLASS_MISC, ksem, NULL);

static const struct syscall_package ksem_syscalls[] = {
	{ SYS__ksem_init, 0, (sy_call_t *)sys__ksem_init },
	{ SYS__ksem_open, 0, (sy_call_t *)sys__ksem_open },
	{ SYS__ksem_unlink, 0, (sy_call_t *)sys__ksem_unlink },
	{ SYS__ksem_close, 0, (sy_call_t *)sys__ksem_close },
	{ SYS__ksem_post, 0, (sy_call_t *)sys__ksem_post },
	{ SYS__ksem_wait, 0, (sy_call_t *)sys__ksem_wait },
	{ SYS__ksem_trywait, 0, (sy_call_t *)sys__ksem_trywait },
	{ SYS__ksem_getvalue, 0, (sy_call_t *)sys__ksem_getvalue },
	{ SYS__ksem_destroy, 0, (sy_call_t *)sys__ksem_destroy },
	{ 0, 0, NULL },
};

/*
 * Note: to read the ks_name member, you need either the ks_interlock
 * or the ksem_mutex.  To write the ks_name member, you need both.  Make
 * sure the order is ksem_mutex -> ks_interlock.
 */
struct ksem {
	LIST_ENTRY(ksem)	ks_entry;	/* global list entry */
	LIST_ENTRY(ksem)	ks_hash;	/* hash list entry */
	kmutex_t		ks_interlock;	/* lock on this ksem */
	kcondvar_t		ks_cv;		/* condition variable */
	unsigned int		ks_ref;		/* number of references */
	char			*ks_name;	/* if named, this is the name */
	size_t			ks_namelen;	/* length of name */
	mode_t			ks_mode;	/* protection bits */
	uid_t			ks_uid;		/* creator uid */
	gid_t			ks_gid;		/* creator gid */
	unsigned int		ks_value;	/* current value */
	unsigned int		ks_waiters;	/* number of waiters */
	intptr_t		ks_id;		/* unique identifier */
};

struct ksem_ref {
	LIST_ENTRY(ksem_ref)	ksr_list;
	struct ksem		*ksr_ksem;
};

struct ksem_proc {
	krwlock_t		kp_lock;
	LIST_HEAD(, ksem_ref)	kp_ksems;
};

LIST_HEAD(ksem_list, ksem);

/*
 * ksem_mutex protects ksem_head and nsems.  Only named semaphores go
 * onto ksem_head.
 */
static kmutex_t ksem_mutex;
static struct ksem_list ksem_head = LIST_HEAD_INITIALIZER(&ksem_head);
static struct ksem_list ksem_hash[SEM_HASHTBL_SIZE];
static int nsems = 0;

/*
 * ksem_counter holds the last assigned semaphore id.  It is kept as a
 * 32-bit value so that the ids it produces stay COMPAT_NETBSD32
 * friendly, even though ks_id itself is an intptr_t.
 */
static uint32_t ksem_counter = 1;

static specificdata_key_t ksem_specificdata_key;
static void *ksem_ehook;
static void *ksem_fhook;

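/*
 * Drop the final reference on a semaphore: destroy its locks and free it.
 * A named semaphore that is still linked (ks_name != NULL) is left alone;
 * only anonymous or unlinked semaphores are torn down here.  Called with
 * ks_interlock held; the lock is released (or destroyed) on return.
 */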
static void
ksem_free(struct ksem *ks)
{

	KASSERT(mutex_owned(&ks->ks_interlock));

	/*
	 * If the ksem is anonymous (or has been unlinked), then
	 * this is the end of its life.
	 */
	if (ks->ks_name == NULL) {
		mutex_exit(&ks->ks_interlock);
		mutex_destroy(&ks->ks_interlock);
		cv_destroy(&ks->ks_cv);

		mutex_enter(&ksem_mutex);
		nsems--;
		LIST_REMOVE(ks, ks_hash);
		mutex_exit(&ksem_mutex);

		kmem_free(ks, sizeof(*ks));
		return;
	}
	mutex_exit(&ks->ks_interlock);
}

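/*
 * Reference counting: ksem_addref() takes a new reference, ksem_delref()
 * drops one and frees the semaphore when the count reaches zero.  Both
 * expect ks_interlock to be held; ksem_delref() releases it.
 */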
static inline void
ksem_addref(struct ksem *ks)
{

	KASSERT(mutex_owned(&ks->ks_interlock));
	ks->ks_ref++;
	KASSERT(ks->ks_ref != 0);
}

static inline void
ksem_delref(struct ksem *ks)
{

	KASSERT(mutex_owned(&ks->ks_interlock));
	KASSERT(ks->ks_ref != 0);
	if (--ks->ks_ref == 0) {
		ksem_free(ks);
		return;
	}
	mutex_exit(&ks->ks_interlock);
}

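/*
 * Allocate and initialize the per-process ksem state: a lock plus the
 * list of semaphore references.  Attached lazily the first time a
 * process uses a semaphore.
 */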
static struct ksem_proc *
ksem_proc_alloc(void)
{
	struct ksem_proc *kp;

	kp = kmem_alloc(sizeof(*kp), KM_SLEEP);
	rw_init(&kp->kp_lock);
	LIST_INIT(&kp->kp_ksems);

	return (kp);
}

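/*
 * Destructor for the per-process ksem state: drop every semaphore
 * reference held by the process, then free the bookkeeping structures.
 * Used as the specificdata destructor and from the exec hook.
 */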
static void
ksem_proc_dtor(void *arg)
{
	struct ksem_proc *kp = arg;
	struct ksem_ref *ksr;

	rw_enter(&kp->kp_lock, RW_WRITER);

	while ((ksr = LIST_FIRST(&kp->kp_ksems)) != NULL) {
		LIST_REMOVE(ksr, ksr_list);
		mutex_enter(&ksr->ksr_ksem->ks_interlock);
		ksem_delref(ksr->ksr_ksem);
		kmem_free(ksr, sizeof(*ksr));
	}

	rw_exit(&kp->kp_lock);
	rw_destroy(&kp->kp_lock);
	kmem_free(kp, sizeof(*kp));
}

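/*
 * Record that process p holds a reference to semaphore ks by linking a
 * new ksem_ref onto the process's list, creating the per-process state
 * on first use.  The caller must already hold a reference on ks.
 */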
static void
ksem_add_proc(struct proc *p, struct ksem *ks)
{
	struct ksem_proc *kp;
	struct ksem_ref *ksr;

	kp = proc_getspecific(p, ksem_specificdata_key);
	if (kp == NULL) {
		kp = ksem_proc_alloc();
		proc_setspecific(p, ksem_specificdata_key, kp);
	}

	ksr = kmem_alloc(sizeof(*ksr), KM_SLEEP);
	ksr->ksr_ksem = ks;

	rw_enter(&kp->kp_lock, RW_WRITER);
	LIST_INSERT_HEAD(&kp->kp_ksems, ksr, ksr_list);
	rw_exit(&kp->kp_lock);
}

/* We MUST have a write lock on the ksem_proc list! */
static struct ksem_ref *
ksem_drop_proc(struct ksem_proc *kp, struct ksem *ks)
{
	struct ksem_ref *ksr;

	KASSERT(mutex_owned(&ks->ks_interlock));
	LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
		if (ksr->ksr_ksem == ks) {
			ksem_delref(ks);
			LIST_REMOVE(ksr, ksr_list);
			return (ksr);
		}
	}
#ifdef DIAGNOSTIC
	panic("ksem_drop_proc: ksem_proc %p ksem %p", kp, ks);
#endif
	return (NULL);
}

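/*
 * Check whether the calling lwp may access semaphore ks: the owner with
 * S_IWUSR set, a group member with S_IWGRP set, anyone if S_IWOTH is set,
 * or the superuser.  Returns 0 on success, EPERM otherwise.
 */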
static int
ksem_perm(struct lwp *l, struct ksem *ks)
{
	kauth_cred_t uc;

	KASSERT(mutex_owned(&ks->ks_interlock));
	uc = l->l_cred;
	if ((kauth_cred_geteuid(uc) == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) ||
	    (kauth_cred_getegid(uc) == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) ||
	    (ks->ks_mode & S_IWOTH) != 0 ||
	    kauth_authorize_generic(uc, KAUTH_GENERIC_ISSUSER, NULL) == 0)
		return (0);
	return (EPERM);
}

static struct ksem *
ksem_lookup_byid(intptr_t id)
{
	struct ksem *ks;

	KASSERT(mutex_owned(&ksem_mutex));
	LIST_FOREACH(ks, &ksem_hash[SEM_HASH(id)], ks_hash) {
		if (ks->ks_id == id)
			return ks;
	}
	return NULL;
}

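/*
 * Look up a named semaphore on the global list.  On a match the ksem is
 * returned with its interlock held; the caller must hold ksem_mutex.
 */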
static struct ksem *
ksem_lookup_byname(const char *name)
{
	struct ksem *ks;

	KASSERT(mutex_owned(&ksem_mutex));
	LIST_FOREACH(ks, &ksem_head, ks_entry) {
		if (strcmp(ks->ks_name, name) == 0) {
			mutex_enter(&ks->ks_interlock);
			return (ks);
		}
	}
	return (NULL);
}

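/*
 * Create a new semaphore with the given name (NULL for an anonymous
 * semaphore), mode and initial value, assign it a unique id and enter it
 * into the hash table.  The new ksem is returned with one reference held.
 */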
static int
ksem_create(struct lwp *l, const char *name, struct ksem **ksret,
    mode_t mode, unsigned int value)
{
	struct ksem *ret;
	kauth_cred_t uc;
	size_t len;

	uc = l->l_cred;
	if (value > SEM_VALUE_MAX)
		return (EINVAL);
	ret = kmem_zalloc(sizeof(*ret), KM_SLEEP);
	if (name != NULL) {
		len = strlen(name);
		if (len > SEM_MAX_NAMELEN) {
			kmem_free(ret, sizeof(*ret));
			return (ENAMETOOLONG);
		}
		/* name must start with a '/' and contain no other '/'. */
		if (*name != '/' || len < 2 || strchr(name + 1, '/') != NULL) {
			kmem_free(ret, sizeof(*ret));
			return (EINVAL);
		}
		ret->ks_namelen = len + 1;
		ret->ks_name = kmem_alloc(ret->ks_namelen, KM_SLEEP);
		strlcpy(ret->ks_name, name, len + 1);
	} else
		ret->ks_name = NULL;
	ret->ks_mode = mode;
	ret->ks_value = value;
	ret->ks_ref = 1;
	ret->ks_waiters = 0;
	ret->ks_uid = kauth_cred_geteuid(uc);
	ret->ks_gid = kauth_cred_getegid(uc);
	mutex_init(&ret->ks_interlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&ret->ks_cv, "psem");

	mutex_enter(&ksem_mutex);
	if (nsems >= ksem_max) {
		mutex_exit(&ksem_mutex);
		if (ret->ks_name != NULL)
			kmem_free(ret->ks_name, ret->ks_namelen);
		kmem_free(ret, sizeof(*ret));
		return (ENFILE);
	}
	nsems++;
	while (ksem_lookup_byid(ksem_counter) != NULL) {
		ksem_counter++;
		/* 0 is a special value for libpthread */
		if (ksem_counter == 0)
			ksem_counter++;
	}
	ret->ks_id = ksem_counter;
	LIST_INSERT_HEAD(&ksem_hash[SEM_HASH(ret->ks_id)], ret, ks_hash);
	mutex_exit(&ksem_mutex);

	*ksret = ret;
	return (0);
}

int
sys__ksem_init(struct lwp *l, const struct sys__ksem_init_args *uap, register_t *retval)
{
	/* {
		unsigned int value;
		intptr_t *idp;
	} */

	return do_ksem_init(l, SCARG(uap, value), SCARG(uap, idp), copyout);
}

int
do_ksem_init(struct lwp *l, unsigned int value, intptr_t *idp,
    copyout_t docopyout)
{
	struct ksem *ks;
	intptr_t id;
	int error;

	/* Note the mode does not matter for anonymous semaphores. */
	error = ksem_create(l, NULL, &ks, 0, value);
	if (error)
		return (error);
	id = SEM_TO_ID(ks);
	error = (*docopyout)(&id, idp, sizeof(id));
	if (error) {
		mutex_enter(&ks->ks_interlock);
		ksem_delref(ks);
		return (error);
	}

	ksem_add_proc(l->l_proc, ks);

	return (0);
}

int
sys__ksem_open(struct lwp *l, const struct sys__ksem_open_args *uap, register_t *retval)
{
	/* {
		const char *name;
		int oflag;
		mode_t mode;
		unsigned int value;
		intptr_t *idp;
	} */

	return do_ksem_open(l, SCARG(uap, name), SCARG(uap, oflag),
	    SCARG(uap, mode), SCARG(uap, value), SCARG(uap, idp), copyout);
}

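/*
 * Guts of _ksem_open(): look the name up on the global list and, depending
 * on oflag (O_CREAT, O_EXCL), either return the existing semaphore, create
 * a new one, or fail.  On success the semaphore id is copied out to idp and
 * the reference is recorded against the calling process.
 */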
int
do_ksem_open(struct lwp *l, const char *semname, int oflag, mode_t mode,
    unsigned int value, intptr_t *idp, copyout_t docopyout)
{
	char name[SEM_MAX_NAMELEN + 1];
	size_t done;
	int error;
	struct ksem *ksnew, *ks;
	intptr_t id;

	error = copyinstr(semname, name, sizeof(name), &done);
	if (error)
		return (error);

	ksnew = NULL;
	mutex_enter(&ksem_mutex);
	ks = ksem_lookup_byname(name);

	/* Found one? */
	if (ks != NULL) {
		/* Check for exclusive create. */
		if (oflag & O_EXCL) {
			mutex_exit(&ks->ks_interlock);
			mutex_exit(&ksem_mutex);
			return (EEXIST);
		}
 found_one:
		/*
		 * Verify permissions.  If we can access it, add
		 * this process's reference.
		 */
		KASSERT(mutex_owned(&ks->ks_interlock));
		error = ksem_perm(l, ks);
		if (error == 0)
			ksem_addref(ks);
		mutex_exit(&ks->ks_interlock);
		mutex_exit(&ksem_mutex);
		if (error)
			return (error);

		id = SEM_TO_ID(ks);
		error = (*docopyout)(&id, idp, sizeof(id));
		if (error) {
			mutex_enter(&ks->ks_interlock);
			ksem_delref(ks);
			return (error);
		}

		ksem_add_proc(l->l_proc, ks);

		return (0);
	}

	/*
	 * didn't ask for creation? error.
	 */
	if ((oflag & O_CREAT) == 0) {
		mutex_exit(&ksem_mutex);
		return (ENOENT);
	}

	/*
	 * We may block during creation, so drop the lock.
	 */
	mutex_exit(&ksem_mutex);
	error = ksem_create(l, name, &ksnew, mode, value);
	if (error != 0)
		return (error);

	id = SEM_TO_ID(ksnew);
	error = (*docopyout)(&id, idp, sizeof(id));
	if (error) {
		kmem_free(ksnew->ks_name, ksnew->ks_namelen);
		ksnew->ks_name = NULL;

		mutex_enter(&ksnew->ks_interlock);
		ksem_delref(ksnew);
		return (error);
	}

	/*
	 * We need to make sure we haven't lost a race while
	 * allocating during creation.
	 */
	mutex_enter(&ksem_mutex);
	if ((ks = ksem_lookup_byname(name)) != NULL) {
		if (oflag & O_EXCL) {
			mutex_exit(&ks->ks_interlock);
			mutex_exit(&ksem_mutex);

			kmem_free(ksnew->ks_name, ksnew->ks_namelen);
			ksnew->ks_name = NULL;

			mutex_enter(&ksnew->ks_interlock);
			ksem_delref(ksnew);
			return (EEXIST);
		}
		goto found_one;
	} else {
		/* ksnew already has its initial reference. */
		LIST_INSERT_HEAD(&ksem_head, ksnew, ks_entry);
		mutex_exit(&ksem_mutex);

		ksem_add_proc(l->l_proc, ksnew);
	}
	return (error);
}

/* We must have a read lock on the ksem_proc list! */
static struct ksem *
ksem_lookup_proc(struct ksem_proc *kp, intptr_t id)
{
	struct ksem_ref *ksr;

	LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
		if (id == SEM_TO_ID(ksr->ksr_ksem)) {
			mutex_enter(&ksr->ksr_ksem->ks_interlock);
			return (ksr->ksr_ksem);
		}
	}

	return (NULL);
}

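/*
 * _ksem_unlink(name): remove a named semaphore from the global list and
 * detach its name.  The semaphore itself persists until the last
 * reference is dropped, at which point ksem_free() reclaims it.
 */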
int
sys__ksem_unlink(struct lwp *l, const struct sys__ksem_unlink_args *uap, register_t *retval)
{
	/* {
		const char *name;
	} */
	char name[SEM_MAX_NAMELEN + 1], *cp;
	size_t done, len;
	struct ksem *ks;
	int error;

	error = copyinstr(SCARG(uap, name), name, sizeof(name), &done);
	if (error)
		return error;

	mutex_enter(&ksem_mutex);
	ks = ksem_lookup_byname(name);
	if (ks == NULL) {
		mutex_exit(&ksem_mutex);
		return (ENOENT);
	}

	KASSERT(mutex_owned(&ks->ks_interlock));

	LIST_REMOVE(ks, ks_entry);
	cp = ks->ks_name;
	len = ks->ks_namelen;
	ks->ks_name = NULL;

	mutex_exit(&ksem_mutex);

	if (ks->ks_ref == 0)
		ksem_free(ks);
	else
		mutex_exit(&ks->ks_interlock);

	kmem_free(cp, len);

	return (0);
}

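/*
 * _ksem_close(id): drop this process's reference to a named semaphore.
 * Closing an unnamed (sem_init-style) semaphore is rejected with EINVAL;
 * those are released with _ksem_destroy() instead.
 */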
int
sys__ksem_close(struct lwp *l, const struct sys__ksem_close_args *uap, register_t *retval)
{
	/* {
		intptr_t id;
	} */
	struct ksem_proc *kp;
	struct ksem_ref *ksr;
	struct ksem *ks;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_WRITER);

	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	if (ks == NULL) {
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	KASSERT(mutex_owned(&ks->ks_interlock));
	if (ks->ks_name == NULL) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	ksr = ksem_drop_proc(kp, ks);
	rw_exit(&kp->kp_lock);
	kmem_free(ksr, sizeof(*ksr));

	return (0);
}

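/*
 * _ksem_post(id): increment the semaphore value and wake any waiters.
 * Fails with EOVERFLOW if the value is already at SEM_VALUE_MAX.
 */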
int
sys__ksem_post(struct lwp *l, const struct sys__ksem_post_args *uap, register_t *retval)
{
	/* {
		intptr_t id;
	} */
	struct ksem_proc *kp;
	struct ksem *ks;
	int error;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	KASSERT(mutex_owned(&ks->ks_interlock));
	if (ks->ks_value == SEM_VALUE_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	++ks->ks_value;
	if (ks->ks_waiters)
		cv_broadcast(&ks->ks_cv);
	error = 0;
 out:
	mutex_exit(&ks->ks_interlock);
	return (error);
}

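/*
 * Common code for _ksem_wait() and _ksem_trywait(): decrement the
 * semaphore value, sleeping (interruptibly) while it is zero.  With
 * tryflag set, return EAGAIN instead of sleeping.
 */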
static int
ksem_wait(struct lwp *l, intptr_t id, int tryflag)
{
	struct ksem_proc *kp;
	struct ksem *ks;
	int error;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, id);
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	KASSERT(mutex_owned(&ks->ks_interlock));
	ksem_addref(ks);
	while (ks->ks_value == 0) {
		ks->ks_waiters++;
		if (tryflag)
			error = EAGAIN;
		else
			error = cv_wait_sig(&ks->ks_cv, &ks->ks_interlock);
		ks->ks_waiters--;
		if (error)
			goto out;
	}
	ks->ks_value--;
	error = 0;
 out:
	ksem_delref(ks);
	return (error);
}

int
sys__ksem_wait(struct lwp *l, const struct sys__ksem_wait_args *uap, register_t *retval)
{
	/* {
		intptr_t id;
	} */

	return ksem_wait(l, SCARG(uap, id), 0);
}

int
sys__ksem_trywait(struct lwp *l, const struct sys__ksem_trywait_args *uap, register_t *retval)
{
	/* {
		intptr_t id;
	} */

	return ksem_wait(l, SCARG(uap, id), 1);
}

int
sys__ksem_getvalue(struct lwp *l, const struct sys__ksem_getvalue_args *uap, register_t *retval)
{
	/* {
		intptr_t id;
		unsigned int *value;
	} */
	struct ksem_proc *kp;
	struct ksem *ks;
	unsigned int val;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_READER);
	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	rw_exit(&kp->kp_lock);
	if (ks == NULL)
		return (EINVAL);

	KASSERT(mutex_owned(&ks->ks_interlock));
	val = ks->ks_value;
	mutex_exit(&ks->ks_interlock);

	return (copyout(&val, SCARG(uap, value), sizeof(val)));
}

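/*
 * _ksem_destroy(id): tear down an unnamed semaphore.  Named semaphores
 * (EINVAL) and semaphores with waiters (EBUSY) are rejected.
 */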
int
sys__ksem_destroy(struct lwp *l, const struct sys__ksem_destroy_args *uap, register_t *retval)
{
	/* {
		intptr_t id;
	} */
	struct ksem_proc *kp;
	struct ksem_ref *ksr;
	struct ksem *ks;

	kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
	if (kp == NULL)
		return (EINVAL);

	rw_enter(&kp->kp_lock, RW_WRITER);

	ks = ksem_lookup_proc(kp, SCARG(uap, id));
	if (ks == NULL) {
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	KASSERT(mutex_owned(&ks->ks_interlock));

	/*
	 * XXX This misses named semaphores which have been unlink'd,
	 * XXX but since behavior of destroying a named semaphore is
	 * XXX undefined, this is technically allowed.
	 */
	if (ks->ks_name != NULL) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EINVAL);
	}

	if (ks->ks_waiters) {
		mutex_exit(&ks->ks_interlock);
		rw_exit(&kp->kp_lock);
		return (EBUSY);
	}

	ksr = ksem_drop_proc(kp, ks);
	rw_exit(&kp->kp_lock);
	kmem_free(ksr, sizeof(*ksr));

	return (0);
}

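/*
 * Fork hook: give the child (p2) its own ksem_proc structure holding a
 * reference to every semaphore the parent (p1) currently has open.
 */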
static void
ksem_forkhook(struct proc *p2, struct proc *p1)
{
	struct ksem_proc *kp1, *kp2;
	struct ksem_ref *ksr, *ksr1;

	kp1 = proc_getspecific(p1, ksem_specificdata_key);
	if (kp1 == NULL)
		return;

	kp2 = ksem_proc_alloc();

	rw_enter(&kp1->kp_lock, RW_READER);

	if (!LIST_EMPTY(&kp1->kp_ksems)) {
		LIST_FOREACH(ksr, &kp1->kp_ksems, ksr_list) {
			ksr1 = kmem_alloc(sizeof(*ksr), KM_SLEEP);
			ksr1->ksr_ksem = ksr->ksr_ksem;
			mutex_enter(&ksr->ksr_ksem->ks_interlock);
			ksem_addref(ksr->ksr_ksem);
			mutex_exit(&ksr->ksr_ksem->ks_interlock);
			LIST_INSERT_HEAD(&kp2->kp_ksems, ksr1, ksr_list);
		}
	}

	rw_exit(&kp1->kp_lock);
	proc_setspecific(p2, ksem_specificdata_key, kp2);
}

static void
ksem_exechook(struct proc *p, void *arg)
{
	struct ksem_proc *kp;

	kp = proc_getspecific(p, ksem_specificdata_key);
	if (kp != NULL) {
		proc_setspecific(p, ksem_specificdata_key, NULL);
		ksem_proc_dtor(kp);
	}
}

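/*
 * Module teardown.  With interface == true the syscalls are first
 * disestablished and the unload is refused (EBUSY) if any semaphores
 * still exist; the hooks, specificdata key and mutex are then released.
 */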
static int
ksem_fini(bool interface)
{
	int error;

	if (interface) {
		error = syscall_disestablish(NULL, ksem_syscalls);
		if (error != 0) {
			return error;
		}
		if (nsems != 0) {
			error = syscall_establish(NULL, ksem_syscalls);
			KASSERT(error == 0);
			return EBUSY;
		}
	}
	exechook_disestablish(ksem_ehook);
	forkhook_disestablish(ksem_fhook);
	proc_specific_key_delete(ksem_specificdata_key);
	mutex_destroy(&ksem_mutex);
	return 0;
}

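/*
 * Module initialization: set up the global mutex, hash table, per-process
 * specificdata key and exec/fork hooks, then register the syscalls.  On
 * failure everything is rolled back via ksem_fini(false).
 */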
static int
ksem_init(void)
{
	int error, i;

	mutex_init(&ksem_mutex, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < SEM_HASHTBL_SIZE; i++)
		LIST_INIT(&ksem_hash[i]);
	error = proc_specific_key_create(&ksem_specificdata_key,
	    ksem_proc_dtor);
	if (error != 0) {
		mutex_destroy(&ksem_mutex);
		return error;
	}
	ksem_ehook = exechook_establish(ksem_exechook, NULL);
	ksem_fhook = forkhook_establish(ksem_forkhook);
	error = syscall_establish(NULL, ksem_syscalls);
	if (error != 0) {
		(void)ksem_fini(false);
	}
	return error;
}

static int
ksem_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return ksem_init();

	case MODULE_CMD_FINI:
		return ksem_fini(true);

	default:
		return ENOTTY;
	}
}