1 /* $NetBSD: sysv_sem.c,v 1.81 2008/04/25 11:21:18 ad Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Implementation of SVID semaphores
42 *
43 * Author: Daniel Boulet
44 *
45 * This software is provided ``AS IS'' without any warranties of any kind.
46 */
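/*
 * Illustrative userland usage of the interface implemented below -- a
 * minimal sketch only (not part of this file; error handling omitted,
 * and callers traditionally declare "union semun" themselves):
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	union semun arg;
 *	arg.val = 1;
 *	semctl(id, 0, SETVAL, arg);		initialize semaphore 0 to 1
 *	struct sembuf op = { 0, -1, SEM_UNDO };	sem_num, sem_op, sem_flg
 *	semop(id, &op, 1);			"P": blocks until value > 0
 *	op.sem_op = 1;
 *	semop(id, &op, 1);			"V": wakes any waiters
 *	semctl(id, 0, IPC_RMID);		destroy the set
 */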
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: sysv_sem.c,v 1.81 2008/04/25 11:21:18 ad Exp $");
50
51 #define SYSVSEM
52
53 #include <sys/param.h>
54 #include <sys/kernel.h>
55 #include <sys/sem.h>
56 #include <sys/sysctl.h>
57 #include <sys/kmem.h>
58 #include <sys/mount.h> /* XXX for <sys/syscallargs.h> */
59 #include <sys/syscallargs.h>
60 #include <sys/kauth.h>
61
62 /*
63 * Memory areas:
64 * 1st: Pool of semaphore identifiers
65 * 2nd: Semaphores
66 * 3rd: Condition variables
67 * 4th: Undo structures
68 */
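/*
 * All four areas live in a single wired allocation made in seminit(),
 * laid out back to back (each region ALIGN()ed):
 *
 *	| semid_ds x semmni | __sem x semmns | kcondvar_t x semmni | undo x semmnu |
 *	^ sema                ^ sem            ^ semcv               ^ semu
 */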
69 struct semid_ds *sema;
70 static struct __sem *sem;
71 static kcondvar_t *semcv;
72 static int *semu;
73
74 static kmutex_t semlock;
75 static struct sem_undo *semu_list; /* list of active undo structures */
76 static u_int semtot = 0; /* total number of semaphores */
77
78 static u_int sem_waiters = 0; /* total number of semop waiters */
79 static bool sem_realloc_state;
80 static kcondvar_t sem_realloc_cv;
81
82 /* Macro to find a particular sem_undo vector */
83 #define SEMU(s, ix) ((struct sem_undo *)(((long)s) + ix * seminfo.semusz))
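/*
 * The undo pool is a flat byte array: each slot is seminfo.semusz bytes
 * (a struct sem_undo together with its adjustment entries), so the macro
 * just computes semu + ix * seminfo.semusz and casts it.
 */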
84
85 #ifdef SEM_DEBUG
86 #define SEM_PRINTF(a) printf a
87 #else
88 #define SEM_PRINTF(a)
89 #endif
90
91 struct sem_undo *semu_alloc(struct proc *);
92 int semundo_adjust(struct proc *, struct sem_undo **, int, int, int);
93 void semundo_clear(int, int);
94
95 void
96 seminit(void)
97 {
98 int i, sz;
99 vaddr_t v;
100
101 mutex_init(&semlock, MUTEX_DEFAULT, IPL_NONE);
102 cv_init(&sem_realloc_cv, "semrealc");
103 sem_realloc_state = false;
104
105 /* Allocate the wired memory for our structures */
106 sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
107 ALIGN(seminfo.semmns * sizeof(struct __sem)) +
108 ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
109 ALIGN(seminfo.semmnu * seminfo.semusz);
110 v = uvm_km_alloc(kernel_map, round_page(sz), 0,
111 UVM_KMF_WIRED|UVM_KMF_ZERO);
112 if (v == 0)
113 panic("sysv_sem: cannot allocate memory");
114 sema = (void *)v;
115 sem = (void *)(ALIGN(sema) +
116 seminfo.semmni * sizeof(struct semid_ds));
117 semcv = (void *)(ALIGN(sem) +
118 seminfo.semmns * sizeof(struct __sem));
119 semu = (void *)(ALIGN(semcv) +
120 seminfo.semmni * sizeof(kcondvar_t));
121
122 for (i = 0; i < seminfo.semmni; i++) {
123 sema[i]._sem_base = 0;
124 sema[i].sem_perm.mode = 0;
125 cv_init(&semcv[i], "semwait");
126 }
127 for (i = 0; i < seminfo.semmnu; i++) {
128 struct sem_undo *suptr = SEMU(semu, i);
129 suptr->un_proc = NULL;
130 }
131 semu_list = NULL;
132 exithook_establish(semexit, NULL);
133 }
134
135 static int
136 semrealloc(int newsemmni, int newsemmns, int newsemmnu)
137 {
138 struct semid_ds *new_sema, *old_sema;
139 struct __sem *new_sem;
140 struct sem_undo *new_semu_list, *suptr, *nsuptr;
141 int *new_semu;
142 kcondvar_t *new_semcv;
143 vaddr_t v;
144 int i, j, lsemid, nmnus, sz;
145
146 if (newsemmni < 1 || newsemmns < 1 || newsemmnu < 1)
147 return EINVAL;
148
149 /* Allocate the wired memory for our structures */
150 sz = ALIGN(newsemmni * sizeof(struct semid_ds)) +
151 ALIGN(newsemmns * sizeof(struct __sem)) +
152 ALIGN(newsemmni * sizeof(kcondvar_t)) +
153 ALIGN(newsemmnu * seminfo.semusz);
154 v = uvm_km_alloc(kernel_map, round_page(sz), 0,
155 UVM_KMF_WIRED|UVM_KMF_ZERO);
156 if (v == 0)
157 return ENOMEM;
158
159 mutex_enter(&semlock);
160 if (sem_realloc_state) {
161 mutex_exit(&semlock);
162 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
163 return EBUSY;
164 }
165 sem_realloc_state = true;
166 if (sem_waiters) {
167 /*
168 * Mark the reallocation state, wake up all waiters,
169 * and wait until they have all exited.
170 */
171 for (i = 0; i < seminfo.semmni; i++)
172 cv_broadcast(&semcv[i]);
173 while (sem_waiters)
174 cv_wait(&sem_realloc_cv, &semlock);
175 }
176 old_sema = sema;
177
178 /* Find the index of the last allocated slot */
179 lsemid = 0;
180 for (i = 0; i < seminfo.semmni; i++)
181 if (sema[i].sem_perm.mode & SEM_ALLOC)
182 lsemid = i;
183
184 /* Get the number of currently used undo structures */
185 nmnus = 0;
186 for (i = 0; i < seminfo.semmnu; i++) {
187 suptr = SEMU(semu, i);
188 if (suptr->un_proc == NULL)
189 continue;
190 nmnus++;
191 }
192
193 /* We cannot shrink the arrays below what is currently in use */
194 if (lsemid >= newsemmni || semtot > newsemmns || nmnus > newsemmnu) {
195 mutex_exit(&semlock);
196 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
197 return EBUSY;
198 }
199
200 new_sema = (void *)v;
201 new_sem = (void *)(ALIGN(new_sema) +
202 newsemmni * sizeof(struct semid_ds));
203 new_semcv = (void *)(ALIGN(new_sem) +
204 newsemmns * sizeof(struct __sem));
205 new_semu = (void *)(ALIGN(new_semcv) +
206 newsemmni * sizeof(kcondvar_t));
207
208 /* Initialize all semaphore identifiers and condvars */
209 for (i = 0; i < newsemmni; i++) {
210 new_sema[i]._sem_base = 0;
211 new_sema[i].sem_perm.mode = 0;
212 cv_init(&new_semcv[i], "semwait");
213 }
214 for (i = 0; i < newsemmnu; i++) {
215 nsuptr = SEMU(new_semu, i);
216 nsuptr->un_proc = NULL;
217 }
218
219 /*
220 * Copy all identifiers, semaphores and list of the
221 * undo structures to the new memory allocation.
222 */
223 j = 0;
224 for (i = 0; i <= lsemid; i++) {
225 if ((sema[i].sem_perm.mode & SEM_ALLOC) == 0)
226 continue;
227 memcpy(&new_sema[i], &sema[i], sizeof(struct semid_ds));
228 new_sema[i]._sem_base = &new_sem[j];
229 memcpy(new_sema[i]._sem_base, sema[i]._sem_base,
230 (sizeof(struct __sem) * sema[i].sem_nsems));
231 j += sema[i].sem_nsems;
232 }
233 KASSERT(j == semtot);
234
235 j = 0;
236 new_semu_list = NULL;
237 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
238 KASSERT(j < newsemmnu);
239 nsuptr = SEMU(new_semu, j);
240 memcpy(nsuptr, suptr, SEMUSZ);
241 nsuptr->un_next = new_semu_list;
242 new_semu_list = nsuptr;
243 j++;
244 }
245
246 for (i = 0; i < seminfo.semmni; i++) {
247 KASSERT(cv_has_waiters(&semcv[i]) == false);
248 cv_destroy(&semcv[i]);
249 }
250
251 sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
252 ALIGN(seminfo.semmns * sizeof(struct __sem)) +
253 ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
254 ALIGN(seminfo.semmnu * seminfo.semusz);
255
256 /* Set the pointers and update the new values */
257 sema = new_sema;
258 sem = new_sem;
259 semcv = new_semcv;
260 semu = new_semu;
261 semu_list = new_semu_list;
262
263 seminfo.semmni = newsemmni;
264 seminfo.semmns = newsemmns;
265 seminfo.semmnu = newsemmnu;
266
267 /* Reallocation completed - notify all waiters, if any */
268 sem_realloc_state = false;
269 cv_broadcast(&sem_realloc_cv);
270 mutex_exit(&semlock);
271
272 uvm_km_free(kernel_map, (vaddr_t)old_sema, sz, UVM_KMF_WIRED);
273 return 0;
274 }
275
276 /*
277 * Placebo.
278 */
279
280 int
281 sys_semconfig(struct lwp *l, const struct sys_semconfig_args *uap, register_t *retval)
282 {
283
284 *retval = 0;
285 return 0;
286 }
287
288 /*
289 * Allocate a new sem_undo structure for a process
290 * (returns ptr to structure or NULL if no more room)
291 */
292
293 struct sem_undo *
294 semu_alloc(struct proc *p)
295 {
296 int i;
297 struct sem_undo *suptr;
298 struct sem_undo **supptr;
299 int attempt;
300
301 KASSERT(mutex_owned(&semlock));
302
303 /*
304 * Try twice to allocate something.
305 * (we'll purge any empty structures after the first pass so
306 * two passes are always enough)
307 */
308
309 for (attempt = 0; attempt < 2; attempt++) {
310 /*
311 * Look for a free structure.
312 * Fill it in and return it if we find one.
313 */
314
315 for (i = 0; i < seminfo.semmnu; i++) {
316 suptr = SEMU(semu, i);
317 if (suptr->un_proc == NULL) {
318 suptr->un_next = semu_list;
319 semu_list = suptr;
320 suptr->un_cnt = 0;
321 suptr->un_proc = p;
322 return (suptr);
323 }
324 }
325
326 /*
327 * We didn't find a free one; if this is the first attempt,
328 * try to free some structures.
329 */
330
331 if (attempt == 0) {
332 /* All the structures are in use - try to free some */
333 int did_something = 0;
334
335 supptr = &semu_list;
336 while ((suptr = *supptr) != NULL) {
337 if (suptr->un_cnt == 0) {
338 suptr->un_proc = NULL;
339 *supptr = suptr->un_next;
340 did_something = 1;
341 } else
342 supptr = &suptr->un_next;
343 }
344
345 /* If we didn't free anything then just give up */
346 if (!did_something)
347 return (NULL);
348 } else {
349 /*
350 * The second pass failed even though we freed
351 * something after the first pass!
352 * This is IMPOSSIBLE!
353 */
354 panic("semu_alloc - second attempt failed");
355 }
356 }
357 return NULL;
358 }
359
360 /*
361 * Adjust a particular entry for a particular proc
362 */
363
364 int
365 semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid, int semnum,
366 int adjval)
367 {
368 struct sem_undo *suptr;
369 struct undo *sunptr;
370 int i;
371
372 KASSERT(mutex_owned(&semlock));
373
374 /*
375 * Look for and remember the sem_undo if the caller doesn't
376 * provide it
377 */
378
379 suptr = *supptr;
380 if (suptr == NULL) {
381 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
382 if (suptr->un_proc == p)
383 break;
384
385 if (suptr == NULL) {
386 suptr = semu_alloc(p);
387 if (suptr == NULL)
388 return (ENOSPC);
389 }
390 *supptr = suptr;
391 }
392
393 /*
394 * Look for the requested entry and adjust it (delete if
395 * adjval becomes 0).
396 */
397 sunptr = &suptr->un_ent[0];
398 for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
399 if (sunptr->un_id != semid || sunptr->un_num != semnum)
400 continue;
401 sunptr->un_adjval += adjval;
402 if (sunptr->un_adjval == 0) {
403 suptr->un_cnt--;
404 if (i < suptr->un_cnt)
405 suptr->un_ent[i] =
406 suptr->un_ent[suptr->un_cnt];
407 }
408 return (0);
409 }
410
411 /* Didn't find the right entry - create it */
412 if (suptr->un_cnt == SEMUME)
413 return (EINVAL);
414
415 sunptr = &suptr->un_ent[suptr->un_cnt];
416 suptr->un_cnt++;
417 sunptr->un_adjval = adjval;
418 sunptr->un_id = semid;
419 sunptr->un_num = semnum;
420 return (0);
421 }
422
423 void
424 semundo_clear(int semid, int semnum)
425 {
426 struct sem_undo *suptr;
427 struct undo *sunptr, *sunend;
428
429 KASSERT(mutex_owned(&semlock));
430
431 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
432 for (sunptr = &suptr->un_ent[0],
433 sunend = sunptr + suptr->un_cnt; sunptr < sunend;) {
434 if (sunptr->un_id == semid) {
435 if (semnum == -1 || sunptr->un_num == semnum) {
436 suptr->un_cnt--;
437 sunend--;
438 if (sunptr != sunend)
439 *sunptr = *sunend;
440 if (semnum != -1)
441 break;
442 else
443 continue;
444 }
445 }
446 sunptr++;
447 }
448 }
449
450 int
451 sys_____semctl13(struct lwp *l, const struct sys_____semctl13_args *uap, register_t *retval)
452 {
453 /* {
454 syscallarg(int) semid;
455 syscallarg(int) semnum;
456 syscallarg(int) cmd;
457 syscallarg(union __semun *) arg;
458 } */
459 struct semid_ds sembuf;
460 int cmd, error;
461 void *pass_arg;
462 union __semun karg;
463
464 cmd = SCARG(uap, cmd);
465
466 pass_arg = get_semctl_arg(cmd, &sembuf, &karg);
467
468 if (pass_arg) {
469 error = copyin(SCARG(uap, arg), &karg, sizeof(karg));
470 if (error)
471 return error;
472 if (cmd == IPC_SET) {
473 error = copyin(karg.buf, &sembuf, sizeof(sembuf));
474 if (error)
475 return (error);
476 }
477 }
478
479 error = semctl1(l, SCARG(uap, semid), SCARG(uap, semnum), cmd,
480 pass_arg, retval);
481
482 if (error == 0 && cmd == IPC_STAT)
483 error = copyout(&sembuf, karg.buf, sizeof(sembuf));
484
485 return (error);
486 }
487
488 int
489 semctl1(struct lwp *l, int semid, int semnum, int cmd, void *v,
490 register_t *retval)
491 {
492 kauth_cred_t cred = l->l_cred;
493 union __semun *arg = v;
494 struct semid_ds *sembuf = v, *semaptr;
495 int i, error, ix;
496
497 SEM_PRINTF(("call to semctl(%d, %d, %d, %p)\n",
498 semid, semnum, cmd, v));
499
500 mutex_enter(&semlock);
501
502 ix = IPCID_TO_IX(semid);
503 if (ix < 0 || ix >= seminfo.semmni) {
504 mutex_exit(&semlock);
505 return (EINVAL);
506 }
507
508 semaptr = &sema[ix];
509 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
510 semaptr->sem_perm._seq != IPCID_TO_SEQ(semid)) {
511 mutex_exit(&semlock);
512 return (EINVAL);
513 }
514
515 switch (cmd) {
516 case IPC_RMID:
517 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)) != 0)
518 break;
519 semaptr->sem_perm.cuid = kauth_cred_geteuid(cred);
520 semaptr->sem_perm.uid = kauth_cred_geteuid(cred);
521 semtot -= semaptr->sem_nsems;
522 for (i = semaptr->_sem_base - sem; i < semtot; i++)
523 sem[i] = sem[i + semaptr->sem_nsems];
524 for (i = 0; i < seminfo.semmni; i++) {
525 if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
526 sema[i]._sem_base > semaptr->_sem_base)
527 sema[i]._sem_base -= semaptr->sem_nsems;
528 }
529 semaptr->sem_perm.mode = 0;
530 semundo_clear(ix, -1);
531 cv_broadcast(&semcv[ix]);
532 break;
533
534 case IPC_SET:
535 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
536 break;
537 KASSERT(sembuf != NULL);
538 semaptr->sem_perm.uid = sembuf->sem_perm.uid;
539 semaptr->sem_perm.gid = sembuf->sem_perm.gid;
540 semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
541 (sembuf->sem_perm.mode & 0777);
542 semaptr->sem_ctime = time_second;
543 break;
544
545 case IPC_STAT:
546 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
547 break;
548 KASSERT(sembuf != NULL);
549 memcpy(sembuf, semaptr, sizeof(struct semid_ds));
550 sembuf->sem_perm.mode &= 0777;
551 break;
552
553 case GETNCNT:
554 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
555 break;
556 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
557 error = EINVAL;
558 break;
559 }
560 *retval = semaptr->_sem_base[semnum].semncnt;
561 break;
562
563 case GETPID:
564 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
565 break;
566 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
567 error = EINVAL;
568 break;
569 }
570 *retval = semaptr->_sem_base[semnum].sempid;
571 break;
572
573 case GETVAL:
574 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
575 break;
576 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
577 error = EINVAL;
578 break;
579 }
580 *retval = semaptr->_sem_base[semnum].semval;
581 break;
582
583 case GETALL:
584 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
585 break;
586 KASSERT(arg != NULL);
587 for (i = 0; i < semaptr->sem_nsems; i++) {
588 error = copyout(&semaptr->_sem_base[i].semval,
589 &arg->array[i], sizeof(arg->array[i]));
590 if (error != 0)
591 break;
592 }
593 break;
594
595 case GETZCNT:
596 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
597 break;
598 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
599 error = EINVAL;
600 break;
601 }
602 *retval = semaptr->_sem_base[semnum].semzcnt;
603 break;
604
605 case SETVAL:
606 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
607 break;
608 if (semnum < 0 || semnum >= semaptr->sem_nsems) {
609 error = EINVAL;
610 break;
611 }
612 KASSERT(arg != NULL);
613 semaptr->_sem_base[semnum].semval = arg->val;
614 semundo_clear(ix, semnum);
615 cv_broadcast(&semcv[ix]);
616 break;
617
618 case SETALL:
619 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
620 break;
621 KASSERT(arg != NULL);
622 for (i = 0; i < semaptr->sem_nsems; i++) {
623 error = copyin(&arg->array[i],
624 &semaptr->_sem_base[i].semval,
625 sizeof(arg->array[i]));
626 if (error != 0)
627 break;
628 }
629 semundo_clear(ix, -1);
630 cv_broadcast(&semcv[ix]);
631 break;
632
633 default:
634 error = EINVAL;
635 break;
636 }
637
638 mutex_exit(&semlock);
639 return (error);
640 }
641
642 int
643 sys_semget(struct lwp *l, const struct sys_semget_args *uap, register_t *retval)
644 {
645 /* {
646 syscallarg(key_t) key;
647 syscallarg(int) nsems;
648 syscallarg(int) semflg;
649 } */
650 int semid, error = 0;
651 int key = SCARG(uap, key);
652 int nsems = SCARG(uap, nsems);
653 int semflg = SCARG(uap, semflg);
654 kauth_cred_t cred = l->l_cred;
655
656 SEM_PRINTF(("semget(0x%x, %d, 0%o)\n", key, nsems, semflg));
657
658 mutex_enter(&semlock);
659
660 if (key != IPC_PRIVATE) {
661 for (semid = 0; semid < seminfo.semmni; semid++) {
662 if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
663 sema[semid].sem_perm._key == key)
664 break;
665 }
666 if (semid < seminfo.semmni) {
667 SEM_PRINTF(("found public key\n"));
668 if ((error = ipcperm(cred, &sema[semid].sem_perm,
669 semflg & 0700)))
670 goto out;
671 if (nsems > 0 && sema[semid].sem_nsems < nsems) {
672 SEM_PRINTF(("too small\n"));
673 error = EINVAL;
674 goto out;
675 }
676 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
677 SEM_PRINTF(("not exclusive\n"));
678 error = EEXIST;
679 goto out;
680 }
681 goto found;
682 }
683 }
684
685 SEM_PRINTF(("need to allocate the semid_ds\n"));
686 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
687 if (nsems <= 0 || nsems > seminfo.semmsl) {
688 SEM_PRINTF(("nsems out of range (0<%d<=%d)\n", nsems,
689 seminfo.semmsl));
690 error = EINVAL;
691 goto out;
692 }
693 if (nsems > seminfo.semmns - semtot) {
694 SEM_PRINTF(("not enough semaphores left "
695 "(need %d, got %d)\n",
696 nsems, seminfo.semmns - semtot));
697 error = ENOSPC;
698 goto out;
699 }
700 for (semid = 0; semid < seminfo.semmni; semid++) {
701 if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
702 break;
703 }
704 if (semid == seminfo.semmni) {
705 SEM_PRINTF(("no more semid_ds's available\n"));
706 error = ENOSPC;
707 goto out;
708 }
709 SEM_PRINTF(("semid %d is available\n", semid));
710 sema[semid].sem_perm._key = key;
711 sema[semid].sem_perm.cuid = kauth_cred_geteuid(cred);
712 sema[semid].sem_perm.uid = kauth_cred_geteuid(cred);
713 sema[semid].sem_perm.cgid = kauth_cred_getegid(cred);
714 sema[semid].sem_perm.gid = kauth_cred_getegid(cred);
715 sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
716 sema[semid].sem_perm._seq =
717 (sema[semid].sem_perm._seq + 1) & 0x7fff;
718 sema[semid].sem_nsems = nsems;
719 sema[semid].sem_otime = 0;
720 sema[semid].sem_ctime = time_second;
721 sema[semid]._sem_base = &sem[semtot];
722 semtot += nsems;
723 memset(sema[semid]._sem_base, 0,
724 sizeof(sema[semid]._sem_base[0]) * nsems);
725 SEM_PRINTF(("sembase = %p, next = %p\n", sema[semid]._sem_base,
726 &sem[semtot]));
727 } else {
728 SEM_PRINTF(("didn't find it and wasn't asked to create it\n"));
729 error = ENOENT;
730 goto out;
731 }
732
733 found:
734 *retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
735 out:
736 mutex_exit(&semlock);
737 return (error);
738 }
739
740 #define SMALL_SOPS 8
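/*
 * Operation vectors with at most SMALL_SOPS entries are copied into an
 * on-stack buffer in sys_semop(); larger vectors (up to seminfo.semopm)
 * fall back to a temporary kmem_alloc(), so the common small case avoids
 * the allocator entirely.
 */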
741
742 int
743 sys_semop(struct lwp *l, const struct sys_semop_args *uap, register_t *retval)
744 {
745 /* {
746 syscallarg(int) semid;
747 syscallarg(struct sembuf *) sops;
748 syscallarg(size_t) nsops;
749 } */
750 struct proc *p = l->l_proc;
751 int semid = SCARG(uap, semid), seq;
752 size_t nsops = SCARG(uap, nsops);
753 struct sembuf small_sops[SMALL_SOPS];
754 struct sembuf *sops;
755 struct semid_ds *semaptr;
756 struct sembuf *sopptr = NULL;
757 struct __sem *semptr = NULL;
758 struct sem_undo *suptr = NULL;
759 kauth_cred_t cred = l->l_cred;
760 int i, error;
761 int do_wakeup, do_undos;
762
763 SEM_PRINTF(("call to semop(%d, %p, %zd)\n", semid, SCARG(uap,sops), nsops));
764
765 if (__predict_false((p->p_flag & PK_SYSVSEM) == 0)) {
766 mutex_enter(p->p_lock);
767 p->p_flag |= PK_SYSVSEM;
768 mutex_exit(p->p_lock);
769 }
770
771 restart:
772 if (nsops <= SMALL_SOPS) {
773 sops = small_sops;
774 } else if (nsops <= seminfo.semopm) {
775 sops = kmem_alloc(nsops * sizeof(*sops), KM_SLEEP);
776 } else {
777 SEM_PRINTF(("too many sops (max=%d, nsops=%zd)\n",
778 seminfo.semopm, nsops));
779 return (E2BIG);
780 }
781
782 error = copyin(SCARG(uap, sops), sops, nsops * sizeof(sops[0]));
783 if (error) {
784 SEM_PRINTF(("error = %d from copyin(%p, %p, %zd)\n", error,
785 SCARG(uap, sops), &sops, nsops * sizeof(sops[0])));
786 if (sops != small_sops)
787 kmem_free(sops, nsops * sizeof(*sops));
788 return error;
789 }
790
791 mutex_enter(&semlock);
792 /* If a reallocation is in progress, wait for it to complete */
793 while (__predict_false(sem_realloc_state))
794 cv_wait(&sem_realloc_cv, &semlock);
795
796 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
797 if (semid < 0 || semid >= seminfo.semmni) {
798 error = EINVAL;
799 goto out;
800 }
801
802 semaptr = &sema[semid];
803 seq = IPCID_TO_SEQ(SCARG(uap, semid));
804 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
805 semaptr->sem_perm._seq != seq) {
806 error = EINVAL;
807 goto out;
808 }
809
810 if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W))) {
811 SEM_PRINTF(("error = %d from ipcperm\n", error));
812 goto out;
813 }
814
815 for (i = 0; i < nsops; i++)
816 if (sops[i].sem_num >= semaptr->sem_nsems) {
817 error = EFBIG;
818 goto out;
819 }
820
821 /*
822 * Loop trying to satisfy the vector of requests.
823 * If we reach a point where we must wait, any requests already
824 * performed are rolled back and we go to sleep until some other
825 * process wakes us up. At this point, we start all over again.
826 *
827 * This ensures that from the perspective of other tasks, a set
828 * of requests is atomic (never partially satisfied).
829 */
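/*
 * For example, given the (hypothetical) two-op vector { {0, -1}, {1, -1} }:
 * if semaphore 0 can be decremented but semaphore 1 is already zero, the
 * decrement of semaphore 0 is rolled back before we sleep, so no other
 * process ever observes the half-applied vector.
 */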
830 do_undos = 0;
831
832 for (;;) {
833 do_wakeup = 0;
834
835 for (i = 0; i < nsops; i++) {
836 sopptr = &sops[i];
837 semptr = &semaptr->_sem_base[sopptr->sem_num];
838
839 SEM_PRINTF(("semop: semaptr=%p, sem_base=%p, "
840 "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
841 semaptr, semaptr->_sem_base, semptr,
842 sopptr->sem_num, semptr->semval, sopptr->sem_op,
843 (sopptr->sem_flg & IPC_NOWAIT) ?
844 "nowait" : "wait"));
845
846 if (sopptr->sem_op < 0) {
847 if ((int)(semptr->semval +
848 sopptr->sem_op) < 0) {
849 SEM_PRINTF(("semop: "
850 "can't do it now\n"));
851 break;
852 } else {
853 semptr->semval += sopptr->sem_op;
854 if (semptr->semval == 0 &&
855 semptr->semzcnt > 0)
856 do_wakeup = 1;
857 }
858 if (sopptr->sem_flg & SEM_UNDO)
859 do_undos = 1;
860 } else if (sopptr->sem_op == 0) {
861 if (semptr->semval > 0) {
862 SEM_PRINTF(("semop: not zero now\n"));
863 break;
864 }
865 } else {
866 if (semptr->semncnt > 0)
867 do_wakeup = 1;
868 semptr->semval += sopptr->sem_op;
869 if (sopptr->sem_flg & SEM_UNDO)
870 do_undos = 1;
871 }
872 }
873
874 /*
875 * Did we get through the entire vector?
876 */
877 if (i >= nsops)
878 goto done;
879
880 /*
881 * No ... roll back anything that we've already done
882 */
883 SEM_PRINTF(("semop: rollback 0 through %d\n", i - 1));
884 while (i-- > 0)
885 semaptr->_sem_base[sops[i].sem_num].semval -=
886 sops[i].sem_op;
887
888 /*
889 * If the request that we couldn't satisfy has the
890 * NOWAIT flag set then return with EAGAIN.
891 */
892 if (sopptr->sem_flg & IPC_NOWAIT) {
893 error = EAGAIN;
894 goto out;
895 }
896
897 if (sopptr->sem_op == 0)
898 semptr->semzcnt++;
899 else
900 semptr->semncnt++;
901
902 sem_waiters++;
903 SEM_PRINTF(("semop: good night!\n"));
904 error = cv_wait_sig(&semcv[semid], &semlock);
905 SEM_PRINTF(("semop: good morning (error=%d)!\n", error));
906 sem_waiters--;
907
908 /* Notify reallocator, if it is waiting */
909 cv_broadcast(&sem_realloc_cv);
910
911 /*
912 * Make sure that the semaphore still exists
913 */
914 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
915 semaptr->sem_perm._seq != seq) {
916 error = EIDRM;
917 goto out;
918 }
919
920 /*
921 * The semaphore is still alive. Readjust the count of
922 * waiting processes.
923 */
924 semptr = &semaptr->_sem_base[sopptr->sem_num];
925 if (sopptr->sem_op == 0)
926 semptr->semzcnt--;
927 else
928 semptr->semncnt--;
929
930 /* If a reallocation has started in the meantime, restart the call */
931 if (sem_realloc_state) {
932 mutex_exit(&semlock);
933 goto restart;
934 }
935
936 /* Is it really morning, or was our sleep interrupted? */
937 if (error != 0) {
938 error = EINTR;
939 goto out;
940 }
941 SEM_PRINTF(("semop: good morning!\n"));
942 }
943
944 done:
945 /*
946 * Process any SEM_UNDO requests.
947 */
948 if (do_undos) {
949 for (i = 0; i < nsops; i++) {
950 /*
951 * We only need to deal with SEM_UNDO's for non-zero
952 * op's.
953 */
954 int adjval;
955
956 if ((sops[i].sem_flg & SEM_UNDO) == 0)
957 continue;
958 adjval = sops[i].sem_op;
959 if (adjval == 0)
960 continue;
961 error = semundo_adjust(p, &suptr, semid,
962 sops[i].sem_num, -adjval);
963 if (error == 0)
964 continue;
965
966 /*
967 * Uh-oh! We ran out of either sem_undo's or undo's.
968 * Roll back the adjustments made so far, and then roll
969 * back the semaphore ups and downs, so we can return an
970 * error with all structures restored. We roll back the
971 * undo's in the exact reverse order that we applied them.
972 * This guarantees that we won't run out of space as we
973 * roll things back out.
974 */
975 while (i-- > 0) {
976 if ((sops[i].sem_flg & SEM_UNDO) == 0)
977 continue;
978 adjval = sops[i].sem_op;
979 if (adjval == 0)
980 continue;
981 if (semundo_adjust(p, &suptr, semid,
982 sops[i].sem_num, adjval) != 0)
983 panic("semop - can't undo undos");
984 }
985
986 for (i = 0; i < nsops; i++)
987 semaptr->_sem_base[sops[i].sem_num].semval -=
988 sops[i].sem_op;
989
990 SEM_PRINTF(("error = %d from semundo_adjust\n", error));
991 goto out;
992 } /* loop through the sops */
993 } /* if (do_undos) */
994
995 /* We're definitely done - set the sempid's */
996 for (i = 0; i < nsops; i++) {
997 sopptr = &sops[i];
998 semptr = &semaptr->_sem_base[sopptr->sem_num];
999 semptr->sempid = p->p_pid;
1000 }
1001
1002 /* Update sem_otime */
1003 semaptr->sem_otime = time_second;
1004
1005 /* Do a wakeup if any semaphore was up'd. */
1006 if (do_wakeup) {
1007 SEM_PRINTF(("semop: doing wakeup\n"));
1008 cv_broadcast(&semcv[semid]);
1009 SEM_PRINTF(("semop: back from wakeup\n"));
1010 }
1011 SEM_PRINTF(("semop: done\n"));
1012 *retval = 0;
1013
1014 out:
1015 mutex_exit(&semlock);
1016 if (sops != small_sops)
1017 kmem_free(sops, nsops * sizeof(*sops));
1018 return error;
1019 }
1020
1021 /*
1022 * Go through the undo structures for this process and apply the
1023 * adjustments to semaphores.
1024 */
1025 /*ARGSUSED*/
1026 void
1027 semexit(struct proc *p, void *v)
1028 {
1029 struct sem_undo *suptr;
1030 struct sem_undo **supptr;
1031
1032 if ((p->p_flag & PK_SYSVSEM) == 0)
1033 return;
1034
1035 mutex_enter(&semlock);
1036
1037 /*
1038 * Go through the chain of undo vectors looking for one
1039 * associated with this process.
1040 */
1041
1042 for (supptr = &semu_list; (suptr = *supptr) != NULL;
1043 supptr = &suptr->un_next) {
1044 if (suptr->un_proc == p)
1045 break;
1046 }
1047
1048 /*
1049 * If there is no undo vector, we are done.
1050 */
1051
1052 if (suptr == NULL) {
1053 mutex_exit(&semlock);
1054 return;
1055 }
1056
1057 /*
1058 * We now have an undo vector for this process.
1059 */
1060
1061 SEM_PRINTF(("proc @%p has undo structure with %d entries\n", p,
1062 suptr->un_cnt));
1063
1064 /*
1065 * If there are any active undo elements then process them.
1066 */
1067 if (suptr->un_cnt > 0) {
1068 int ix;
1069
1070 for (ix = 0; ix < suptr->un_cnt; ix++) {
1071 int semid = suptr->un_ent[ix].un_id;
1072 int semnum = suptr->un_ent[ix].un_num;
1073 int adjval = suptr->un_ent[ix].un_adjval;
1074 struct semid_ds *semaptr;
1075
1076 semaptr = &sema[semid];
1077 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
1078 panic("semexit - semid not allocated");
1079 if (semnum >= semaptr->sem_nsems)
1080 panic("semexit - semnum out of range");
1081
1082 SEM_PRINTF(("semexit: %p id=%d num=%d(adj=%d) ; "
1083 "sem=%d\n",
1084 suptr->un_proc, suptr->un_ent[ix].un_id,
1085 suptr->un_ent[ix].un_num,
1086 suptr->un_ent[ix].un_adjval,
1087 semaptr->_sem_base[semnum].semval));
1088
1089 if (adjval < 0 &&
1090 semaptr->_sem_base[semnum].semval < -adjval)
1091 semaptr->_sem_base[semnum].semval = 0;
1092 else
1093 semaptr->_sem_base[semnum].semval += adjval;
1094
1095 cv_broadcast(&semcv[semid]);
1096 SEM_PRINTF(("semexit: back from wakeup\n"));
1097 }
1098 }
1099
1100 /*
1101 * Deallocate the undo vector.
1102 */
1103 SEM_PRINTF(("removing vector\n"));
1104 suptr->un_proc = NULL;
1105 *supptr = suptr->un_next;
1106 mutex_exit(&semlock);
1107 }
1108
1109 /*
1110 * Sysctl initialization and nodes.
1111 */
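/*
 * Once attached, the limits can be tuned at run time, e.g. (assuming the
 * nodes end up under kern.ipc as created below):
 *
 *	sysctl -w kern.ipc.semmni=32
 *
 * which in turn calls semrealloc() with the new value.
 */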
1112
1113 static int
1114 sysctl_ipc_semmni(SYSCTLFN_ARGS)
1115 {
1116 int newsize, error;
1117 struct sysctlnode node;
1118 node = *rnode;
1119 node.sysctl_data = &newsize;
1120
1121 newsize = seminfo.semmni;
1122 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1123 if (error || newp == NULL)
1124 return error;
1125
1126 return semrealloc(newsize, seminfo.semmns, seminfo.semmnu);
1127 }
1128
1129 static int
1130 sysctl_ipc_semmns(SYSCTLFN_ARGS)
1131 {
1132 int newsize, error;
1133 struct sysctlnode node;
1134 node = *rnode;
1135 node.sysctl_data = &newsize;
1136
1137 newsize = seminfo.semmns;
1138 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1139 if (error || newp == NULL)
1140 return error;
1141
1142 return semrealloc(seminfo.semmni, newsize, seminfo.semmnu);
1143 }
1144
1145 static int
1146 sysctl_ipc_semmnu(SYSCTLFN_ARGS)
1147 {
1148 int newsize, error;
1149 struct sysctlnode node;
1150 node = *rnode;
1151 node.sysctl_data = &newsize;
1152
1153 newsize = seminfo.semmnu;
1154 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1155 if (error || newp == NULL)
1156 return error;
1157
1158 return semrealloc(seminfo.semmni, seminfo.semmns, newsize);
1159 }
1160
1161 SYSCTL_SETUP(sysctl_ipc_sem_setup, "sysctl kern.ipc subtree setup")
1162 {
1163 const struct sysctlnode *node = NULL;
1164
1165 sysctl_createv(clog, 0, NULL, NULL,
1166 CTLFLAG_PERMANENT,
1167 CTLTYPE_NODE, "kern", NULL,
1168 NULL, 0, NULL, 0,
1169 CTL_KERN, CTL_EOL);
1170 sysctl_createv(clog, 0, NULL, &node,
1171 CTLFLAG_PERMANENT,
1172 CTLTYPE_NODE, "ipc",
1173 SYSCTL_DESCR("SysV IPC options"),
1174 NULL, 0, NULL, 0,
1175 CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1176
1177 if (node == NULL)
1178 return;
1179
1180 sysctl_createv(clog, 0, &node, NULL,
1181 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1182 CTLTYPE_INT, "semmni",
1183 SYSCTL_DESCR("Max number of semaphore identifiers"),
1184 sysctl_ipc_semmni, 0, &seminfo.semmni, 0,
1185 CTL_CREATE, CTL_EOL);
1186 sysctl_createv(clog, 0, &node, NULL,
1187 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1188 CTLTYPE_INT, "semmns",
1189 SYSCTL_DESCR("Max number of semaphores in system"),
1190 sysctl_ipc_semmns, 0, &seminfo.semmns, 0,
1191 CTL_CREATE, CTL_EOL);
1192 sysctl_createv(clog, 0, &node, NULL,
1193 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1194 CTLTYPE_INT, "semmnu",
1195 SYSCTL_DESCR("Max number of undo structures in system"),
1196 sysctl_ipc_semmnu, 0, &seminfo.semmnu, 0,
1197 CTL_CREATE, CTL_EOL);
1198 }
1199