/*	$NetBSD: sysv_sem.c,v 1.90.4.1 2019/02/23 07:02:20 martin Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID semaphores
 *
 * Author: Daniel Boulet
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_sem.c,v 1.90.4.1 2019/02/23 07:02:20 martin Exp $");

#define SYSVSEM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sem.h>
#include <sys/sysctl.h>
#include <sys/kmem.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/kauth.h>

/*
 * Memory areas:
 *  1st: Pool of semaphore identifiers
 *  2nd: Semaphores
 *  3rd: Condition variables
 *  4th: Undo structures
 */
struct semid_ds * sema __read_mostly;
static struct __sem * sem __read_mostly;
static kcondvar_t * semcv __read_mostly;
static int * semu __read_mostly;

static kmutex_t semlock __cacheline_aligned;
static bool sem_realloc_state __read_mostly;
static kcondvar_t sem_realloc_cv;

/*
 * List of active undo structures, total number of semaphores,
 * and total number of semop waiters.
 */
static struct sem_undo *semu_list __read_mostly;
static u_int semtot __cacheline_aligned;
static u_int sem_waiters __cacheline_aligned;

/* Macro to find a particular sem_undo vector */
#define SEMU(s, ix) ((struct sem_undo *)(((long)s) + ix * seminfo.semusz))

#ifdef SEM_DEBUG
#define SEM_PRINTF(a) printf a
#else
#define SEM_PRINTF(a)
#endif

struct sem_undo *semu_alloc(struct proc *);
int semundo_adjust(struct proc *, struct sem_undo **, int, int, int);
void semundo_clear(int, int);

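/*
 * seminit: initialize the SysV semaphore facility.
 * => Allocates one wired, zeroed region that holds the identifier,
 *    semaphore, condition variable and undo areas, then registers
 *    semexit() as a process-exit hook.
 */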
void
seminit(void)
{
	int i, sz;
	vaddr_t v;

	mutex_init(&semlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sem_realloc_cv, "semrealc");
	sem_realloc_state = false;
	semtot = 0;
	sem_waiters = 0;

	/* Allocate the wired memory for our structures */
	sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)) +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
	    ALIGN(seminfo.semmnu * seminfo.semusz);
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_sem: cannot allocate memory");
	sema = (void *)v;
	sem = (void *)((uintptr_t)sema +
	    ALIGN(seminfo.semmni * sizeof(struct semid_ds)));
	semcv = (void *)((uintptr_t)sem +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)));
	semu = (void *)((uintptr_t)semcv +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)));

	for (i = 0; i < seminfo.semmni; i++) {
		sema[i]._sem_base = 0;
		sema[i].sem_perm.mode = 0;
		cv_init(&semcv[i], "semwait");
	}
	for (i = 0; i < seminfo.semmnu; i++) {
		struct sem_undo *suptr = SEMU(semu, i);
		suptr->un_proc = NULL;
	}
	semu_list = NULL;
	exithook_establish(semexit, NULL);

	sysvipcinit();
}

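/*
 * semrealloc: resize the semaphore arrays to the new limits.
 * => Allocates a replacement wired region, drains any semop() waiters,
 *    copies the active identifiers, semaphores and undo list across,
 *    updates seminfo, and frees the old region.
 * => Returns EBUSY if a reallocation is already in progress or if the
 *    new limits are smaller than what is currently in use.
 */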
static int
semrealloc(int newsemmni, int newsemmns, int newsemmnu)
{
	struct semid_ds *new_sema, *old_sema;
	struct __sem *new_sem;
	struct sem_undo *new_semu_list, *suptr, *nsuptr;
	int *new_semu;
	kcondvar_t *new_semcv;
	vaddr_t v;
	int i, j, lsemid, nmnus, sz;

	if (newsemmni < 1 || newsemmns < 1 || newsemmnu < 1)
		return EINVAL;

	/* Allocate the wired memory for our structures */
	sz = ALIGN(newsemmni * sizeof(struct semid_ds)) +
	    ALIGN(newsemmns * sizeof(struct __sem)) +
	    ALIGN(newsemmni * sizeof(kcondvar_t)) +
	    ALIGN(newsemmnu * seminfo.semusz);
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&semlock);
	if (sem_realloc_state) {
		mutex_exit(&semlock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	sem_realloc_state = true;
	if (sem_waiters) {
		/*
		 * The reallocation state is set: wake up all waiters
		 * and wait until they have all exited.
		 */
		for (i = 0; i < seminfo.semmni; i++)
			cv_broadcast(&semcv[i]);
		while (sem_waiters)
			cv_wait(&sem_realloc_cv, &semlock);
	}
	old_sema = sema;

	/* Find the index of the last allocated slot */
	lsemid = 0;
	for (i = 0; i < seminfo.semmni; i++)
		if (sema[i].sem_perm.mode & SEM_ALLOC)
			lsemid = i;

	/* Get the number of currently used undo structures */
	nmnus = 0;
	for (i = 0; i < seminfo.semmnu; i++) {
		suptr = SEMU(semu, i);
		if (suptr->un_proc == NULL)
			continue;
		nmnus++;
	}

	/* We cannot reallocate less memory than we use */
	if (lsemid >= newsemmni || semtot > newsemmns || nmnus > newsemmnu) {
		mutex_exit(&semlock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}

	new_sema = (void *)v;
	new_sem = (void *)((uintptr_t)new_sema +
	    ALIGN(newsemmni * sizeof(struct semid_ds)));
	new_semcv = (void *)((uintptr_t)new_sem +
	    ALIGN(newsemmns * sizeof(struct __sem)));
	new_semu = (void *)((uintptr_t)new_semcv +
	    ALIGN(newsemmni * sizeof(kcondvar_t)));

	/* Initialize all semaphore identifiers and condvars */
	for (i = 0; i < newsemmni; i++) {
		new_sema[i]._sem_base = 0;
		new_sema[i].sem_perm.mode = 0;
		cv_init(&new_semcv[i], "semwait");
	}
	for (i = 0; i < newsemmnu; i++) {
		nsuptr = SEMU(new_semu, i);
		nsuptr->un_proc = NULL;
	}

	/*
	 * Copy all identifiers, semaphores and list of the
	 * undo structures to the new memory allocation.
	 */
	j = 0;
	for (i = 0; i <= lsemid; i++) {
		if ((sema[i].sem_perm.mode & SEM_ALLOC) == 0)
			continue;
		memcpy(&new_sema[i], &sema[i], sizeof(struct semid_ds));
		new_sema[i]._sem_base = &new_sem[j];
		memcpy(new_sema[i]._sem_base, sema[i]._sem_base,
		    (sizeof(struct __sem) * sema[i].sem_nsems));
		j += sema[i].sem_nsems;
	}
	KASSERT(j == semtot);

	j = 0;
	new_semu_list = NULL;
	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
		KASSERT(j < newsemmnu);
		nsuptr = SEMU(new_semu, j);
		memcpy(nsuptr, suptr, SEMUSZ);
		nsuptr->un_next = new_semu_list;
		new_semu_list = nsuptr;
		j++;
	}

	for (i = 0; i < seminfo.semmni; i++) {
		KASSERT(cv_has_waiters(&semcv[i]) == false);
		cv_destroy(&semcv[i]);
	}

	sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)) +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
	    ALIGN(seminfo.semmnu * seminfo.semusz);
	sz = round_page(sz);

	/* Set the pointers and update the new values */
	sema = new_sema;
	sem = new_sem;
	semcv = new_semcv;
	semu = new_semu;
	semu_list = new_semu_list;

	seminfo.semmni = newsemmni;
	seminfo.semmns = newsemmns;
	seminfo.semmnu = newsemmnu;

	/* Reallocation completed - notify all waiters, if any */
	sem_realloc_state = false;
	cv_broadcast(&sem_realloc_cv);
	mutex_exit(&semlock);

	uvm_km_free(kernel_map, (vaddr_t)old_sema, sz, UVM_KMF_WIRED);
	return 0;
}

/*
 * Placebo.
 */

int
sys_semconfig(struct lwp *l, const struct sys_semconfig_args *uap, register_t *retval)
{

	*retval = 0;
	return 0;
}

/*
 * Allocate a new sem_undo structure for a process.
 * => Returns NULL on failure.
 */
struct sem_undo *
semu_alloc(struct proc *p)
{
	struct sem_undo *suptr, **supptr;
	bool attempted = false;
	int i;

	KASSERT(mutex_owned(&semlock));
 again:
	/* Look for a free structure. */
	for (i = 0; i < seminfo.semmnu; i++) {
		suptr = SEMU(semu, i);
		if (suptr->un_proc == NULL) {
			/* Found. Fill it in and return. */
			suptr->un_next = semu_list;
			semu_list = suptr;
			suptr->un_cnt = 0;
			suptr->un_proc = p;
			return suptr;
		}
	}

	/* Not found. Attempt to free some structures. */
	if (!attempted) {
		bool freed = false;

		attempted = true;
		supptr = &semu_list;
		while ((suptr = *supptr) != NULL) {
			if (suptr->un_cnt == 0) {
				suptr->un_proc = NULL;
				*supptr = suptr->un_next;
				freed = true;
			} else {
				supptr = &suptr->un_next;
			}
		}
		if (freed) {
			goto again;
		}
	}
	return NULL;
}

/*
 * Adjust a particular entry for a particular proc
 */

int
semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid, int semnum,
    int adjval)
{
	struct sem_undo *suptr;
	struct undo *sunptr;
	int i;

	KASSERT(mutex_owned(&semlock));

	/*
	 * Look for and remember the sem_undo if the caller doesn't
	 * provide it
	 */

	suptr = *supptr;
	if (suptr == NULL) {
		for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
			if (suptr->un_proc == p)
				break;

		if (suptr == NULL) {
			suptr = semu_alloc(p);
			if (suptr == NULL)
				return (ENOSPC);
		}
		*supptr = suptr;
	}

	/*
	 * Look for the requested entry and adjust it (delete if
	 * adjval becomes 0).
	 */
	sunptr = &suptr->un_ent[0];
	for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
			continue;
		sunptr->un_adjval += adjval;
		if (sunptr->un_adjval == 0) {
			suptr->un_cnt--;
			if (i < suptr->un_cnt)
				suptr->un_ent[i] =
				    suptr->un_ent[suptr->un_cnt];
		}
		return (0);
	}

	/* Didn't find the right entry - create it */
	if (suptr->un_cnt == SEMUME)
		return (EINVAL);

	sunptr = &suptr->un_ent[suptr->un_cnt];
	suptr->un_cnt++;
	sunptr->un_adjval = adjval;
	sunptr->un_id = semid;
	sunptr->un_num = semnum;
	return (0);
}

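/*
 * semundo_clear: remove any undo entries that refer to the given
 * semaphore identifier; if semnum is not -1, only entries for that
 * semaphore number are removed.
 */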
void
semundo_clear(int semid, int semnum)
{
	struct sem_undo *suptr;
	struct undo *sunptr, *sunend;

	KASSERT(mutex_owned(&semlock));

	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
		for (sunptr = &suptr->un_ent[0],
		    sunend = sunptr + suptr->un_cnt; sunptr < sunend;) {
			if (sunptr->un_id == semid) {
				if (semnum == -1 || sunptr->un_num == semnum) {
					suptr->un_cnt--;
					sunend--;
					if (sunptr != sunend)
						*sunptr = *sunend;
					if (semnum != -1)
						break;
					else
						continue;
				}
			}
			sunptr++;
		}
}

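/*
 * sys_____semctl50: semctl(2) system call.
 * => Copies in the argument union when the command needs it, hands the
 *    request to semctl1(), and copies the result back for IPC_STAT.
 */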
int
sys_____semctl50(struct lwp *l, const struct sys_____semctl50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) semid;
		syscallarg(int) semnum;
		syscallarg(int) cmd;
		syscallarg(union __semun *) arg;
	} */
	struct semid_ds sembuf;
	int cmd, error;
	void *pass_arg;
	union __semun karg;

	cmd = SCARG(uap, cmd);

	pass_arg = get_semctl_arg(cmd, &sembuf, &karg);

	if (pass_arg) {
		error = copyin(SCARG(uap, arg), &karg, sizeof(karg));
		if (error)
			return error;
		if (cmd == IPC_SET) {
			error = copyin(karg.buf, &sembuf, sizeof(sembuf));
			if (error)
				return (error);
		}
	}

	error = semctl1(l, SCARG(uap, semid), SCARG(uap, semnum), cmd,
	    pass_arg, retval);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&sembuf, karg.buf, sizeof(sembuf));

	return (error);
}

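/*
 * semctl1: perform a semctl(2) operation on a semaphore set.
 * => 'v' points to a struct semid_ds for IPC_SET/IPC_STAT, or to a
 *    union __semun for the value commands; it may be NULL otherwise.
 */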
int
semctl1(struct lwp *l, int semid, int semnum, int cmd, void *v,
    register_t *retval)
{
	kauth_cred_t cred = l->l_cred;
	union __semun *arg = v;
	struct semid_ds *sembuf = v, *semaptr;
	int i, error, ix;

	SEM_PRINTF(("call to semctl(%d, %d, %d, %p)\n",
	    semid, semnum, cmd, v));

	mutex_enter(&semlock);

	ix = IPCID_TO_IX(semid);
	if (ix < 0 || ix >= seminfo.semmni) {
		mutex_exit(&semlock);
		return (EINVAL);
	}

	semaptr = &sema[ix];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != IPCID_TO_SEQ(semid)) {
		mutex_exit(&semlock);
		return (EINVAL);
	}

	switch (cmd) {
	case IPC_RMID:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)) != 0)
			break;
		semaptr->sem_perm.cuid = kauth_cred_geteuid(cred);
		semaptr->sem_perm.uid = kauth_cred_geteuid(cred);
		semtot -= semaptr->sem_nsems;
		for (i = semaptr->_sem_base - sem; i < semtot; i++)
			sem[i] = sem[i + semaptr->sem_nsems];
		for (i = 0; i < seminfo.semmni; i++) {
			if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
			    sema[i]._sem_base > semaptr->_sem_base)
				sema[i]._sem_base -= semaptr->sem_nsems;
		}
		semaptr->sem_perm.mode = 0;
		semundo_clear(ix, -1);
		cv_broadcast(&semcv[ix]);
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
			break;
		KASSERT(sembuf != NULL);
		semaptr->sem_perm.uid = sembuf->sem_perm.uid;
		semaptr->sem_perm.gid = sembuf->sem_perm.gid;
		semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
		    (sembuf->sem_perm.mode & 0777);
		semaptr->sem_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		KASSERT(sembuf != NULL);
		memset(sembuf, 0, sizeof *sembuf);
		sembuf->sem_perm = semaptr->sem_perm;
		sembuf->sem_perm.mode &= 0777;
		sembuf->sem_nsems = semaptr->sem_nsems;
		sembuf->sem_otime = semaptr->sem_otime;
		sembuf->sem_ctime = semaptr->sem_ctime;
		break;

	case GETNCNT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].semncnt;
		break;

	case GETPID:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].sempid;
		break;

	case GETVAL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].semval;
		break;

	case GETALL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		KASSERT(arg != NULL);
		for (i = 0; i < semaptr->sem_nsems; i++) {
			error = copyout(&semaptr->_sem_base[i].semval,
			    &arg->array[i], sizeof(arg->array[i]));
			if (error != 0)
				break;
		}
		break;

	case GETZCNT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].semzcnt;
		break;

	case SETVAL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		KASSERT(arg != NULL);
		if ((unsigned int)arg->val > seminfo.semvmx) {
			error = ERANGE;
			break;
		}
		semaptr->_sem_base[semnum].semval = arg->val;
		semundo_clear(ix, semnum);
		cv_broadcast(&semcv[ix]);
		break;

	case SETALL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
			break;
		KASSERT(arg != NULL);
		for (i = 0; i < semaptr->sem_nsems; i++) {
			unsigned short semval;
			error = copyin(&arg->array[i], &semval,
			    sizeof(arg->array[i]));
			if (error != 0)
				break;
			if ((unsigned int)semval > seminfo.semvmx) {
				error = ERANGE;
				break;
			}
			semaptr->_sem_base[i].semval = semval;
		}
		semundo_clear(ix, -1);
		cv_broadcast(&semcv[ix]);
		break;

	default:
		error = EINVAL;
		break;
	}

	mutex_exit(&semlock);
	return (error);
}

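/*
 * sys_semget: semget(2) system call - look up the semaphore set for
 * the given key, or create a new one if IPC_CREAT is set or the key
 * is IPC_PRIVATE.
 */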
int
sys_semget(struct lwp *l, const struct sys_semget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(int) nsems;
		syscallarg(int) semflg;
	} */
	int semid, error = 0;
	int key = SCARG(uap, key);
	int nsems = SCARG(uap, nsems);
	int semflg = SCARG(uap, semflg);
	kauth_cred_t cred = l->l_cred;

	SEM_PRINTF(("semget(0x%x, %d, 0%o)\n", key, nsems, semflg));

	mutex_enter(&semlock);

	if (key != IPC_PRIVATE) {
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
			    sema[semid].sem_perm._key == key)
				break;
		}
		if (semid < seminfo.semmni) {
			SEM_PRINTF(("found public key\n"));
			if ((error = ipcperm(cred, &sema[semid].sem_perm,
			    semflg & 0700)))
				goto out;
			if (nsems > 0 && sema[semid].sem_nsems < nsems) {
				SEM_PRINTF(("too small\n"));
				error = EINVAL;
				goto out;
			}
			if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
				SEM_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto out;
			}
			goto found;
		}
	}

	SEM_PRINTF(("need to allocate the semid_ds\n"));
	if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
		if (nsems <= 0 || nsems > seminfo.semmsl) {
			SEM_PRINTF(("nsems out of range (0<%d<=%d)\n", nsems,
			    seminfo.semmsl));
			error = EINVAL;
			goto out;
		}
		if (nsems > seminfo.semmns - semtot) {
			SEM_PRINTF(("not enough semaphores left "
			    "(need %d, got %d)\n",
			    nsems, seminfo.semmns - semtot));
			error = ENOSPC;
			goto out;
		}
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
				break;
		}
		if (semid == seminfo.semmni) {
			SEM_PRINTF(("no more semid_ds's available\n"));
			error = ENOSPC;
			goto out;
		}
		SEM_PRINTF(("semid %d is available\n", semid));
		sema[semid].sem_perm._key = key;
		sema[semid].sem_perm.cuid = kauth_cred_geteuid(cred);
		sema[semid].sem_perm.uid = kauth_cred_geteuid(cred);
		sema[semid].sem_perm.cgid = kauth_cred_getegid(cred);
		sema[semid].sem_perm.gid = kauth_cred_getegid(cred);
		sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
		sema[semid].sem_perm._seq =
		    (sema[semid].sem_perm._seq + 1) & 0x7fff;
		sema[semid].sem_nsems = nsems;
		sema[semid].sem_otime = 0;
		sema[semid].sem_ctime = time_second;
		sema[semid]._sem_base = &sem[semtot];
		semtot += nsems;
		memset(sema[semid]._sem_base, 0,
		    sizeof(sema[semid]._sem_base[0]) * nsems);
		SEM_PRINTF(("sembase = %p, next = %p\n", sema[semid]._sem_base,
		    &sem[semtot]));
	} else {
		SEM_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto out;
	}

 found:
	*retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
 out:
	mutex_exit(&semlock);
	return (error);
}

#define SMALL_SOPS 8

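/*
 * sys_semop: semop(2) system call - perform the supplied array of
 * semaphore operations atomically, sleeping until all of them can be
 * satisfied unless IPC_NOWAIT is requested.
 */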
int
sys_semop(struct lwp *l, const struct sys_semop_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) semid;
		syscallarg(struct sembuf *) sops;
		syscallarg(size_t) nsops;
	} */
	struct proc *p = l->l_proc;
	int semid = SCARG(uap, semid), seq;
	size_t nsops = SCARG(uap, nsops);
	struct sembuf small_sops[SMALL_SOPS];
	struct sembuf *sops;
	struct semid_ds *semaptr;
	struct sembuf *sopptr = NULL;
	struct __sem *semptr = NULL;
	struct sem_undo *suptr = NULL;
	kauth_cred_t cred = l->l_cred;
	int i, error;
	int do_wakeup, do_undos;

	SEM_PRINTF(("call to semop(%d, %p, %zd)\n", semid, SCARG(uap,sops), nsops));

	if (__predict_false((p->p_flag & PK_SYSVSEM) == 0)) {
		mutex_enter(p->p_lock);
		p->p_flag |= PK_SYSVSEM;
		mutex_exit(p->p_lock);
	}

 restart:
	if (nsops <= SMALL_SOPS) {
		sops = small_sops;
	} else if (nsops <= seminfo.semopm) {
		sops = kmem_alloc(nsops * sizeof(*sops), KM_SLEEP);
	} else {
		SEM_PRINTF(("too many sops (max=%d, nsops=%zd)\n",
		    seminfo.semopm, nsops));
		return (E2BIG);
	}

	error = copyin(SCARG(uap, sops), sops, nsops * sizeof(sops[0]));
	if (error) {
		SEM_PRINTF(("error = %d from copyin(%p, %p, %zd)\n", error,
		    SCARG(uap, sops), &sops, nsops * sizeof(sops[0])));
		if (sops != small_sops)
			kmem_free(sops, nsops * sizeof(*sops));
		return error;
	}

	mutex_enter(&semlock);
	/* If a reallocation is in progress, wait for it to complete */
	while (__predict_false(sem_realloc_state))
		cv_wait(&sem_realloc_cv, &semlock);

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */
	if (semid < 0 || semid >= seminfo.semmni) {
		error = EINVAL;
		goto out;
	}

	semaptr = &sema[semid];
	seq = IPCID_TO_SEQ(SCARG(uap, semid));
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != seq) {
		error = EINVAL;
		goto out;
	}

	if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W))) {
		SEM_PRINTF(("error = %d from ipaccess\n", error));
		goto out;
	}

	for (i = 0; i < nsops; i++)
		if (sops[i].sem_num >= semaptr->sem_nsems) {
			error = EFBIG;
			goto out;
		}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up. At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	do_undos = 0;

	for (;;) {
		do_wakeup = 0;

		for (i = 0; i < nsops; i++) {
			sopptr = &sops[i];
			semptr = &semaptr->_sem_base[sopptr->sem_num];

			SEM_PRINTF(("semop: semaptr=%p, sem_base=%p, "
			    "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
			    semaptr, semaptr->_sem_base, semptr,
			    sopptr->sem_num, semptr->semval, sopptr->sem_op,
			    (sopptr->sem_flg & IPC_NOWAIT) ?
			    "nowait" : "wait"));

			if (sopptr->sem_op < 0) {
				if ((int)(semptr->semval +
				    sopptr->sem_op) < 0) {
					SEM_PRINTF(("semop: "
					    "can't do it now\n"));
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
					    semptr->semzcnt > 0)
						do_wakeup = 1;
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					SEM_PRINTF(("semop: not zero now\n"));
					break;
				}
			} else {
				if (semptr->semncnt > 0)
					do_wakeup = 1;
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			}
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= nsops)
			goto done;

		/*
		 * No ... rollback anything that we've already done
		 */
		SEM_PRINTF(("semop: rollback 0 through %d\n", i - 1));
		while (i-- > 0)
			semaptr->_sem_base[sops[i].sem_num].semval -=
			    sops[i].sem_op;

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			error = EAGAIN;
			goto out;
		}

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;

		sem_waiters++;
		SEM_PRINTF(("semop: good night!\n"));
		error = cv_wait_sig(&semcv[semid], &semlock);
		SEM_PRINTF(("semop: good morning (error=%d)!\n", error));
		sem_waiters--;

		/* Notify reallocator, if it is waiting */
		cv_broadcast(&sem_realloc_cv);

		/*
		 * Make sure that the semaphore still exists
		 */
		if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
		    semaptr->sem_perm._seq != seq) {
			error = EIDRM;
			goto out;
		}

		/*
		 * The semaphore is still alive. Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;

		/* If a reallocation started while we slept, restart the call */
		if (sem_realloc_state) {
			mutex_exit(&semlock);
			goto restart;
		}

		/* Is it really morning, or was our sleep interrupted? */
		if (error != 0) {
			error = EINTR;
			goto out;
		}
		SEM_PRINTF(("semop: good morning!\n"));
	}

 done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			error = semundo_adjust(p, &suptr, semid,
			    sops[i].sem_num, -adjval);
			if (error == 0)
				continue;

			/*
			 * Oh-Oh! We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored. We
			 * rollback the undo's in the exact reverse order that
			 * we applied them. This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			while (i-- > 0) {
				if ((sops[i].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[i].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(p, &suptr, semid,
				    sops[i].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			for (i = 0; i < nsops; i++)
				semaptr->_sem_base[sops[i].sem_num].semval -=
				    sops[i].sem_op;

			SEM_PRINTF(("error = %d from semundo_adjust\n", error));
			goto out;
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		semptr->sempid = p->p_pid;
	}

	/* Update sem_otime */
	semaptr->sem_otime = time_second;

	/* Do a wakeup if any semaphore was up'd. */
	if (do_wakeup) {
		SEM_PRINTF(("semop: doing wakeup\n"));
		cv_broadcast(&semcv[semid]);
		SEM_PRINTF(("semop: back from wakeup\n"));
	}
	SEM_PRINTF(("semop: done\n"));
	*retval = 0;

 out:
	mutex_exit(&semlock);
	if (sops != small_sops)
		kmem_free(sops, nsops * sizeof(*sops));
	return error;
}

/*
 * Go through the undo structures for this process and apply the
 * adjustments to semaphores.
 */
/*ARGSUSED*/
void
semexit(struct proc *p, void *v)
{
	struct sem_undo *suptr;
	struct sem_undo **supptr;

	if ((p->p_flag & PK_SYSVSEM) == 0)
		return;

	mutex_enter(&semlock);

	/*
	 * Go through the chain of undo vectors looking for one
	 * associated with this process.
	 */

	for (supptr = &semu_list; (suptr = *supptr) != NULL;
	    supptr = &suptr->un_next) {
		if (suptr->un_proc == p)
			break;
	}

	/*
	 * If there is no undo vector, there is nothing to do.
	 */

	if (suptr == NULL) {
		mutex_exit(&semlock);
		return;
	}

	/*
	 * We now have an undo vector for this process.
	 */

	SEM_PRINTF(("proc @%p has undo structure with %d entries\n", p,
	    suptr->un_cnt));

	/*
	 * If there are any active undo elements then process them.
	 */
	if (suptr->un_cnt > 0) {
		int ix;

		for (ix = 0; ix < suptr->un_cnt; ix++) {
			int semid = suptr->un_ent[ix].un_id;
			int semnum = suptr->un_ent[ix].un_num;
			int adjval = suptr->un_ent[ix].un_adjval;
			struct semid_ds *semaptr;

			semaptr = &sema[semid];
			if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
				panic("semexit - semid not allocated");
			if (semnum >= semaptr->sem_nsems)
				panic("semexit - semnum out of range");

			SEM_PRINTF(("semexit: %p id=%d num=%d(adj=%d) ; "
			    "sem=%d\n",
			    suptr->un_proc, suptr->un_ent[ix].un_id,
			    suptr->un_ent[ix].un_num,
			    suptr->un_ent[ix].un_adjval,
			    semaptr->_sem_base[semnum].semval));

			if (adjval < 0 &&
			    semaptr->_sem_base[semnum].semval < -adjval)
				semaptr->_sem_base[semnum].semval = 0;
			else
				semaptr->_sem_base[semnum].semval += adjval;

			cv_broadcast(&semcv[semid]);
			SEM_PRINTF(("semexit: back from wakeup\n"));
		}
	}

	/*
	 * Deallocate the undo vector.
	 */
	SEM_PRINTF(("removing vector\n"));
	suptr->un_proc = NULL;
	*supptr = suptr->un_next;
	mutex_exit(&semlock);
}

/*
 * Sysctl initialization and nodes.
 */

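/* Handler for the kern.ipc.semmni sysctl: resize via semrealloc() */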
static int
sysctl_ipc_semmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = seminfo.semmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return semrealloc(newsize, seminfo.semmns, seminfo.semmnu);
}

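/* Handler for the kern.ipc.semmns sysctl: resize via semrealloc() */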
static int
sysctl_ipc_semmns(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = seminfo.semmns;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return semrealloc(seminfo.semmni, newsize, seminfo.semmnu);
}

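/* Handler for the kern.ipc.semmnu sysctl: resize via semrealloc() */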
static int
sysctl_ipc_semmnu(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = seminfo.semmnu;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return semrealloc(seminfo.semmni, seminfo.semmns, newsize);
}

SYSCTL_SETUP(sysctl_ipc_sem_setup, "sysctl kern.ipc subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ipc",
	    SYSCTL_DESCR("SysV IPC options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "semmni",
	    SYSCTL_DESCR("Max number of semaphore identifiers"),
	    sysctl_ipc_semmni, 0, &seminfo.semmni, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "semmns",
	    SYSCTL_DESCR("Max number of semaphores in system"),
	    sysctl_ipc_semmns, 0, &seminfo.semmns, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "semmnu",
	    SYSCTL_DESCR("Max number of undo structures in system"),
	    sysctl_ipc_semmnu, 0, &seminfo.semmnu, 0,
	    CTL_CREATE, CTL_EOL);
}
