/*	$NetBSD: sysv_shm.c,v 1.123 2012/03/13 18:40:54 elad Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.123 2012/03/13 18:40:54 elad Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/shm.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

int shm_nused __cacheline_aligned;
struct shmid_ds *shmsegs __read_mostly;

static kmutex_t shm_lock __cacheline_aligned;
static kcondvar_t *shm_cv __cacheline_aligned;
static int shm_last_free __cacheline_aligned;
static size_t shm_committed __cacheline_aligned;
static int shm_use_phys __read_mostly;

static kcondvar_t shm_realloc_cv;
static bool shm_realloc_state;
static u_int shm_realloc_disable;

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

#ifdef SHMDEBUG
#define SHMPRINTF(a) printf a
#else
#define SHMPRINTF(a)
#endif
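
/*
 * Illustrative note: the SHMPRINTF() diagnostics above are compiled in
 * only when SHMDEBUG is defined.  A hedged sketch of enabling it, assuming
 * the option is passed through the kernel build (e.g. via CPPFLAGS or a
 * config(5) options line, if wired up for this kernel):
 *
 *	options 	SHMDEBUG
 */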

static int shmrealloc(int);

/*
 * Find the shared memory segment by the identifier.
 * => must be called with shm_lock held;
 */
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	KASSERT(mutex_owned(&shm_lock));

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode &
	    (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;

	return shmseg;
}
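
/*
 * A worked example of the identifier check above, assuming the usual
 * <sys/ipc.h> encoding where IPCID_TO_IX() takes the low 16 bits of the
 * id and IPCID_TO_SEQ() the next 16: segment number 3 with sequence
 * 0x000a yields shmid = (0x000a << 16) | 3 = 0x000a0003.  Once the slot
 * is recycled its sequence number changes, so a stale shmid for a
 * removed segment fails the _seq comparison and returns NULL.
 */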

/*
 * Free memory segment.
 * => must be called with shm_lock held;
 */
static void
shm_free_segment(int segnum)
{
	struct shmid_ds *shmseg;
	size_t size;
	bool wanted;

	KASSERT(mutex_owned(&shm_lock));

	shmseg = &shmsegs[segnum];
	SHMPRINTF(("shm freeing key 0x%lx seq 0x%x\n",
	    shmseg->shm_perm._key, shmseg->shm_perm._seq));

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	wanted = (shmseg->shm_perm.mode & SHMSEG_WANTED);

	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_last_free = segnum;
	if (wanted == true)
		cv_broadcast(&shm_cv[segnum]);
}
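
/*
 * A worked example of the rounding above, assuming PAGE_SIZE is 4096
 * (PGOFSET = 4095): a segment with shm_segsz = 10000 rounds up to
 * (10000 + 4095) & ~4095 = 12288 bytes, so btoc(12288) returns the
 * 3 pages credited back to shm_committed.
 */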

/*
 * Delete entry from the shm map.
 * => must be called with shm_lock held;
 */
static struct uvm_object *
shm_delete_mapping(struct shmmap_state *shmmap_s,
    struct shmmap_entry *shmmap_se)
{
	struct uvm_object *uobj = NULL;
	struct shmid_ds *shmseg;
	int segnum;

	KASSERT(mutex_owned(&shm_lock));

	segnum = IPCID_TO_IX(shmmap_se->shmid);
	shmseg = &shmsegs[segnum];
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		uobj = shmseg->_shm_internal;
		shm_free_segment(segnum);
	}

	return uobj;
}

/*
 * Get a non-shared shm map for that vmspace.  Note that memory
 * allocation might be performed with the lock held.
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	KASSERT(mutex_owned(&shm_lock));

	/* 1. A shm map with refcnt = 1, used by ourselves, thus return */
	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return oshmmap_s;

	/* 2. No shm map present - create a fresh one */
	shmmap_s = kmem_zalloc(sizeof(struct shmmap_state), KM_SLEEP);
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (void *)shmmap_s;

	if (oshmmap_s == NULL)
		return shmmap_s;

	SHMPRINTF(("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs));

	/* 3. A shared shm map, copy to a fresh one and adjust refcounts */
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;

	return shmmap_s;
}
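
/*
 * A worked example of the nrefs protocol above: after fork(2), shmfork()
 * below leaves parent and child sharing one shmmap_state (nrefs == 2).
 * The first shmat/shmdt/SHM_LOCK in either process passes through here,
 * takes case 3 and splits off a private copy; subsequent calls from the
 * same process hit case 1 and reuse it.
 */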

/*
 * Lock/unlock the memory.
 * => must be called with shm_lock held;
 * => called from one place, thus inline;
 */
static inline int
shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
{
	struct proc *p = l->l_proc;
	struct shmmap_entry *shmmap_se;
	struct shmmap_state *shmmap_s;
	size_t size;
	int error;

	KASSERT(mutex_owned(&shm_lock));
	shmmap_s = shmmap_getprivate(p);

	/* Find our shared memory address by shmid */
	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
		if (shmmap_se->shmid != shmid)
			continue;

		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

		if (cmd == SHM_LOCK &&
		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
			/* Wire the object and map, then tag it */
			error = uvm_obj_wirepages(shmseg->_shm_internal,
			    0, size, NULL);
			if (error)
				return EIO;
			error = uvm_map_pageable(&p->p_vmspace->vm_map,
			    shmmap_se->va, shmmap_se->va + size, false, 0);
			if (error) {
				uvm_obj_unwirepages(shmseg->_shm_internal,
				    0, size);
				if (error == EFAULT)
					error = ENOMEM;
				return error;
			}
			shmseg->shm_perm.mode |= SHMSEG_WIRED;

		} else if (cmd == SHM_UNLOCK &&
		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
			/* Unwire the object and map, then untag it */
			uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
			error = uvm_map_pageable(&p->p_vmspace->vm_map,
			    shmmap_se->va, shmmap_se->va + size, true, 0);
			if (error)
				return EIO;
			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
		}
	}

	return 0;
}

/*
 * Unmap shared memory.
 */
int
sys_shmdt(struct lwp *l, const struct sys_shmdt_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) shmaddr;
	} */
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s1, *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object *uobj;
	struct shmid_ds *shmseg;
	size_t size;

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmmap_s1 = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s1 == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	/* Find the map entry */
	SLIST_FOREACH(shmmap_se, &shmmap_s1->entries, next)
		if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (shmmap_se == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	shmmap_s = shmmap_getprivate(p);
	if (shmmap_s != shmmap_s1) {
		/* Map has been copied, lookup entry in new map */
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
				break;
		if (shmmap_se == NULL) {
			mutex_exit(&shm_lock);
			return EINVAL;
		}
	}

	SHMPRINTF(("shmdt: vm %p: remove %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va));

	/* Delete the entry from shm map */
	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
	shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	mutex_exit(&shm_lock);

	uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
	if (uobj != NULL) {
		uao_detach(uobj);
	}
	kmem_free(shmmap_se, sizeof(struct shmmap_entry));

	return 0;
}
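
/*
 * From userland, the address passed to shmdt(2) must be exactly the one
 * returned by shmat(2), since the lookup above matches on shmmap_se->va;
 * a minimal sketch (illustrative, not part of this file):
 *
 *	void *addr = shmat(shmid, NULL, 0);
 *	...
 *	if (shmdt(addr) == -1)
 *		err(EXIT_FAILURE, "shmdt");
 */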

/*
 * Map shared memory.
 */
int
sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */
	int error, flags = 0;
	struct proc *p = l->l_proc;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object *uobj;
	struct vmspace *vm;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	/* Allocate a new map entry and set it */
	shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
	shmmap_se->shmid = SCARG(uap, shmid);

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL) {
		error = EINVAL;
		goto err;
	}
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto err;

	vm = p->p_vmspace;
	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg) {
		error = EMFILE;
		goto err;
	}

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else {
			error = EINVAL;
			goto err;
		}
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)vm->vm_daddr, size);
	}

	/*
	 * Create a map entry, add it to the list and increase the counters.
	 * The lock will be dropped before the mapping, disable reallocation.
	 */
	shmmap_s = shmmap_getprivate(p);
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_nattch++;
	shm_realloc_disable++;
	mutex_exit(&shm_lock);

	/*
	 * Add a reference to the memory object, map it to the
	 * address space, and lock the memory, if needed.
	 */
	uobj = shmseg->_shm_internal;
	uao_reference(uobj);
	error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
	if (error)
		goto err_detach;
	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
		error = uvm_map_pageable(&vm->vm_map, attach_va,
		    attach_va + size, false, 0);
		if (error) {
			if (error == EFAULT)
				error = ENOMEM;
			uvm_deallocate(&vm->vm_map, attach_va, size);
			goto err_detach;
		}
	}

	/* Set the new address, and update the time */
	mutex_enter(&shm_lock);
	shmmap_se->va = attach_va;
	shmseg->shm_atime = time_second;
	shm_realloc_disable--;
	retval[0] = attach_va;
	SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, attach_va));
err:
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);
	if (error && shmmap_se) {
		kmem_free(shmmap_se, sizeof(struct shmmap_entry));
	}
	return error;

err_detach:
	uao_detach(uobj);
	mutex_enter(&shm_lock);
	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
	shm_realloc_disable--;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);
	if (uobj != NULL) {
		uao_detach(uobj);
	}
	kmem_free(shmmap_se, sizeof(struct shmmap_entry));
	return error;
}
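
/*
 * A minimal userland sketch of the attach path above (illustrative only):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	if (p == (void *)-1)
 *		err(EXIT_FAILURE, "shmat");
 *	p[0] = 1;
 *
 * With shmaddr == NULL the kernel picks attach_va via e_vm_default_addr();
 * a non-NULL shmaddr with SHM_RND is rounded down to a multiple of SHMLBA,
 * and an unaligned address without SHM_RND fails with EINVAL, matching the
 * attach_va computation above.
 */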

/*
 * Shared memory control operations.
 */
int
sys___shmctl50(struct lwp *l, const struct sys___shmctl50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);
	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return error;
	}

	error = shmctl1(l, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return error;
}

int
shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
{
	struct uvm_object *uobj = NULL;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	int error = 0;

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			break;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			break;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			break;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			uobj = shmseg->_shm_internal;
			shm_free_segment(IPCID_TO_IX(shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
		if ((error = kauth_authorize_system(cred,
		    KAUTH_SYSTEM_SYSVIPC,
		    (cmd == SHM_LOCK) ? KAUTH_REQ_SYSTEM_SYSVIPC_SHM_LOCK :
		    KAUTH_REQ_SYSTEM_SYSVIPC_SHM_UNLOCK, NULL, NULL, NULL)) != 0)
			break;
		error = shm_memlock(l, shmseg, shmid, cmd);
		break;
	default:
		error = EINVAL;
	}

	mutex_exit(&shm_lock);
	if (uobj != NULL)
		uao_detach(uobj);
	return error;
}
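
/*
 * A hedged userland sketch of the commands handled above (illustrative):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);
 *	ds.shm_perm.mode = 0640;
 *	shmctl(id, IPC_SET, &ds);
 *	shmctl(id, SHM_LOCK, NULL);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * IPC_SET takes effect only for uid, gid and the access bits; SHM_LOCK
 * and SHM_UNLOCK require kauth approval; IPC_RMID is lazy - the segment
 * is freed only once the last attachment goes away.
 */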

/*
 * Try to take an already existing segment.
 * => must be called with shm_lock held;
 * => called from one place, thus inline;
 */
static inline int
shmget_existing(struct lwp *l, const struct sys_shmget_args *uap, int mode,
    register_t *retval)
{
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	int segnum, error;
again:
	KASSERT(mutex_owned(&shm_lock));

	/* Find segment by key */
	for (segnum = 0; segnum < shminfo.shmmni; segnum++)
		if ((shmsegs[segnum].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[segnum].shm_perm._key == SCARG(uap, key))
			break;
	if (segnum == shminfo.shmmni) {
		/* Not found */
		return -1;
	}

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = cv_wait_sig(&shm_cv[segnum], &shm_lock);
		if (error)
			return error;
		goto again;
	}

	/*
	 * First check the flags, to generate a useful error when a
	 * segment already exists.
	 */
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;

	/* Check the permission and segment size. */
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

int
sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	key_t key = SCARG(uap, key);
	size_t size;
	int error, mode, segnum;
	bool lockmem;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

	SHMPRINTF(("shmget: key 0x%lx size 0x%zx shmflg 0x%x mode 0x%x\n",
	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode));

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	if (key != IPC_PRIVATE) {
		error = shmget_existing(l, uap, mode, retval);
		if (error != -1) {
			mutex_exit(&shm_lock);
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0) {
			mutex_exit(&shm_lock);
			return ENOENT;
		}
	}
	error = 0;

	/*
	 * Check for the limits.
	 */
	size = SCARG(uap, size);
	if (size < shminfo.shmmin || size > shminfo.shmmax) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}
	if (shm_nused >= shminfo.shmmni) {
		mutex_exit(&shm_lock);
		return ENOSPC;
	}
	size = (size + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall) {
		mutex_exit(&shm_lock);
		return ENOMEM;
	}

	/* Find the first available segment */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni; segnum++)
			if (shmsegs[segnum].shm_perm.mode & SHMSEG_FREE)
				break;
		KASSERT(segnum < shminfo.shmmni);
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}

	/*
	 * Initialize the segment.
	 * We will drop the lock while allocating the memory, thus mark the
	 * segment present, but removed, so that no other thread can take it.
	 * Also, disable reallocation while the lock is dropped.
	 */
	shmseg = &shmsegs[segnum];
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shm_committed += btoc(size);
	shm_nused++;
	lockmem = shm_use_phys;
	shm_realloc_disable++;
	mutex_exit(&shm_lock);

	/* Allocate the memory object and lock it if needed */
	shmseg->_shm_internal = uao_create(size, 0);
	if (lockmem) {
		/* Wire the pages and tag it */
		error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size, NULL);
		if (error) {
			uao_detach(shmseg->_shm_internal);
			mutex_enter(&shm_lock);
			shm_free_segment(segnum);
			shm_realloc_disable--;
			mutex_exit(&shm_lock);
			return error;
		}
	}

	/*
	 * Note that while the segment is marked, there is no need to hold
	 * the lock while initializing the fields (except shm_perm.mode).
	 */
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = l->l_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;

	/*
	 * Segment is initialized.
	 * Enter the lock, mark as allocated, and notify waiters (if any).
	 * Also, unmark the state of reallocation.
	 */
	mutex_enter(&shm_lock);
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS | SHMSEG_RMLINGER)) |
	    SHMSEG_ALLOCATED | (lockmem ? SHMSEG_WIRED : 0);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		cv_broadcast(&shm_cv[segnum]);
	}
	shm_realloc_disable--;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);

	return error;
}
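
/*
 * A hedged userland sketch of key-based creation (illustrative; the path
 * passed to ftok(3) is a placeholder):
 *
 *	key_t key = ftok("/some/path", 's');
 *	int id = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id == -1 && errno == EEXIST)
 *		id = shmget(key, 65536, 0600);
 *
 * IPC_PRIVATE always creates a fresh segment, skipping the key lookup in
 * shmget_existing() above.
 */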

void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	SHMPRINTF(("shmfork %p->%p\n", vm1, vm2));
	mutex_enter(&shm_lock);
	vm2->vm_shm = vm1->vm_shm;
	if (vm1->vm_shm) {
		shmmap_s = (struct shmmap_state *)vm1->vm_shm;
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
		shmmap_s->nrefs++;
	}
	mutex_exit(&shm_lock);
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	mutex_enter(&shm_lock);
	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL) {
		mutex_exit(&shm_lock);
		return;
	}
	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
		SHMPRINTF(("shmexit: vm %p drop ref (%d entries), refs = %d\n",
		    vm, shmmap_s->nitems, shmmap_s->nrefs));
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		}
		mutex_exit(&shm_lock);
		return;
	}

	SHMPRINTF(("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems));
	if (shmmap_s->nitems == 0) {
		mutex_exit(&shm_lock);
		kmem_free(shmmap_s, sizeof(struct shmmap_state));
		return;
	}

	/*
	 * Delete the entry from shm map.
	 */
	for (;;) {
		struct shmid_ds *shmseg;
		struct uvm_object *uobj;
		size_t sz;

		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		KASSERT(shmmap_se != NULL);

		shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
		sz = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
		/* shm_delete_mapping() removes from the list. */
		uobj = shm_delete_mapping(shmmap_s, shmmap_se);
		mutex_exit(&shm_lock);

		uvm_deallocate(&vm->vm_map, shmmap_se->va, sz);
		if (uobj != NULL) {
			uao_detach(uobj);
		}
		kmem_free(shmmap_se, sizeof(struct shmmap_entry));

		if (SLIST_EMPTY(&shmmap_s->entries)) {
			break;
		}
		mutex_enter(&shm_lock);
		KASSERT(!SLIST_EMPTY(&shmmap_s->entries));
	}
	kmem_free(shmmap_s, sizeof(struct shmmap_state));
}

static int
shmrealloc(int newshmni)
{
	vaddr_t v;
	struct shmid_ds *oldshmsegs, *newshmsegs;
	kcondvar_t *newshm_cv, *oldshm_cv;
	size_t sz;
	int i, lsegid, oldshmni;

	if (newshmni < 1)
		return EINVAL;

	/* Allocate new memory area */
	sz = ALIGN(newshmni * sizeof(struct shmid_ds)) +
	    ALIGN(newshmni * sizeof(kcondvar_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&shm_lock);
	while (shm_realloc_state || shm_realloc_disable)
		cv_wait(&shm_realloc_cv, &shm_lock);

	/*
	 * Get the index of the last used segment.  Fail if we are trying
	 * to reallocate less memory than we use.
	 */
	lsegid = 0;
	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_FREE) == 0)
			lsegid = i;
	if (lsegid >= newshmni) {
		mutex_exit(&shm_lock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	shm_realloc_state = true;

	newshmsegs = (void *)v;
	newshm_cv = (void *)((uintptr_t)newshmsegs +
	    ALIGN(newshmni * sizeof(struct shmid_ds)));

	/* Copy all memory to the new area */
	for (i = 0; i < shm_nused; i++)
		(void)memcpy(&newshmsegs[i], &shmsegs[i],
		    sizeof(newshmsegs[0]));

	/* Mark all new segments as free, if there are any */
	for (; i < newshmni; i++) {
		cv_init(&newshm_cv[i], "shmwait");
		newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
		newshmsegs[i].shm_perm._seq = 0;
	}

	oldshmsegs = shmsegs;
	oldshmni = shminfo.shmmni;
	shminfo.shmmni = newshmni;
	shmsegs = newshmsegs;
	shm_cv = newshm_cv;

	/* Reallocation completed - notify all waiters, if any */
	shm_realloc_state = false;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);

	/* Release now unused resources. */
	oldshm_cv = (void *)((uintptr_t)oldshmsegs +
	    ALIGN(oldshmni * sizeof(struct shmid_ds)));
	for (i = 0; i < oldshmni; i++)
		cv_destroy(&oldshm_cv[i]);

	sz = ALIGN(oldshmni * sizeof(struct shmid_ds)) +
	    ALIGN(oldshmni * sizeof(kcondvar_t));
	sz = round_page(sz);
	uvm_km_free(kernel_map, (vaddr_t)oldshmsegs, sz, UVM_KMF_WIRED);

	return 0;
}
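
/*
 * shmrealloc() is reached through the kern.ipc.shmmni sysctl handler
 * below; an illustrative invocation, assuming the usual sysctl(8) syntax:
 *
 *	# sysctl -w kern.ipc.shmmni=2048
 *
 * Shrinking below the highest segment index currently in use fails with
 * EBUSY, per the lsegid check above.
 */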

void
shminit(void)
{
	vaddr_t v;
	size_t sz;
	int i;

	mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&shm_realloc_cv, "shmrealc");

	/* Allocate the wired memory for our structures */
	sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
	    ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;
	shm_cv = (void *)((uintptr_t)shmsegs +
	    ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)));

	if (shminfo.shmmax == 0)
		shminfo.shmmax = max(physmem / 4, 1024) * PAGE_SIZE;
	else
		shminfo.shmmax *= PAGE_SIZE;
	shminfo.shmall = shminfo.shmmax / PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		cv_init(&shm_cv[i], "shmwait");
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shm_realloc_disable = 0;
	shm_realloc_state = false;

	sysvipcinit();
}

static int
sysctl_ipc_shmmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = shmrealloc(newsize);
	sysctl_relock();
	return error;
}

static int
sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
{
	uint32_t newsize;
	int error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmall;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 1)
		return EINVAL;

	shminfo.shmall = newsize;
	shminfo.shmmax = (uint64_t)shminfo.shmall * PAGE_SIZE;

	return 0;
}

static int
sysctl_ipc_shmmax(SYSCTLFN_ARGS)
{
	uint64_t newsize;
	int error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmmax;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < PAGE_SIZE)
		return EINVAL;

	shminfo.shmmax = round_page(newsize);
	shminfo.shmall = shminfo.shmmax >> PAGE_SHIFT;

	return 0;
}

SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ipc",
	    SYSCTL_DESCR("SysV IPC options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_SYSVIPC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_QUAD, "shmmax",
	    SYSCTL_DESCR("Max shared memory segment size in bytes"),
	    sysctl_ipc_shmmax, 0, &shminfo.shmmax, 0,
	    CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "shmmni",
	    SYSCTL_DESCR("Max number of shared memory identifiers"),
	    sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
	    CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "shmseg",
	    SYSCTL_DESCR("Max shared memory segments per process"),
	    NULL, 0, &shminfo.shmseg, 0,
	    CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "shmmaxpgs",
	    SYSCTL_DESCR("Max amount of shared memory in pages"),
	    sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
	    CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "shm_use_phys",
	    SYSCTL_DESCR("Enable/disable locking of shared memory in "
		"physical memory"), NULL, 0, &shm_use_phys, 0,
	    CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
}
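
/*
 * The nodes above appear as kern.ipc.* sysctls; an illustrative session,
 * assuming the usual sysctl(8) syntax:
 *
 *	$ sysctl kern.ipc.shmmax
 *	# sysctl -w kern.ipc.shm_use_phys=1
 *
 * The shmmax and shmmaxpgs handlers keep the two limits consistent by
 * recomputing one from the other on every write.
 */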