/*	$NetBSD: sysv_shm.c,v 1.87 2006/05/14 21:15:11 elad Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.87 2006/05/14 21:15:11 elad Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				initialization
 * shmexit(struct vmspace *)			cleanup
 * shmfork(struct vmspace *, struct vmspace *)	fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */

int shm_nused;
struct shmid_ds *shmsegs;

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static int shm_last_free, shm_committed;

static POOL_INIT(shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
    "shmmp", &pool_allocator_nointr);

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

static int shm_find_segment_by_key(key_t);
static void shm_deallocate_segment(struct shmid_ds *);
static void shm_delete_mapping(struct vmspace *, struct shmmap_state *,
    struct shmmap_entry *);
static int shmget_existing(struct proc *, struct sys_shmget_args *,
    int, int, register_t *);
static int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
    int, register_t *);
static struct shmmap_state *shmmap_getprivate(struct proc *);
static struct shmmap_entry *shm_find_mapping(struct shmmap_state *, vaddr_t);
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm._key == key)
			return i;
	return -1;
}

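/*
 * Convert a shmid into a pointer to its shmid_ds.  Returns NULL if the
 * index is out of range, the segment is not allocated, the segment was
 * removed without the RMLINGER flag, or the sequence number does not
 * match (i.e. the identifier is stale).
 */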
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode &
	    (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

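/*
 * Release the UVM object backing a segment and mark the slot free.
 * The caller must ensure the segment has no remaining attachments.
 */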
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct uvm_object *uobj = shmseg->_shm_internal;
	size_t size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

#ifdef SHMDEBUG
	printf("shm freeing key 0x%lx seq 0x%x\n",
	    shmseg->shm_perm._key, shmseg->shm_perm._seq);
#endif

	(*uobj->pgops->pgo_detach)(uobj);
	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

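/*
 * Unmap one attachment from the given vmspace and remove its entry from
 * the per-process shm map.  If this was the last attachment of a segment
 * already marked SHMSEG_REMOVED, deallocate the segment as well.
 */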
static void
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s,
    struct shmmap_entry *shmmap_se)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
	if (segnum < 0 || segnum >= shminfo.shmmni)
		panic("shm_delete_mapping: vmspace %p state %p entry %p - "
		    "entry segment ID bad (%d)",
		    vm, shmmap_s, shmmap_se, segnum);
#endif
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	pool_put(&shmmap_entry_pool, shmmap_se);
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
}

/*
 * Get a non-shared shm map for this vmspace.
 * Three cases:
 *   - no shm map present: create a fresh one
 *   - a shm map with refcount == 1, used only by ourselves: fine
 *   - a shared shm map: copy to a fresh one and adjust refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return (oshmmap_s);

	shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
	memset(shmmap_s, 0, sizeof(struct shmmap_state));
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

	if (!oshmmap_s)
		return (shmmap_s);

#ifdef SHMDEBUG
	printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;
	return (shmmap_s);
}

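/*
 * Find the shm map entry for the attachment at virtual address va,
 * or NULL if there is none.
 */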
static struct shmmap_entry *
shm_find_mapping(struct shmmap_state *map, vaddr_t va)
{
	struct shmmap_entry *shmmap_se;

	SLIST_FOREACH(shmmap_se, &map->entries, next) {
		if (shmmap_se->va == va)
			return shmmap_se;
	}
	return NULL;
}

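/*
 * shmdt(2) system call: detach the shared memory segment mapped at the
 * given address from the calling process.
 */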
int
sys_shmdt(struct lwp *l, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s, *shmmap_s1;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
	if (!shmmap_se)
		return EINVAL;

	shmmap_s1 = shmmap_getprivate(p);
	if (shmmap_s1 != shmmap_s) {
		/* map has been copied, lookup entry in new map */
		shmmap_se = shm_find_mapping(shmmap_s1,
		    (vaddr_t)SCARG(uap, shmaddr));
		KASSERT(shmmap_se != NULL);
	}
#ifdef SHMDEBUG
	printf("shmdt: vm %p: remove %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
	shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
	return 0;
}

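/*
 * shmat(2) system call: map the segment identified by shmid into the
 * calling process's address space, at shmaddr if one was supplied
 * (optionally rounded down to an SHMLBA boundary with SHM_RND) or at a
 * kernel-chosen address otherwise.
 */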
int
sys_shmat(struct lwp *l, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, flags;
	struct proc *p = l->l_proc;
	kauth_cred_t cred = p->p_cred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct uvm_object *uobj;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	struct shmmap_entry *shmmap_se;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
		return EMFILE;

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to uvm_mmap() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);
	}
	uobj = shmseg->_shm_internal;
	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		(*uobj->pgops->pgo_detach)(uobj);
		return error;
	}
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
	printf("shmat: vm %p: add %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, attach_va);
#endif
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;

	retval[0] = attach_va;
	return 0;
}

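/*
 * shmctl(2) system call: copy the user's shmid_ds in for IPC_SET, run
 * the operation through shmctl1(), and copy the result back out for
 * IPC_STAT.
 */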
int
sys___shmctl13(struct lwp *l, void *v, register_t *retval)
{
	struct sys___shmctl13_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return (error);
	}

	error = shmctl1(p, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return (error);
}

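/*
 * Kernel implementation of shmctl(2).  IPC_STAT copies the segment's
 * shmid_ds into *shmbuf, IPC_SET updates owner and permissions from it,
 * and IPC_RMID marks the segment removed, deallocating it immediately
 * if nothing is attached.  SHM_LOCK and SHM_UNLOCK are not supported.
 */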
int
shmctl1(struct proc *p, int shmid, int cmd, struct shmid_ds *shmbuf)
{
	kauth_cred_t cred = p->p_cred;
	struct shmid_ds *shmseg;
	int error = 0;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

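/*
 * Handle shmget(2) when the key already names a segment: check
 * permissions, the requested size and IPC_CREAT|IPC_EXCL, then hand back
 * the existing identifier.  If the segment is still being set up, sleep
 * and return EAGAIN so the caller retries the lookup.
 */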
static int
shmget_existing(struct proc *p, struct sys_shmget_args *uap, int mode,
    int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	kauth_cred_t cred = p->p_cred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

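/*
 * Handle shmget(2) when a new segment must be created: validate the size
 * against the shminfo limits, pick a free slot, create the backing
 * anonymous UVM object, and initialize the shmid_ds.
 */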
static int
shmget_allocate_segment(struct proc *p, struct sys_shmget_args *uap, int mode,
    register_t *retval)
{
	int i, segnum, shmid, size;
	kauth_cred_t cred = p->p_cred;
	struct shmid_ds *shmseg;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->_shm_internal = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS|SHMSEG_RMLINGER)) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

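/*
 * shmget(2) system call: look up the key (unless IPC_PRIVATE) and either
 * return the existing segment or, with IPC_CREAT, allocate a new one.
 */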
int
sys_shmget(struct lwp *l, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

#ifdef SHMDEBUG
	printf("shmget: key 0x%lx size 0x%x shmflg 0x%x mode 0x%x\n",
	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode);
#endif

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

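/*
 * Called at fork time: the child shares the parent's shm map, so bump
 * its reference count and the attach count of every mapped segment.
 */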
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	vm2->vm_shm = vm1->vm_shm;

	if (vm1->vm_shm == NULL)
		return;

#ifdef SHMDEBUG
	printf("shmfork %p->%p\n", vm1, vm2);
#endif

	shmmap_s = (struct shmmap_state *)vm1->vm_shm;

	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
		shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
	shmmap_s->nrefs++;
}

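/*
 * Called when a vmspace is torn down: drop our reference on the shm map.
 * If others still share the map, only decrement the segments' attach
 * counts; otherwise delete every remaining mapping and free the map.
 */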
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
		printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
		    vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		return;
	}

#ifdef SHMDEBUG
	printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shm_delete_mapping(vm, shmmap_s, shmmap_se);
	}
	KASSERT(shmmap_s->nitems == 0);
	free(shmmap_s, M_SHM);
}

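/*
 * Initialize the SysV shared memory subsystem: allocate the shmsegs
 * array, scale shmmax from pages to bytes, and mark every slot free.
 */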
void
shminit(void)
{
	int i, sz;
	vaddr_t v;

	/* Allocate wired kernel memory for our structures */
	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0, UVM_KMF_WIRED);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}