/*	$NetBSD: sysv_shm.c,v 1.77 2004/04/25 16:42:41 simonb Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.77 2004/04/25 16:42:41 simonb Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct shmid_ds *shm_find_segment_by_shmid(int, int);

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);					initialization
 * shmexit(struct vmspace *)				cleanup
 * shmfork(struct vmspace *, struct vmspace *)		fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per-process 'struct shmmap_state' holding a list of attached segments
 */
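
/*
 * For reference only: a minimal userland sequence exercising the syscalls
 * implemented below (an illustrative sketch, not part of this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);		-> sys_shmat() / shmat1()
 *	... use the mapping ...
 *	shmdt(p);				-> sys_shmdt()
 *	shmctl(id, IPC_RMID, NULL);		-> sys___shmctl13() / shmctl1()
 */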

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static POOL_INIT(shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
    "shmmp", 0);

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};
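
/*
 * Each vmspace with attached segments points (via vm_shm) at one
 * struct shmmap_state, which carries a singly-linked list of
 * shmmap_entry records, one per attached segment.  The state may be
 * shared between a parent and its children after fork (nrefs counts
 * the sharers) and is split into a private copy by shmmap_getprivate()
 * before it is modified.
 */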

static int shm_find_segment_by_key(key_t);
static void shm_deallocate_segment(struct shmid_ds *);
static void shm_delete_mapping(struct vmspace *, struct shmmap_state *,
	struct shmmap_entry *);
static int shmget_existing(struct proc *, struct sys_shmget_args *,
	int, int, register_t *);
static int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
	int, register_t *);
static struct shmmap_state *shmmap_getprivate(struct proc *);
static struct shmmap_entry *shm_find_mapping(struct shmmap_state *, vaddr_t);

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm._key == key)
			return i;
	return -1;
}

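/*
 * Look up a segment by its IPC identifier: convert the id to a slot
 * index, then check that the slot is allocated and that the sequence
 * number still matches.  Segments already marked SHMSEG_REMOVED are
 * only returned when 'findremoved' is set.
 */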
struct shmid_ds *
shm_find_segment_by_shmid(shmid, findremoved)
	int shmid;
	int findremoved;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if (!findremoved && ((shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

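/*
 * Release the resources of a segment that is no longer referenced:
 * drop the reference on the backing uvm object, free the shm_handle,
 * credit the pages back to the shm_committed accounting and mark the
 * slot free.
 */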
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle = shmseg->_shm_internal;
	struct uvm_object *uobj = shm_handle->shm_object;
	size_t size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

	(*uobj->pgops->pgo_detach)(uobj);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

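/*
 * Detach one mapping from a vmspace: unmap the region, unlink and free
 * the shmmap_entry, stamp shm_dtime and drop shm_nattch.  If this was
 * the last attach of a segment already marked for removal, deallocate
 * the segment itself.
 */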
static void
shm_delete_mapping(vm, shmmap_s, shmmap_se)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
	if (segnum < 0 || segnum >= shminfo.shmmni)
		panic("shm_delete_mapping: vmspace %p state %p entry %p - "
		    "entry segment ID bad (%d)",
		    vm, shmmap_s, shmmap_se, segnum);
#endif
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	pool_put(&shmmap_entry_pool, shmmap_se);
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
}

/*
 * Get a non-shared shm map for that vmspace.
 * 3 cases:
 *   - no shm map present: create a fresh one
 *   - a shm map with refcount=1, just used by ourselves: fine
 *   - a shared shm map: copy to a fresh one and adjust refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return (oshmmap_s);

	shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
	memset(shmmap_s, 0, sizeof(struct shmmap_state));
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

	if (!oshmmap_s)
		return (shmmap_s);

#ifdef SHMDEBUG
	printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;
	return (shmmap_s);
}

static struct shmmap_entry *
shm_find_mapping(map, va)
	struct shmmap_state *map;
	vaddr_t va;
{
	struct shmmap_entry *shmmap_se;

	SLIST_FOREACH(shmmap_se, &map->entries, next) {
		if (shmmap_se->va == va)
			return shmmap_se;
	}
	return NULL;
}

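/*
 * shmdt(2): look up the caller's mapping at 'shmaddr', switch to a
 * private (unshared) shmmap_state if necessary, and delete the mapping.
 */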
int
sys_shmdt(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s, *shmmap_s1;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
	if (!shmmap_se)
		return EINVAL;

	shmmap_s1 = shmmap_getprivate(p);
	if (shmmap_s1 != shmmap_s) {
		/* map has been copied, lookup entry in new map */
		shmmap_se = shm_find_mapping(shmmap_s1,
		    (vaddr_t)SCARG(uap, shmaddr));
		KASSERT(shmmap_se != NULL);
	}
#ifdef SHMDEBUG
	printf("shmdt: vm %p: remove %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
	shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
	return 0;
}

int
sys_shmat(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	vaddr_t attach_va;
	int error;

	error = shmat1(p, SCARG(uap, shmid), SCARG(uap, shmaddr),
	    SCARG(uap, shmflg), &attach_va, 0);
	if (error != 0)
		return error;
	retval[0] = attach_va;
	return 0;
}

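/*
 * The guts of shmat(2): check IPC permissions, honour the per-process
 * shminfo.shmseg limit, pick the attach address (SHM_RND rounds the
 * given address down to an SHMLBA boundary, otherwise it must already
 * be SHMLBA-aligned), then map the segment's uvm object shared into the
 * process map and record the new shmmap_entry.  'findremoved' allows
 * attaching a segment that has already been marked SHMSEG_REMOVED.
 */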
int
shmat1(p, shmid, shmaddr, shmflg, attachp, findremoved)
	struct proc *p;
	int shmid;
	const void *shmaddr;
	int shmflg;
	vaddr_t *attachp;
	int findremoved;
{
	int error, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct uvm_object *uobj;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	struct shmmap_entry *shmmap_se;

	shmseg = shm_find_segment_by_shmid(shmid, findremoved);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
		return EMFILE;

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND)
			attach_va =
			    (vaddr_t)shmaddr & ~(SHMLBA-1);
		else if (((vaddr_t)shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size);
	}
	uobj = ((struct shm_handle *)shmseg->_shm_internal)->shm_object;
	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		(*uobj->pgops->pgo_detach)(uobj);
		return error;
	}
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = shmid;
	shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
	printf("shmat: vm %p: add %d @%lx\n", p->p_vmspace, shmid, attach_va);
#endif
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*attachp = attach_va;
	return 0;
}

int
sys___shmctl13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys___shmctl13_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return (error);
	}

	error = shmctl1(p, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return (error);
}

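/*
 * The guts of shmctl(2): IPC_STAT copies the segment's shmid_ds into
 * *shmbuf (the syscall wrapper does the copyout), IPC_SET updates the
 * owner and access mode from *shmbuf, and IPC_RMID marks the segment
 * removed, deallocating it immediately if nothing is attached.
 * SHM_LOCK and SHM_UNLOCK are not implemented and return EINVAL.
 */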
int
shmctl1(p, shmid, cmd, shmbuf)
	struct proc *p;
	int shmid;
	int cmd;
	struct shmid_ds *shmbuf;
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	int error = 0;

	shmseg = shm_find_segment_by_shmid(shmid, 0);
	if (shmseg == NULL)
		return EINVAL;
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

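/*
 * shmget(2) found an existing segment for the given key.  If the
 * segment is still being set up (SHMSEG_REMOVED while allocated),
 * sleep until the creator finishes and have the caller retry via
 * EAGAIN; otherwise check the permissions, the requested size and the
 * IPC_CREAT|IPC_EXCL case, and hand back the IPC id.
 */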
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

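/*
 * shmget(2) must create a new segment: validate the size against
 * shmmin/shmmax and the shmall page limit, find a free slot, and mark
 * it SHMSEG_ALLOCATED|SHMSEG_REMOVED before possibly sleeping in
 * malloc() so that no other caller creates the same key meanwhile.
 * The backing store is an anonymous uvm object (uao_create).  Anyone
 * who found the half-built segment and set SHMSEG_WANTED is woken up
 * at the end.
 */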
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shm_handle->shm_object = uao_create(size, 0);

	shmseg->_shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

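/*
 * shmget(2): for IPC_PRIVATE always allocate a fresh segment; for a
 * real key look it up first and either reuse the existing segment or,
 * with IPC_CREAT, allocate a new one.  EAGAIN from shmget_existing()
 * means the segment was still being created, so retry the lookup.
 */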
int
sys_shmget(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

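/*
 * Fork handling: the child simply shares the parent's shmmap_state
 * (bumping nrefs), and every attached segment gains one attach
 * reference.  The state is only copied later, on first modification,
 * by shmmap_getprivate().
 */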
void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	vm2->vm_shm = vm1->vm_shm;

	if (vm1->vm_shm == NULL)
		return;

#ifdef SHMDEBUG
	printf("shmfork %p->%p\n", vm1, vm2);
#endif

	shmmap_s = (struct shmmap_state *)vm1->vm_shm;

	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
		shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
	shmmap_s->nrefs++;
}

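/*
 * Tear down the SysV shm state of a vmspace: drop our reference on the
 * shmmap_state.  If others still share it, just take away our attach
 * references; otherwise delete every remaining mapping and free the
 * state.
 */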
void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
		printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
		    vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		return;
	}

#ifdef SHMDEBUG
	printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shm_delete_mapping(vm, shmmap_s, shmmap_se);
	}
	KASSERT(shmmap_s->nitems == 0);
	free(shmmap_s, M_SHM);
}

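/*
 * Called at boot: allocate the shmsegs array (shminfo.shmmni slots),
 * scale shminfo.shmmax from pages to bytes, and mark every slot free.
 */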
void
shminit()
{
	int i, sz;
	vaddr_t v;

	/* Allocate pageable memory for our structures */
	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	if ((v = uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}