/*	$NetBSD: sysv_shm.c,v 1.70 2003/09/10 17:01:04 drochner Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Adam Glass and Charles M.
 *        Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.70 2003/09/10 17:01:04 drochner Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>

struct shmid_ds *shm_find_segment_by_shmid __P((int, int));

MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)                                initialization
 * shmexit(struct vmspace *)                    cleanup
 * shmfork(struct vmspace *, struct vmspace *)  fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * a per-process 'struct shmmap_state' holding a list of 'struct shmmap_entry'
 */
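
/*
 * Illustrative sketch (not part of this file): the userland view of
 * the interface implemented below is the standard SysV shared memory
 * API, roughly:
 *
 *      int id = shmget(key, len, IPC_CREAT | 0600);
 *      void *p = shmat(id, NULL, 0);    attach; kernel picks the address
 *      ...use the memory at p...
 *      shmdt(p);                        detach this mapping
 *      shmctl(id, IPC_RMID, NULL);      destroy once the last attach is gone
 */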

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;

struct shm_handle {
        struct uvm_object *shm_object;
};

struct shmmap_entry {
        SLIST_ENTRY(shmmap_entry) next;
        vaddr_t va;
        int shmid;
};

struct pool shmmap_entry_pool;

struct shmmap_state {
        unsigned int nitems;
        unsigned int nrefs;
        SLIST_HEAD(, shmmap_entry) entries;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static void shm_delete_mapping __P((struct vmspace *, struct shmmap_state *,
    struct shmmap_entry *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
    int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
    int, register_t *));
static struct shmmap_state *shmmap_getprivate __P((struct proc *));
static struct shmmap_entry *
    shm_find_mapping __P((struct shmmap_state *, vaddr_t));

static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shminfo.shmmni; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm._key == key)
                        return i;
        return -1;
}

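/*
 * A shmid encodes both a slot index in shmsegs[] and a generation
 * sequence number (see the IPCID_TO_IX()/IPCID_TO_SEQ() macros); the
 * _seq comparison below rejects stale ids that name a recycled slot.
 */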
struct shmid_ds *
shm_find_segment_by_shmid(shmid, findremoved)
        int shmid;
        int findremoved;
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
                return NULL;
        if (!findremoved && ((shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
                return NULL;
        if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}

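/*
 * The rounding idiom used throughout, (sz + PGOFSET) & ~PGOFSET,
 * rounds a byte count up to a whole number of pages: with 4KB pages
 * (PGOFSET == 0xfff), a 5000-byte segment occupies 8192 bytes.
 */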
static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->_shm_internal;
        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        uao_detach(shm_handle->shm_object);
        free((caddr_t)shm_handle, M_SHM);
        shmseg->_shm_internal = NULL;
        shm_committed -= btoc(size);
        shmseg->shm_perm.mode = SHMSEG_FREE;
        shm_nused--;
}

static void
shm_delete_mapping(vm, shmmap_s, shmmap_se)
        struct vmspace *vm;
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;
{
        struct shmid_ds *shmseg;
        int segnum;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_se->shmid);
        shmseg = &shmsegs[segnum];
        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
        SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
        shmmap_s->nitems--;
        pool_put(&shmmap_entry_pool, shmmap_se);
        shmseg->shm_dtime = time.tv_sec;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
}

/*
 * Get a non-shared shm map for that vmspace.  There are three cases:
 * - no shm map present: create a fresh one
 * - a shm map with refcount=1, used only by ourselves: use it as-is
 * - a shared shm map: copy to a fresh one and adjust refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
        struct shmmap_state *oshmmap_s, *shmmap_s;
        struct shmmap_entry *oshmmap_se, *shmmap_se;

        oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (oshmmap_s && oshmmap_s->nrefs == 1)
                return (oshmmap_s);

        shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
        memset(shmmap_s, 0, sizeof(struct shmmap_state));
        shmmap_s->nrefs = 1;
        SLIST_INIT(&shmmap_s->entries);
        p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

        if (!oshmmap_s)
                return (shmmap_s);

#ifdef SHMDEBUG
        printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
               p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
        SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
                shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
                shmmap_se->va = oshmmap_se->va;
                shmmap_se->shmid = oshmmap_se->shmid;
                SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        }
        shmmap_s->nitems = oshmmap_s->nitems;
        oshmmap_s->nrefs--;
        return (shmmap_s);
}

static struct shmmap_entry *
shm_find_mapping(map, va)
        struct shmmap_state *map;
        vaddr_t va;
{
        struct shmmap_entry *shmmap_se;

        SLIST_FOREACH(shmmap_se, &map->entries, next) {
                if (shmmap_se->va == va)
                        return shmmap_se;
        }
        return NULL;
}

int
sys_shmdt(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_shmdt_args /* {
                syscallarg(const void *) shmaddr;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct shmmap_state *shmmap_s, *shmmap_s1;
        struct shmmap_entry *shmmap_se;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL)
                return EINVAL;

        shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
        if (!shmmap_se)
                return EINVAL;

        shmmap_s1 = shmmap_getprivate(p);
        if (shmmap_s1 != shmmap_s) {
                /* map has been copied, lookup entry in new map */
                shmmap_se = shm_find_mapping(shmmap_s1,
                    (vaddr_t)SCARG(uap, shmaddr));
                KASSERT(shmmap_se != NULL);
        }
#ifdef SHMDEBUG
        printf("shmdt: vm %p: remove %d @%lx\n",
               p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
        shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
        return 0;
}

int
sys_shmat(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_shmat_args /* {
                syscallarg(int) shmid;
                syscallarg(const void *) shmaddr;
                syscallarg(int) shmflg;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t attach_va;
        int error;

        error = shmat1(p, SCARG(uap, shmid), SCARG(uap, shmaddr),
            SCARG(uap, shmflg), &attach_va, 0);
        if (error != 0)
                return error;
        retval[0] = attach_va;
        return 0;
}

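/*
 * Guts of shmat(), exported for callers outside this file.  If
 * findremoved is nonzero, a segment already marked SHMSEG_REMOVED
 * may still be attached.
 */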
int
shmat1(p, shmid, shmaddr, shmflg, attachp, findremoved)
        struct proc *p;
        int shmid;
        const void *shmaddr;
        int shmflg;
        vaddr_t *attachp;
        int findremoved;
{
        int error, flags;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s;
        struct shm_handle *shm_handle;
        vaddr_t attach_va;
        vm_prot_t prot;
        vsize_t size;
        struct shmmap_entry *shmmap_se;

        shmseg = shm_find_segment_by_shmid(shmid, findremoved);
        if (shmseg == NULL)
                return EINVAL;
        error = ipcperm(cred, &shmseg->shm_perm,
            (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                return error;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
                return EMFILE;

        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        prot = VM_PROT_READ;
        if ((shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (shmaddr) {
                flags |= MAP_FIXED;
                if (shmflg & SHM_RND)
                        /* Round the caller's address down to SHMLBA. */
                        attach_va = (vaddr_t)shmaddr & ~(SHMLBA-1);
                else if (((vaddr_t)shmaddr & (SHMLBA-1)) == 0)
                        attach_va = (vaddr_t)shmaddr;
                else
                        return EINVAL;
        } else {
                /* This is just a hint to uvm_map() about where to put it. */
                attach_va = VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size);
        }
        shm_handle = shmseg->_shm_internal;
        uao_reference(shm_handle->shm_object);
        error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
            shm_handle->shm_object, 0, 0,
            UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
        if (error) {
                /*
                 * uvm_map() did not consume the object reference taken
                 * above, so drop it before bailing out.
                 */
                uao_detach(shm_handle->shm_object);
                return error;
        }
        shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
        shmmap_se->va = attach_va;
        shmmap_se->shmid = shmid;
        shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
        printf("shmat: vm %p: add %d @%lx\n", p->p_vmspace, shmid, attach_va);
#endif
        SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        shmmap_s->nitems++;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time.tv_sec;
        shmseg->shm_nattch++;
        *attachp = attach_va;
        return 0;
}

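/*
 * The "13" suffix follows NetBSD's syscall-versioning convention:
 * this entry point was introduced when struct shmid_ds changed, and
 * older binaries are served by compat code.
 */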
int
sys___shmctl13(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys___shmctl13_args /* {
                syscallarg(int) shmid;
                syscallarg(int) cmd;
                syscallarg(struct shmid_ds *) buf;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct shmid_ds shmbuf;
        int cmd, error;

        cmd = SCARG(uap, cmd);

        if (cmd == IPC_SET) {
                error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
                if (error)
                        return (error);
        }

        error = shmctl1(p, SCARG(uap, shmid), cmd,
            (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

        if (error == 0 && cmd == IPC_STAT)
                error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

        return (error);
}

int
shmctl1(p, shmid, cmd, shmbuf)
        struct proc *p;
        int shmid;
        int cmd;
        struct shmid_ds *shmbuf;
{
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        int error = 0;

        shmseg = shm_find_segment_by_shmid(shmid, 0);
        if (shmseg == NULL)
                return EINVAL;
        switch (cmd) {
        case IPC_STAT:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
                        return error;
                memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
                break;
        case IPC_SET:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
                        return error;
                shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
                shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (shmbuf->shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time.tv_sec;
                break;
        case IPC_RMID:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
                        return error;
                shmseg->shm_perm._key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(shmid);
                }
                break;
        case SHM_LOCK:
        case SHM_UNLOCK:
                /* Segment locking is not implemented; fall through. */
        default:
                return EINVAL;
        }
        return 0;
}

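/*
 * shmget() on an existing key.  A segment that is marked both
 * ALLOCATED and REMOVED is still being set up by another process
 * (see shmget_allocate_segment() below); in that case, sleep on the
 * segment and return EAGAIN so the caller retries the key lookup.
 */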
static int
shmget_existing(p, uap, mode, segnum, retval)
        struct proc *p;
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */ *uap;
        int mode;
        int segnum;
        register_t *retval;
{
        struct shmid_ds *shmseg;
        struct ucred *cred = p->p_ucred;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
                return error;
        if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
                return EINVAL;
        if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
            (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
        struct proc *p;
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */ *uap;
        int mode;
        register_t *retval;
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;
        int error = 0;

        if (SCARG(uap, size) < shminfo.shmmin ||
            SCARG(uap, size) > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shminfo.shmmni)
                        panic("shmseg free count inconsistent");
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm._key = SCARG(uap, key);
        shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        shm_handle->shm_object = uao_create(size, 0);

        shmseg->_shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = SCARG(uap, size);
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time.tv_sec;
        shm_committed += btoc(size);
        shm_nused++;

        *retval = shmid;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        return error;
}

int
sys_shmget(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        int segnum, mode, error;

        mode = SCARG(uap, shmflg) & ACCESSPERMS;
        if (SCARG(uap, key) != IPC_PRIVATE) {
again:
                segnum = shm_find_segment_by_key(SCARG(uap, key));
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum, retval);
                        if (error == EAGAIN)
                                goto again;
                        return error;
                }
                if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return shmget_allocate_segment(p, uap, mode, retval);
}

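/*
 * fork() hook: the child initially shares the parent's shmmap_state
 * (bumping nrefs), and each mapped segment gains an attach reference.
 * The map is only copied later, by shmmap_getprivate(), when one of
 * the sharers changes it.
 */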
void
shmfork(vm1, vm2)
        struct vmspace *vm1, *vm2;
{
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;

        vm2->vm_shm = vm1->vm_shm;

        if (vm1->vm_shm == NULL)
                return;

#ifdef SHMDEBUG
        printf("shmfork %p->%p\n", vm1, vm2);
#endif

        shmmap_s = (struct shmmap_state *)vm1->vm_shm;

        SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
                shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
        shmmap_s->nrefs++;
}

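/*
 * exit()/exec() hook: drop this vmspace's reference on the shm map.
 * Only the last reference detaches the segments themselves.
 */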
void
shmexit(vm)
        struct vmspace *vm;
{
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;

        shmmap_s = (struct shmmap_state *)vm->vm_shm;
        if (shmmap_s == NULL)
                return;

        vm->vm_shm = NULL;

        if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
                printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
                       vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
                SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
                        shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
                return;
        }

#ifdef SHMDEBUG
        printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
        while (!SLIST_EMPTY(&shmmap_s->entries)) {
                shmmap_se = SLIST_FIRST(&shmmap_s->entries);
                shm_delete_mapping(vm, shmmap_s, shmmap_se);
        }
        KASSERT(shmmap_s->nitems == 0);
        free(shmmap_s, M_SHM);
}

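/*
 * Called once at boot.  shminfo.shmmax is configured in pages and
 * converted to bytes here; shmsegs[] is assumed to have been
 * allocated earlier during startup.
 */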
void
shminit()
{
        int i;

        shminfo.shmmax *= PAGE_SIZE;

        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm._seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;

        pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
            "shmmp", 0);
}