/*	$NetBSD: sysv_shm.c,v 1.72 2003/12/05 22:09:56 jdolecek Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Adam Glass and Charles M.
 *        Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.72 2003/12/05 22:09:56 jdolecek Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>

struct shmid_ds *shm_find_segment_by_shmid __P((int, int));

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)                                  initialization
 * shmexit(struct vmspace *)                      cleanup
 * shmfork(struct vmspace *, struct vmspace *)    fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per-vmspace 'struct shmmap_state' holding a list of 'struct shmmap_entry'
 * (one per attached segment), reference counted and shared across fork
 */
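/*
 * Illustrative only: a minimal sketch (under #if 0, not compiled) of how
 * these entry points are expected to be driven by the rest of the kernel.
 * The real call sites live elsewhere (e.g. init_main.c, kern_fork.c,
 * kern_exit.c) and may differ; the function below is hypothetical.
 */
#if 0
static void
example_shm_lifecycle(struct vmspace *parent, struct vmspace *child)
{

        shminit();              /* once at boot: set up shmsegs[] and pool */
        shmfork(parent, child); /* at fork: share the shm map, bump refs */
        shmexit(child);         /* at exit/exec: drop ref or unmap all */
}
#endif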

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;

struct shm_handle {
        struct uvm_object *shm_object;
};

struct shmmap_entry {
        SLIST_ENTRY(shmmap_entry) next;
        vaddr_t va;
        int shmid;
};

static struct pool shmmap_entry_pool;

struct shmmap_state {
        unsigned int nitems;
        unsigned int nrefs;
        SLIST_HEAD(, shmmap_entry) entries;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static void shm_delete_mapping __P((struct vmspace *, struct shmmap_state *,
    struct shmmap_entry *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
    int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *,
    struct sys_shmget_args *, int, register_t *));
static struct shmmap_state *shmmap_getprivate __P((struct proc *));
static struct shmmap_entry *shm_find_mapping __P((struct shmmap_state *,
    vaddr_t));

/*
 * Return the index of the allocated segment matching 'key',
 * or -1 if no such segment exists.
 */
static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shminfo.shmmni; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm._key == key)
                        return i;
        return -1;
}

/*
 * Look up a segment by shmid, validating both the slot index and the
 * sequence number embedded in the id.  Segments already marked
 * SHMSEG_REMOVED are returned only if 'findremoved' is set.
 */
struct shmid_ds *
shm_find_segment_by_shmid(shmid, findremoved)
        int shmid;
        int findremoved;
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
                return NULL;
        if (!findremoved && ((shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
                return NULL;
        if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}
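/*
 * For reference: a shmid packs the slot index into the low 16 bits and a
 * per-slot sequence number into the bits above, which is how stale ids
 * naming a recycled slot are rejected above.  A worked example, assuming
 * the usual <sys/ipc.h> encoding (IPCID_TO_IX(id) == id & 0xffff,
 * IPCID_TO_SEQ(id) == (id >> 16) & 0xffff); not compiled:
 */
#if 0
static void
example_shmid_encoding(void)
{
        int shmid = IXSEQ_TO_IPCID(5, shmsegs[5].shm_perm);

        KASSERT(IPCID_TO_IX(shmid) == 5);
        KASSERT(IPCID_TO_SEQ(shmid) == shmsegs[5].shm_perm._seq);
}
#endif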

/*
 * Release the memory and accounting for a segment: detach the backing
 * uvm object, free the handle, and mark the slot free.
 */
static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->_shm_internal;
        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        uao_detach(shm_handle->shm_object);
        free((caddr_t)shm_handle, M_SHM);
        shmseg->_shm_internal = NULL;
        shm_committed -= btoc(size);
        shmseg->shm_perm.mode = SHMSEG_FREE;
        shm_nused--;
}

/*
 * Remove one attach entry from a process' shm map, unmapping the
 * segment from its address space.  Frees the segment itself if this
 * was the last attachment and the segment is marked removed.
 */
static void
shm_delete_mapping(vm, shmmap_s, shmmap_se)
        struct vmspace *vm;
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;
{
        struct shmid_ds *shmseg;
        int segnum;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
        if (segnum < 0 || segnum >= shminfo.shmmni)
                panic("shm_delete_mapping: vmspace %p state %p entry %p - "
                    "entry segment ID bad (%d)",
                    vm, shmmap_s, shmmap_se, segnum);
#endif
        shmseg = &shmsegs[segnum];
        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
        SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
        shmmap_s->nitems--;
        pool_put(&shmmap_entry_pool, shmmap_se);
        shmseg->shm_dtime = time.tv_sec;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
}

/*
 * Get a non-shared shm map for this vmspace.  Three cases:
 * - no shm map present: create a fresh one
 * - a shm map with refcount == 1, used only by ourselves: use it as is
 * - a shared shm map: copy to a fresh one and adjust the refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
        struct shmmap_state *oshmmap_s, *shmmap_s;
        struct shmmap_entry *oshmmap_se, *shmmap_se;

        oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (oshmmap_s && oshmmap_s->nrefs == 1)
                return (oshmmap_s);

        shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
        memset(shmmap_s, 0, sizeof(struct shmmap_state));
        shmmap_s->nrefs = 1;
        SLIST_INIT(&shmmap_s->entries);
        p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

        if (!oshmmap_s)
                return (shmmap_s);

#ifdef SHMDEBUG
        printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
            p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
        SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
                shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
                shmmap_se->va = oshmmap_se->va;
                shmmap_se->shmid = oshmmap_se->shmid;
                SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        }
        shmmap_s->nitems = oshmmap_s->nitems;
        oshmmap_s->nrefs--;
        return (shmmap_s);
}
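/*
 * A sketch of the copy-on-write behaviour above, assuming a map shared
 * between a parent and one forked child (nrefs == 2): the caller gets a
 * fresh private copy and the old map keeps serving the other reference.
 * Illustrative only, not compiled.
 */
#if 0
static void
example_getprivate(struct proc *p)
{
        struct shmmap_state *shared, *private;

        shared = (struct shmmap_state *)p->p_vmspace->vm_shm;
        /* assume shared->nrefs == 2 here */
        private = shmmap_getprivate(p);
        /* now private != shared, and both have nrefs == 1 */
        KASSERT(private->nitems == shared->nitems);
}
#endif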

/* Find the map entry for virtual address 'va', or NULL if none. */
static struct shmmap_entry *
shm_find_mapping(map, va)
        struct shmmap_state *map;
        vaddr_t va;
{
        struct shmmap_entry *shmmap_se;

        SLIST_FOREACH(shmmap_se, &map->entries, next) {
                if (shmmap_se->va == va)
                        return shmmap_se;
        }
        return NULL;
}

int
sys_shmdt(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_shmdt_args /* {
                syscallarg(const void *) shmaddr;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct shmmap_state *shmmap_s, *shmmap_s1;
        struct shmmap_entry *shmmap_se;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL)
                return EINVAL;

        shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
        if (!shmmap_se)
                return EINVAL;

        shmmap_s1 = shmmap_getprivate(p);
        if (shmmap_s1 != shmmap_s) {
                /* map has been copied, look up the entry in the new map */
                shmmap_se = shm_find_mapping(shmmap_s1,
                    (vaddr_t)SCARG(uap, shmaddr));
                KASSERT(shmmap_se != NULL);
        }
#ifdef SHMDEBUG
        printf("shmdt: vm %p: remove %d @%lx\n",
            p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
        shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
        return 0;
}

int
sys_shmat(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_shmat_args /* {
                syscallarg(int) shmid;
                syscallarg(const void *) shmaddr;
                syscallarg(int) shmflg;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        vaddr_t attach_va;
        int error;

        error = shmat1(p, SCARG(uap, shmid), SCARG(uap, shmaddr),
            SCARG(uap, shmflg), &attach_va, 0);
        if (error != 0)
                return error;
        retval[0] = attach_va;
        return 0;
}

/*
 * Map a segment into the calling process' address space and record
 * the attachment in its shm map.
 */
int
shmat1(p, shmid, shmaddr, shmflg, attachp, findremoved)
        struct proc *p;
        int shmid;
        const void *shmaddr;
        int shmflg;
        vaddr_t *attachp;
        int findremoved;
{
        int error, flags;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s;
        struct shm_handle *shm_handle;
        vaddr_t attach_va;
        vm_prot_t prot;
        vsize_t size;
        struct shmmap_entry *shmmap_se;

        shmseg = shm_find_segment_by_shmid(shmid, findremoved);
        if (shmseg == NULL)
                return EINVAL;
        error = ipcperm(cred, &shmseg->shm_perm,
            (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                return error;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
                return EMFILE;

        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        prot = VM_PROT_READ;
        if ((shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (shmaddr) {
                flags |= MAP_FIXED;
                if (shmflg & SHM_RND)
                        attach_va = (vaddr_t)shmaddr & ~(SHMLBA-1);
                else if (((vaddr_t)shmaddr & (SHMLBA-1)) == 0)
                        attach_va = (vaddr_t)shmaddr;
                else
                        return EINVAL;
        } else {
                /* This is just a hint to uvm_mmap() about where to put it. */
                attach_va = VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size);
        }
        shm_handle = shmseg->_shm_internal;
        uao_reference(shm_handle->shm_object);
        error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
            shm_handle->shm_object, 0, 0,
            UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
        if (error) {
                /*
                 * Drop the object reference taken above so a failed
                 * map does not leak the anonymous object.
                 */
                uao_detach(shm_handle->shm_object);
                return error;
        }
        shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
        shmmap_se->va = attach_va;
        shmmap_se->shmid = shmid;
        shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
        printf("shmat: vm %p: add %d @%lx\n", p->p_vmspace, shmid, attach_va);
#endif
        SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        shmmap_s->nitems++;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time.tv_sec;
        shmseg->shm_nattch++;
        *attachp = attach_va;
        return 0;
}
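/*
 * Worked example of the shmaddr handling in shmat1() above, assuming
 * SHMLBA == PAGE_SIZE == 4096 (SHMLBA is machine-dependent and may be
 * larger on some ports):
 *
 *      shmat(id, (void *)0x12345, SHM_RND)     attaches at 0x12000
 *      shmat(id, (void *)0x12000, 0)           attaches at 0x12000
 *      shmat(id, (void *)0x12345, 0)           fails with EINVAL
 *      shmat(id, NULL, 0)                      lets UVM pick the address
 */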

int
sys___shmctl13(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys___shmctl13_args /* {
                syscallarg(int) shmid;
                syscallarg(int) cmd;
                syscallarg(struct shmid_ds *) buf;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct shmid_ds shmbuf;
        int cmd, error;

        cmd = SCARG(uap, cmd);

        if (cmd == IPC_SET) {
                error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
                if (error)
                        return (error);
        }

        error = shmctl1(p, SCARG(uap, shmid), cmd,
            (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

        if (error == 0 && cmd == IPC_STAT)
                error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

        return (error);
}

/*
 * Perform a shmctl() operation on the segment identified by 'shmid'.
 * For IPC_STAT the result is copied into *shmbuf; for IPC_SET the new
 * values are taken from it.  SHM_LOCK/SHM_UNLOCK are not supported.
 */
int
shmctl1(p, shmid, cmd, shmbuf)
        struct proc *p;
        int shmid;
        int cmd;
        struct shmid_ds *shmbuf;
{
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        int error = 0;

        shmseg = shm_find_segment_by_shmid(shmid, 0);
        if (shmseg == NULL)
                return EINVAL;
        switch (cmd) {
        case IPC_STAT:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
                        return error;
                memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
                break;
        case IPC_SET:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
                        return error;
                shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
                shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (shmbuf->shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time.tv_sec;
                break;
        case IPC_RMID:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
                        return error;
                shmseg->shm_perm._key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(shmid);
                }
                break;
        case SHM_LOCK:
        case SHM_UNLOCK:
        default:
                return EINVAL;
        }
        return 0;
}
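/*
 * Note the IPC_RMID semantics implemented above: the segment is only
 * marked SHMSEG_REMOVED and its key retired to IPC_PRIVATE; the memory
 * itself is reclaimed by shm_delete_mapping() once the last attachment
 * goes away.  A hedged userland sketch of the resulting "remove early,
 * keep using" idiom (not kernel code, not compiled):
 */
#if 0
#include <sys/shm.h>

static void
example_rmid(void)
{
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        char *va = shmat(id, NULL, 0);

        shmctl(id, IPC_RMID, NULL);     /* id is retired immediately... */
        va[0] = 1;                      /* ...but the mapping stays valid */
        shmdt(va);                      /* last detach frees the segment */
}
#endif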

static int
shmget_existing(p, uap, mode, segnum, retval)
        struct proc *p;
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */ *uap;
        int mode;
        int segnum;
        register_t *retval;
{
        struct shmid_ds *shmseg;
        struct ucred *cred = p->p_ucred;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
                return error;
        if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
                return EINVAL;
        if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
            (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
        struct proc *p;
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */ *uap;
        int mode;
        register_t *retval;
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;
        int error = 0;

        if (SCARG(uap, size) < shminfo.shmmin ||
            SCARG(uap, size) > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni)        /* any shmids left? */
                return ENOSPC;
        size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shminfo.shmmni)
                        panic("shmseg free count inconsistent");
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm._key = SCARG(uap, key);
        shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        shm_handle->shm_object = uao_create(size, 0);

        shmseg->_shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = SCARG(uap, size);
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time.tv_sec;
        shm_committed += btoc(size);
        shm_nused++;

        *retval = shmid;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        return error;
}
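/*
 * Summary of the allocation handshake between shmget_existing() and
 * shmget_allocate_segment(): while the allocator may sleep in malloc()
 * or uao_create(), the slot is held as ALLOCATED|REMOVED so a concurrent
 * shmget() on the same key finds it, sets SHMSEG_WANTED and tsleeps on
 * the slot; once initialization completes, the final mode clears the
 * REMOVED bit and any waiters are woken to retry their key lookup (the
 * EAGAIN path in sys_shmget() below).
 */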

int
sys_shmget(l, v, retval)
        struct lwp *l;
        void *v;
        register_t *retval;
{
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        int segnum, mode, error;

        mode = SCARG(uap, shmflg) & ACCESSPERMS;
        if (SCARG(uap, key) != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(SCARG(uap, key));
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum, retval);
                        if (error == EAGAIN)
                                goto again;
                        return error;
                }
                if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return shmget_allocate_segment(p, uap, mode, retval);
}
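/*
 * Hedged userland sketch of the keyed path through sys_shmget() above;
 * the path and project id passed to ftok() are hypothetical.  Not
 * kernel code, not compiled.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

static int
example_keyed_get(void)
{
        key_t key = ftok("/some/path", 42);

        /* Creator: IPC_CREAT|IPC_EXCL fails with EEXIST if key exists. */
        int id = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0600);

        /* Everyone else: look up the existing segment by key. */
        int same = shmget(key, 0, 0600);

        return (id == same);    /* same segment, same id */
}
#endif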

/*
 * Fork handling: let the child share the parent's shm map, bumping the
 * attach count of every mapped segment and the map's reference count.
 */
void
shmfork(vm1, vm2)
        struct vmspace *vm1, *vm2;
{
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;

        vm2->vm_shm = vm1->vm_shm;

        if (vm1->vm_shm == NULL)
                return;

#ifdef SHMDEBUG
        printf("shmfork %p->%p\n", vm1, vm2);
#endif

        shmmap_s = (struct shmmap_state *)vm1->vm_shm;

        SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
                shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
        shmmap_s->nrefs++;
}

/*
 * Cleanup on process exit (or exec): drop our reference to the shm map;
 * if it was the last one, delete all mappings.
 */
void
shmexit(vm)
        struct vmspace *vm;
{
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;

        shmmap_s = (struct shmmap_state *)vm->vm_shm;
        if (shmmap_s == NULL)
                return;

        vm->vm_shm = NULL;

        if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
                printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
                    vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
                SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
                        shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
                return;
        }

#ifdef SHMDEBUG
        printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
        while (!SLIST_EMPTY(&shmmap_s->entries)) {
                shmmap_se = SLIST_FIRST(&shmmap_s->entries);
                shm_delete_mapping(vm, shmmap_s, shmmap_se);
        }
        KASSERT(shmmap_s->nitems == 0);
        free(shmmap_s, M_SHM);
}

/*
 * Initialize the shared memory facility at boot time: allocate the
 * segment array and set up the pool for shm map entries.
 */
void
shminit()
{
        int i, sz;
        vaddr_t v;

        /* Allocate pageable memory for our structures */
        sz = shminfo.shmmni * sizeof(struct shmid_ds);
        if ((v = uvm_km_alloc(kernel_map, round_page(sz))) == 0)
                panic("sysv_shm: cannot allocate memory");
        shmsegs = (void *)v;

        /* shminfo.shmmax is configured in pages; convert it to bytes. */
        shminfo.shmmax *= PAGE_SIZE;

        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm._seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;

        pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
            "shmmp", 0);
}