/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define SYSVSHM

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#ifdef UVM
#include <uvm/uvm_extern.h>
#else
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif

struct shmid_ds *shm_find_segment_by_shmid __P((int));
/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);					initialization
 * shmexit(struct vmspace *)				cleanup
 * shmfork(struct vmspace *, struct vmspace *)		fork handling
 * sys_shm{at,ctl,dt,get}()				syscall entry points
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
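
/*
 * Illustrative userland usage of these entry points through the standard
 * <sys/shm.h> API (a minimal sketch, not code from this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);		attach read/write
 *	p[0] = 'x';
 *	shmdt(p);				detach this mapping
 *	shmctl(id, IPC_RMID, NULL);		mark segment for destruction
 */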

#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
#ifdef UVM
	struct uvm_object *shm_object;
#else
	vm_object_t shm_object;
#endif
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_delete_mapping __P((struct vmspace *, struct shmmap_state *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
    int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
    int, register_t *));

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}
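
/*
 * A shmid handed to userland encodes both the slot index and a generation
 * sequence, so a stale id that names a since-recycled slot fails the seq
 * check above.  A sketch of the <sys/ipc.h> macro round trip:
 *
 *	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
 *	IPCID_TO_IX(shmid) == segnum
 *	IPCID_TO_SEQ(shmid) == shmseg->shm_perm.seq
 */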

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
#ifdef UVM
	uao_detach(shm_handle->shm_object);
#else
	vm_object_deallocate(shm_handle->shm_object);
#endif
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

static int
shm_delete_mapping(vm, shmmap_s)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
#ifdef UVM
	result = uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
#else
	result = vm_map_remove(&vm->vm_map,
	    shmmap_s->va, shmmap_s->va + size);
#endif
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
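
/*
 * Lifecycle note (descriptive, not code from this file): a segment marked
 * SHMSEG_REMOVED by IPC_RMID keeps its backing object until the last
 * mapping goes away; the shm_nattch test above is what finally triggers
 * shm_deallocate_segment() for such segments.
 */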

int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p->p_vmspace, shmmap_s);
}

int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va =
		    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}
	shm_handle = shmseg->shm_internal;
#ifdef UVM
	uao_reference(shm_handle->shm_object);
	rv = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (rv != KERN_SUCCESS) {
		/* Drop the object reference taken just above. */
		uao_detach(shm_handle->shm_object);
		return ENOMEM;
	}
#else
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
	    0, &attach_va, size, (flags & MAP_FIXED)?0:1);
	if (rv != KERN_SUCCESS) {
		/* Drop the object reference taken just above. */
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_protect(&p->p_vmspace->vm_map, attach_va, attach_va + size,
	    prot, 0);
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);
#endif

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}
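
/*
 * Example of the SHM_RND address handling above (assuming SHMLBA is the
 * page size, e.g. 0x1000): shmat(id, (void *)0x12345, SHM_RND) attaches
 * at 0x12000, while the same call without SHM_RND fails with EINVAL
 * because 0x12345 is not SHMLBA-aligned.
 */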

int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
		    sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}
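
/*
 * IPC_SET example (a sketch from the userland side): only the owner uid,
 * gid and the ACCESSPERMS bits of mode are taken from the caller's buffer;
 * everything else in the passed shmid_ds is ignored.
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);
 *	ds.shm_perm.mode = 0600;	only the access bits land
 *	shmctl(id, IPC_SET, &ds);
 */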

static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
#ifndef UVM
	vm_pager_t pager;
#endif
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

#ifdef UVM
	shm_handle->shm_object = uao_create(size, 0);
#else
	shm_handle->shm_object = vm_object_allocate(size);
	if (shm_handle->shm_object == NULL) {
		/* XXX cannot happen */
		error = ENOMEM;
		goto out;
	}
	/*
	 * Make sure that we have allocated a pager before we need it.
	 */
	pager = vm_pager_allocate(PG_DFLT, 0, size, VM_PROT_DEFAULT, 0);
	if (pager == NULL) {
		error = ENOMEM;
		goto out;
	}
	vm_object_setpager(shm_handle->shm_object, pager, 0, 0);
#endif

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

#ifndef UVM
 out:
	if (error) {
		if (shm_handle->shm_object != NULL)
			vm_object_deallocate(shm_handle->shm_object);
		free(shm_handle, M_SHM);
		shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED)
		    | SHMSEG_FREE;
	} else
#endif
		*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}
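
/*
 * Note on sizes in the code above: shm_segsz records the caller's exact
 * request, while the charged size is rounded up to a click boundary via
 * CLOFSET.  For example, with 4KB clicks a shmget(..., 100, ...) request
 * stores shm_segsz = 100 but commits btoc(4096) == 1 page.
 */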

int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}
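
/*
 * Example of the flow above (a sketch): shmget(0xbeef, 4096, IPC_CREAT|0600)
 * first probes shm_find_segment_by_key(); if the key exists, the request is
 * vetted by shmget_existing() (EEXIST when IPC_CREAT|IPC_EXCL was asked
 * for), otherwise shmget_allocate_segment() claims a free slot.  EAGAIN
 * means we slept on a half-constructed segment and must look up the key
 * again.
 */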

void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	memcpy(shmmap_s, vm1->vm_shm, size);
	vm2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

void
shminit()
{
	int i;

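	/* shminfo.shmmax is configured in pages; convert it to bytes once. */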
	shminfo.shmmax *= NBPG;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}