/*	$NetBSD: sysv_shm.c,v 1.43 1998/02/10 14:09:49 mrg Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_uvm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#ifdef UVM
#include <uvm/uvm_extern.h>
#else
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif

struct shmid_ds *shm_find_segment_by_shmid __P((int));

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);					initialization
 * shmexit(struct vmspace *)				cleanup
 * shmfork(struct vmspace *, struct vmspace *)		fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */

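/*
 * Segment state flags, kept in shm_perm.mode above the access
 * permission bits.
 */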
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

/*
 * shm_last_free is a hint for where to look for a free segment slot,
 * shm_nused counts allocated segments, and shm_committed tracks the
 * memory (in clicks) committed to all segments.
 */
int	shm_last_free, shm_nused, shm_committed;

/* Per-segment handle on the backing anonymous VM object. */
struct shm_handle {
#ifdef UVM
	struct uvm_object *shm_object;
#else
	vm_object_t shm_object;
#endif
};

/* One attach record per process: where a segment is mapped, and its id. */
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_delete_mapping __P((struct vmspace *, struct shmmap_state *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
    int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
    int, register_t *));

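/*
 * Scan the segment table for an allocated segment with the given key;
 * return its index, or -1 if no such segment exists.
 */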
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

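/*
 * Translate a shmid into a pointer to its segment descriptor, checking
 * that the index and sequence number are valid and that the segment is
 * allocated and not marked for removal.  Returns NULL for a stale or
 * invalid id.
 */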
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

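/*
 * Release a segment's resources: drop the reference on the backing VM
 * object, free the handle, return the committed pages, and mark the
 * descriptor free.
 */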
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
#ifdef UVM
	uao_detach(shm_handle->shm_object);
#else
	vm_object_deallocate(shm_handle->shm_object);
#endif
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

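/*
 * Undo a single attach: unmap the segment from the process address space,
 * invalidate the shmmap_state slot, and deallocate the segment itself if
 * this was the last attach and the segment is marked for removal.
 */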
static int
shm_delete_mapping(vm, shmmap_s)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
#ifdef UVM
	result = uvm_deallocate(&vm->vm_map,
	    shmmap_s->va, shmmap_s->va + size);
#else
	result = vm_map_remove(&vm->vm_map,
	    shmmap_s->va, shmmap_s->va + size);
#endif
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

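/*
 * shmdt(2): detach the segment mapped at shmaddr.  The address must match
 * one of the process's recorded attaches exactly.
 */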
int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p->p_vmspace, shmmap_s);
}

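/*
 * shmat(2): attach a segment to the calling process.  Allocates the
 * per-process attach table on first use, checks permissions, picks an
 * attach address (honouring SHM_RND and fixed requests), and maps the
 * segment's VM object shared into the process address space.
 */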
int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vm_offset_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vm_offset_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va =
		    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}
	shm_handle = shmseg->shm_internal;
#ifdef UVM
	uao_reference(shm_handle->shm_object);
	rv = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (rv != KERN_SUCCESS) {
		return ENOMEM;
	}
#else
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
	    0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1);
	if (rv != KERN_SUCCESS) {
		return ENOMEM;
	}
	vm_map_protect(&p->p_vmspace->vm_map, attach_va, attach_va + size,
	    prot, 0);
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);
#endif

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

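/*
 * shmctl(2): IPC_STAT copies the descriptor out to the user, IPC_SET
 * updates ownership and permission bits, and IPC_RMID marks the segment
 * for removal (deallocating it immediately if nothing is attached).
 * SHM_LOCK and SHM_UNLOCK are not supported.
 */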
int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
		    sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

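/*
 * shmget(2) helper for the case where the key already names a segment:
 * wait out a segment that is still being created, then check permissions,
 * size, and the IPC_CREAT|IPC_EXCL combination before returning its id.
 */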
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

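/*
 * shmget(2) helper that creates a new segment: validate the requested
 * size against the shminfo limits, pick a free slot, allocate the backing
 * VM object, and initialize the descriptor.  The slot is marked
 * ALLOCATED|REMOVED while we may sleep so no other caller reuses the key.
 */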
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
#ifndef UVM
	vm_pager_t pager;
#endif
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

#ifdef UVM
	shm_handle->shm_object = uao_create(size, 0);
#else
	shm_handle->shm_object = vm_object_allocate(size);
	if (shm_handle->shm_object == NULL) {
		/* XXX cannot happen */
		error = ENOMEM;
		goto out;
	}
	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	pager = vm_pager_allocate(PG_DFLT, 0, size, VM_PROT_DEFAULT, 0);
	if (pager == NULL) {
		error = ENOMEM;
		goto out;
	}
	vm_object_setpager(shm_handle->shm_object, pager, 0, 0);
#endif

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

#ifndef UVM
out:
	if (error) {
		if (shm_handle->shm_object != NULL)
			vm_object_deallocate(shm_handle->shm_object);
		free(shm_handle, M_SHM);
		shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED)
		    | SHMSEG_FREE;
	} else
#endif
	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

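/*
 * shmget(2): for IPC_PRIVATE always create a new segment; otherwise look
 * the key up and either return the existing segment or, with IPC_CREAT,
 * allocate a new one.  EAGAIN from shmget_existing() means the segment
 * changed state while we slept, so retry the lookup.
 */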
int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

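/*
 * Fork handling: give the child vmspace a copy of the parent's attach
 * table and bump the attach count of every segment it inherits.
 */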
void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, shmmap_s, size);
	vm2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

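/*
 * Vmspace teardown: detach every remaining segment and free the
 * attach table.
 */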
void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

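/*
 * Initialization: scale shmmax from pages to bytes and mark every
 * segment descriptor free.
 */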
void
shminit()
{
	int i;

	shminfo.shmmax *= NBPG;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}