/*	$NetBSD: sysv_shm.c,v 1.38 1996/09/01 22:53:06 christos Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
struct shmid_ds *shm_find_segment_by_shmid __P((int));
void shmexit __P((struct proc *));

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)				initialization
 * shmexit(struct proc *)			cleanup
 * shmfork(struct proc *, struct proc *)	fork handling
 * sys_shm{at,ctl,dt,get}(p, v, retval)		syscall entry points
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
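
/*
 * For reference, a minimal userland usage sketch of these syscalls
 * (illustrative only; error checking omitted):
 *
 *	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(shmid, NULL, 0);
 *	p[0] = 'x';				use the segment
 *	shmdt(p);
 *	shmctl(shmid, IPC_RMID, NULL);		mark for removal
 */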

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
    int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *,
    struct sys_shmget_args *, int, register_t *));

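/*
 * Find an allocated segment with the given IPC key; return its index
 * into shmsegs[], or -1 if there is none.
 */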
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

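/*
 * Translate a shmid into a pointer to its shmid_ds, validating both the
 * index and the sequence number; returns NULL for a stale or bogus shmid.
 */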
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

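/*
 * Release a segment's kernel memory and accounting; the caller must
 * already have determined that the segment is dead (removed, and with
 * no remaining attaches).
 */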
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_deallocate(sysvshm_map, shm_handle->kva, size);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

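/*
 * Detach one mapping from the calling process, and free the segment
 * itself if this was the last attach and the segment has been removed.
 */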
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

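/*
 * shmdt(2): find the mapping whose attach address matches shmaddr and
 * detach it; EINVAL if the process has no such mapping.
 */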
int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

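/*
 * shmat(2): map the segment into the caller's address space at shmaddr
 * (rounded down to an SHMLBA boundary with SHM_RND, or chosen by the VM
 * system when shmaddr is NULL) and record the attach in the per-process
 * shmmap_state array, allocating that array on first use.
 */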
int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vm_offset_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vm_offset_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va =
		    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, (caddr_t)(long)SCARG(uap, shmid), 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

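/*
 * shmctl(2): IPC_STAT copies the shmid_ds out to the user, IPC_SET
 * updates owner and access mode, and IPC_RMID marks the segment removed
 * (it is freed once the last attach goes away).  SHM_LOCK and SHM_UNLOCK
 * are not implemented.
 */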
int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
		    sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

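/*
 * shmget(2) backend for a key that already names a segment: wait out a
 * half-constructed segment (retried via EAGAIN), then check permissions,
 * size, and IPC_CREAT|IPC_EXCL before handing back the existing shmid.
 */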
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

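/*
 * shmget(2) backend for creating a new segment: pick a free slot,
 * reserve it across the potentially sleeping allocations, map the
 * backing memory into sysvshm_map, and fill in the shmid_ds.
 */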
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, (caddr_t)(long)shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		/* Just in case. */
		wakeup((caddr_t)shmseg);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}

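/*
 * shmget(2): for IPC_PRIVATE always create a new segment; otherwise look
 * the key up and dispatch to shmget_existing() or, with IPC_CREAT, to
 * shmget_allocate_segment().
 */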
int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

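/*
 * Called at fork time: give the child a copy of the parent's
 * shmmap_state array and bump the attach count of every mapped segment.
 */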
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (p1->p_vmspace->vm_shm == NULL) {
		p2->p_vmspace->vm_shm = NULL;
		return;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

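/*
 * Called at process exit: detach every remaining mapping and free the
 * per-process shmmap_state array.
 */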
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

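/*
 * Called once at boot: scale shmmax from pages to bytes, carve the
 * sysvshm_map submap out of kernel_map, and mark every segment free.
 */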
void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	shminfo.shmmax *= NBPG;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
	    shminfo.shmall * NBPG, TRUE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}