/*	$NetBSD: sysv_shm.c,v 1.35 1996/02/04 02:17:10 christos Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <kern/kern_extern.h>

struct shmid_ds *shm_find_segment_by_shmid __P((int));
void shmexit __P((struct proc *));

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);			 initialization
 * shmexit(struct proc *)		 cleanup
 * shmfork(struct proc *, struct proc *) fork handling
 * shmsys(arg1, arg2, arg3, arg4);	 shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */

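/*
 * Illustrative only: a minimal userland sequence exercising these
 * handlers through the standard <sys/ipc.h> / <sys/shm.h> interface:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = (char *)shmat(id, NULL, 0);	attach -> sys_shmat()
 *	p[0] = 1;				shared with other attachers
 *	shmdt(p);				detach -> sys_shmdt()
 *	shmctl(id, IPC_RMID, NULL);		freed after the last detach
 */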
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
				int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
					int, register_t *));

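/*
 * Linear scan of shmsegs for an allocated segment with the given key;
 * returns its index, or -1 if none is found.
 */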
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

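/*
 * Translate a shmid into a shmid_ds pointer.  Returns NULL if the
 * index is out of range, the slot is not allocated (or is marked
 * removed), or the sequence number does not match the id.
 */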
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

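/*
 * Free a segment's backing memory and handle, credit the committed
 * page count, and return the slot to the free state.
 */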
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_deallocate(sysvshm_map, shm_handle->kva, size);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

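/*
 * Remove one attachment from a process's address space.  If this was
 * the last attach of a segment already marked SHMSEG_REMOVED, the
 * segment itself is deallocated.
 */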
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

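/*
 * shmdt(2): look up the caller's mapping at shmaddr and detach it.
 */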
int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

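/*
 * shmat(2): map the segment into the caller's address space, either
 * at the address requested (rounded down to SHMLBA with SHM_RND) or
 * at a hint past the text and data regions, and record the attach in
 * the per-process shmmap_state array.
 */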
int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vm_offset_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vm_offset_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va =
		    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, (caddr_t)(long)SCARG(uap, shmid), 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

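/*
 * shmctl(2): implements IPC_STAT, IPC_SET and IPC_RMID; SHM_LOCK and
 * SHM_UNLOCK are not supported and return EINVAL.
 */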
int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
		    sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

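/*
 * shmget(2) found an existing segment for the key: wait out a
 * half-constructed segment, then check permissions, the requested
 * size, and IPC_CREAT|IPC_EXCL before handing back its shmid.
 */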
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

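/*
 * shmget(2) needs a new segment: validate the size, find a free slot,
 * reserve it across any sleeps, map anonymous memory for it in
 * sysvshm_map, and initialize the shmid_ds bookkeeping.
 */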
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, (caddr_t)(long)shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		/* Just in case. */
		wakeup((caddr_t)shmseg);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}

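/*
 * shmget(2): return the id of an existing segment matching the key,
 * or allocate a fresh one (always for IPC_PRIVATE, otherwise only
 * when IPC_CREAT is given).
 */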
int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

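/*
 * Fork hook: give the child its own copy of the parent's
 * shmmap_state array and bump the attach count of every segment
 * that is currently mapped.
 */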
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

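/*
 * Exit hook: detach any remaining mappings and free the per-process
 * shmmap_state array.
 */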
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

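/*
 * Bootstrap: convert shmmax from pages to bytes, carve the kernel
 * submap that backs all segments out of kernel_map, and mark every
 * segment slot free.
 */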
void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	shminfo.shmmax *= NBPG;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
	    shminfo.shmall * NBPG, TRUE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}