/* $NetBSD: sysv_shm.c,v 1.34 1995/12/09 04:12:56 mycroft Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);                              initialization
 * shmexit(struct proc *)                      cleanup
 * shmfork(struct proc *, struct proc *)       fork handling
 * shmsys(arg1, arg2, arg3, arg4);             shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
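/*
 * For orientation only (not part of this file): a minimal sketch of how
 * the interface implemented below is typically driven from userland,
 * with error handling omitted.
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      char *p = shmat(id, NULL, 0);
 *      p[0] = 1;                       use the segment
 *      shmdt(p);
 *      shmctl(id, IPC_RMID, NULL);
 */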

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
        vm_offset_t kva;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

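/*
 * Return the index in shmsegs[] of the allocated segment matching the
 * given key, or -1 if no such segment exists.
 */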
static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shminfo.shmmni; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}

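/*
 * Convert a shmid into a pointer to its shmid_ds, checking both the
 * array index and the sequence number so that stale ids are rejected.
 * Returns NULL if the id does not name a live, allocated segment.
 */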
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
        int shmid;
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}

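/*
 * Free the kernel mapping and handle backing a segment, mark its slot
 * free, and update the global usage and commit counters.
 */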
static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
        vm_deallocate(sysvshm_map, shm_handle->kva, size);
        free((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        shm_committed -= btoc(size);
        shmseg->shm_perm.mode = SHMSEG_FREE;
        shm_nused--;
}

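/*
 * Detach one mapping from a process.  If this was the last attach and
 * the segment has already been marked removed, free the segment itself.
 */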
static int
shm_delete_mapping(p, shmmap_s)
        struct proc *p;
        struct shmmap_state *shmmap_s;
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
        result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time.tv_sec;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}

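/*
 * shmdt(2): look up the mapping whose attach address matches shmaddr
 * and remove it from the calling process.
 */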
int
sys_shmdt(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct sys_shmdt_args /* {
                syscallarg(void *) shmaddr;
        } */ *uap = v;
        struct shmmap_state *shmmap_s;
        int i;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)SCARG(uap, shmaddr))
                        break;
        if (i == shminfo.shmseg)
                return EINVAL;
        return shm_delete_mapping(p, shmmap_s);
}

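/*
 * shmat(2): map a segment into the calling process, allocating the
 * per-process shmmap_state array on first use.  The attach address is
 * taken from shmaddr (honouring SHM_RND) or chosen by the VM system.
 */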
int
sys_shmat(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct sys_shmat_args /* {
                syscallarg(int) shmid;
                syscallarg(void *) shmaddr;
                syscallarg(int) shmflg;
        } */ *uap = v;
        int error, i, flags;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = malloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
        if (shmseg == NULL)
                return EINVAL;
        if (error = ipcperm(cred, &shmseg->shm_perm,
            (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
                return error;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg)
                return EMFILE;
        size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
        prot = VM_PROT_READ;
        if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (SCARG(uap, shmaddr)) {
                flags |= MAP_FIXED;
                if (SCARG(uap, shmflg) & SHM_RND)
                        attach_va =
                            (vm_offset_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
                else if (((vm_offset_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
                        attach_va = (vm_offset_t)SCARG(uap, shmaddr);
                else
                        return EINVAL;
        } else {
                /* This is just a hint to vm_mmap() about where to put it. */
                attach_va =
                    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
        }
        error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
            VM_PROT_DEFAULT, flags, (caddr_t)(long)SCARG(uap, shmid), 0);
        if (error)
                return error;
        shmmap_s->va = attach_va;
        shmmap_s->shmid = SCARG(uap, shmid);
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time.tv_sec;
        shmseg->shm_nattch++;
        *retval = attach_va;
        return 0;
}

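/*
 * shmctl(2): IPC_STAT copies the segment descriptor out, IPC_SET updates
 * owner and permission bits, and IPC_RMID marks the segment removed so
 * that it is freed once the last attach goes away.  SHM_LOCK and
 * SHM_UNLOCK are not implemented.
 */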
int
sys_shmctl(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct sys_shmctl_args /* {
                syscallarg(int) shmid;
                syscallarg(int) cmd;
                syscallarg(struct shmid_ds *) buf;
        } */ *uap = v;
        int error, segnum;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
        if (shmseg == NULL)
                return EINVAL;
        switch (SCARG(uap, cmd)) {
        case IPC_STAT:
                if (error = ipcperm(cred, &shmseg->shm_perm, IPC_R))
                        return error;
                if (error = copyout((caddr_t)shmseg, SCARG(uap, buf),
                    sizeof(inbuf)))
                        return error;
                break;
        case IPC_SET:
                if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
                        return error;
                if (error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
                    sizeof(inbuf)))
                        return error;
                shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (inbuf.shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time.tv_sec;
                break;
        case IPC_RMID:
                if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
                        return error;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
                }
                break;
        case SHM_LOCK:
        case SHM_UNLOCK:
        default:
                return EINVAL;
        }
        return 0;
}

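/*
 * shmget(2) helper for the case where the key already names a segment:
 * wait for a segment that is still being set up, check permissions and
 * size, and fail with EEXIST if IPC_CREAT|IPC_EXCL was requested.
 */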
static int
shmget_existing(p, uap, mode, segnum, retval)
        struct proc *p;
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(int) size;
                syscallarg(int) shmflg;
        } */ *uap;
        int mode;
        int segnum;
        register_t *retval;
{
        struct shmid_ds *shmseg;
        struct ucred *cred = p->p_ucred;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                if (error =
                    tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0))
                        return error;
                return EAGAIN;
        }
        if (error = ipcperm(cred, &shmseg->shm_perm, mode))
                return error;
        if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
                return EINVAL;
        if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
            (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

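/*
 * shmget(2) helper that creates a new segment: pick a free slot, mark it
 * allocated-but-removed while we may sleep in malloc(), map the backing
 * memory into sysvshm_map, and then fill in the descriptor.
 */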
static int
shmget_allocate_segment(p, uap, mode, retval)
        struct proc *p;
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(int) size;
                syscallarg(int) shmflg;
        } */ *uap;
        int mode;
        register_t *retval;
{
        int i, segnum, result, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (SCARG(uap, size) < shminfo.shmmin ||
            SCARG(uap, size) > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shminfo.shmmni)
                        panic("shmseg free count inconsistent");
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = SCARG(uap, key);
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
            VM_PROT_DEFAULT, MAP_ANON, (caddr_t)(long)shmid, 0);
        if (result != KERN_SUCCESS) {
                shmseg->shm_perm.mode = SHMSEG_FREE;
                shm_last_free = segnum;
                free((caddr_t)shm_handle, M_SHM);
                /* Just in case. */
                wakeup((caddr_t)shmseg);
                return ENOMEM;
        }
        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = SCARG(uap, size);
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time.tv_sec;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        *retval = shmid;
        return 0;
}

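/*
 * shmget(2): for IPC_PRIVATE always allocate a new segment; otherwise
 * look the key up and either reuse the existing segment or, with
 * IPC_CREAT, allocate a new one.
 */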
int
sys_shmget(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct sys_shmget_args /* {
                syscallarg(key_t) key;
                syscallarg(int) size;
                syscallarg(int) shmflg;
        } */ *uap = v;
        int segnum, mode, error;
        struct shmid_ds *shmseg;

        mode = SCARG(uap, shmflg) & ACCESSPERMS;
        if (SCARG(uap, key) != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(SCARG(uap, key));
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum, retval);
                        if (error == EAGAIN)
                                goto again;
                        return error;
                }
                if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return shmget_allocate_segment(p, uap, mode, retval);
}

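/*
 * Duplicate the parent's shm attach state into the child at fork time
 * and bump the attach count of every segment the parent has mapped.
 */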
void
shmfork(p1, p2)
        struct proc *p1, *p2;
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = malloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

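/*
 * Detach all remaining segments and free the per-process shmmap_state
 * array when a process exits.
 */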
void
shmexit(p)
        struct proc *p;
{
        struct shmmap_state *shmmap_s;
        struct shmid_ds *shmseg;
        int i;

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shm_delete_mapping(p, shmmap_s);
        free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
        p->p_vmspace->vm_shm = NULL;
}

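/*
 * Boot-time initialization: scale shmmax by NBPG (it is configured in
 * pages), carve the sysvshm_map submap out of kernel_map, and mark every
 * segment slot free.
 */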
void
shminit()
{
        int i;
        vm_offset_t garbage1, garbage2;

        shminfo.shmmax *= NBPG;

        /* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
        sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
            shminfo.shmall * NBPG, TRUE);
        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}