/*	sysv_shm.c revision 1.17	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);					initialization
 * shmexit(struct proc *)				cleanup
 * shmfork(struct proc *, struct proc *, int)		fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static vm_offset_t shm_find_space __P((struct proc *, size_t));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

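/*
 * Look up a segment by IPC key; returns the index of the matching
 * allocated segment, or -1 if none exists.
 */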
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

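/*
 * Translate a shmid into a segment pointer, verifying that the segment
 * is allocated, not marked removed, and that its sequence number
 * matches the id.
 */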
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

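/*
 * Choose a user address at which to attach a segment when the caller
 * passed no address; a crude heuristic that aims about three quarters
 * of the way between the top of the data segment and the stack.
 */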
static vm_offset_t
shm_find_space(p, size)
	struct proc *p;
	size_t size;
{
	vm_offset_t low_end, range, current;
	int result;

	low_end = (vm_offset_t)p->p_vmspace->vm_daddr +
	    (p->p_vmspace->vm_dsize << PGSHIFT);
	range = (USRSTACK - low_end);

	/* XXXX totally bogus */
	/* current = range * 3/4 + low_end */
	current = ((((range & 1) << 1) + range) >> 2) + (range >> 1) + low_end;
#if 0
	result = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &current, size,
	    TRUE);
	if (result)
		return 0;
#endif
	return current;
}

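/*
 * Free the kernel memory backing a segment, release its handle, and
 * mark the slot free; callers update shm_last_free themselves.
 */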
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_deallocate(sysvshm_map, shm_handle->kva, size);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

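/*
 * Remove one attachment from the calling process; if it was the last
 * attachment to a segment already marked SHMSEG_REMOVED, the segment
 * itself is freed.
 */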
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

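/*
 * shmdt(2): find the mapping attached at the given address and
 * detach it.
 */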
struct shmdt_args {
	void *shmaddr;
};
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

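/*
 * shmat(2): map a segment into the calling process, allocating the
 * per-process attachment table on first use.
 */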
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmmap_s, size);
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	if (error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		attach_va = shm_find_space(p, shmseg->shm_segsz);
		if (attach_va == 0)
			return ENOMEM;
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, uap->shmid, 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

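/*
 * shmctl(2): IPC_STAT, IPC_SET, and IPC_RMID on a segment.
 */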
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *ubuf;
};
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error, segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_R))
			return error;
		if (error = copyout((caddr_t)shmseg, uap->ubuf, sizeof(inbuf)))
			return error;
		break;
	case IPC_SET:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		if (error = copyin(uap->ubuf, (caddr_t)&inbuf, sizeof(inbuf)))
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
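
/*
 * shmget(2) on a key that already names a segment: wait out a segment
 * still being set up, then check permissions, size, and
 * IPC_CREAT|IPC_EXCL before handing back its id.
 */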
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		if (error =
		    tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0))
			return error;
		return EAGAIN;
	}
	if (error = ipcperm(cred, &shmseg->shm_perm, mode))
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

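/*
 * shmget(2) with no existing segment: claim a free slot (reserving it
 * across any sleep in malloc()), back it with kernel memory, and fill
 * in the bookkeeping fields.
 */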
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return ENOSPC;
	size = (uap->size + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		/* Just in case. */
		wakeup((caddr_t)shmseg);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}

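/*
 * shmget(2): dispatch between finding an existing key and allocating
 * a new segment, retrying while a segment is mid-creation.
 */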
int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

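/*
 * shmsys(2): common entry point that dispatches to shmat, shmctl,
 * shmdt, or shmget; the real arguments follow "which" in the argument
 * block, hence &uap[1].
 */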
struct shmsys_args {
	u_int which;
};
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

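/*
 * Copy the parent's attachment table into the child at fork time and
 * bump the reference count of every attached segment.
 */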
void
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

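/*
 * At process exit, detach any remaining segments and free the
 * attachment table.
 */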
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

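/*
 * Initialize the subsystem: carve a submap out of the kernel map for
 * segment backing store and mark every segment slot free.
 */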
void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
	    shminfo.shmall * NBPG, FALSE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}