/*	sysv_shm.c	revision 1.12	*/

/*
 * Copyright (c) 1994 Adam Glass
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the Author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Adam Glass ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Adam Glass BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);		initialization
 * shmexit(struct proc *)	cleanup
 * shmfork(struct proc *, struct proc *, int)	fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
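
/*
 * For orientation, a rough sketch of the userland calling sequence these
 * entry points serve (standard SysV shared memory usage; "id", "buf" and
 * "len" below are illustrative names, not anything defined in this file):
 *
 *	int id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
 *	char *buf = shmat(id, NULL, 0);
 *	... use buf ...
 *	shmdt(buf);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * All four calls reach the kernel through the shmsys() dispatcher below,
 * which hands them to the matching entry in shmcalls[].
 */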

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };

#define	SHMSEG_FREE		0x200
#define	SHMSEG_REMOVED		0x400
#define	SHMSEG_ALLOCATED	0x800

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};
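
/*
 * shmsegs[] holds one shmid_ds per possible segment.  A segment's backing
 * memory lives in sysvshm_map, and its shm_handle records the kernel
 * virtual address there via shm_internal.  Each process that has attached
 * a segment carries an array of shminfo.shmseg 'struct shmmap_state' slots
 * hung off p->p_vmspace->vm_shm; shmid == -1 marks a free slot.
 */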

static int shm_allocate_segment __P((struct proc *, key_t, size_t, int, int *));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int, int *));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

static int
shm_allocate_segment(p, key, size, mode, retval)
	struct proc *p;
	key_t key;
	size_t size;
	int mode;
	int *retval;
{
	int i, segnum, result, shmid;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return ENOSPC;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	/* size is in clicks here; vm_mmap wants bytes */
	result = vm_mmap(sysvshm_map, &shm_handle->kva, ctob(size),
	    VM_PROT_ALL, VM_PROT_DEFAULT, MAP_ANON, shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		free((caddr_t)shm_handle, M_SHM);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shm_committed += size;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = ctob(size);		/* XXX */
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_nused++;
	*retval = shmid;
	return 0;
}

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(shmid, where)
	int shmid;
	int *where;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	if (where)
		*where = segnum;
	return shmseg;
}
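
/*
 * A shmid packs the slot index together with the slot's sequence number
 * (IXSEQ_TO_IPCID() when handing ids out, IPCID_TO_IX()/IPCID_TO_SEQ()
 * when validating them above).  Assuming the usual <sys/ipc.h> encoding
 * of index in the low 16 bits and sequence above it, slot 3 with seq 5
 * yields id (5 << 16) | 3.  Because shm_allocate_segment() bumps the seq
 * each time a slot is reused, a stale id that still names slot 3 fails
 * the seq comparison above and the caller sees EINVAL instead of quietly
 * getting someone else's recycled segment.
 */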

static vm_offset_t
shm_find_space(p, size)
	struct proc *p;
	size_t size;
{
	vm_offset_t low_end, range, current;
	int result;

	low_end = (vm_offset_t)p->p_vmspace->vm_daddr +
	    (p->p_vmspace->vm_dsize << PGSHIFT);
	range = (USRSTACK - low_end);

	/* XXX totally bogus */
	/* current = range * 3/4 + low_end */
	current = low_end + (range / 4) * 3;
#if 0
	result = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &current, size,
	    TRUE);
	if (result)
		return 0;
#endif
	return current;
}
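
/*
 * Illustrative numbers only: with the data segment ending at 0x00400000
 * and USRSTACK at 0xf0000000, range is 0xefc00000 and the hint works out
 * to 0x00400000 + 3 * (0xefc00000 / 4) == 0xb4100000, i.e. three quarters
 * of the way from the end of the heap toward the stack.  It is only a
 * starting hint; when no address was supplied, shmat() passes it to
 * vm_mmap() without MAP_FIXED, so the VM system may still place the
 * mapping elsewhere.
 */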

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;

	shm_handle = shmseg->shm_internal;
	vm_deallocate(sysvshm_map, shm_handle->kva,
	    ctob(clrnd(btoc(shmseg->shm_segsz))));
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= clrnd(btoc(shmseg->shm_segsz));
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va,
	    ctob(clrnd(btoc(shmseg->shm_segsz))));
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

struct shmdt_args {
	void *shmaddr;
};
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)		/* process never attached anything */
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		/* first attach for this process: set up an empty attach table */
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid, NULL);
	if (shmseg == NULL)
		return EINVAL;
	if (error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = ctob(clrnd(btoc(shmseg->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			/* round the requested address down to an SHMLBA boundary */
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else
			attach_va = (vm_offset_t)uap->shmaddr;
	} else {
		attach_va = shm_find_space(p, shmseg->shm_segsz);
		if (attach_va == 0)
			return ENOMEM;
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, uap->shmid, 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *ubuf;
};
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error, segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid, &segnum);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_R))
			return error;
		if (error = copyout((caddr_t)shmseg, uap->ubuf, sizeof(inbuf)))
			return error;
		break;
	case IPC_SET:
		if (cred->cr_uid != 0 &&
		    shmseg->shm_perm.cuid != cred->cr_uid &&
		    shmseg->shm_perm.uid != cred->cr_uid)
			return EPERM;
		if (error = copyin(uap->ubuf, (caddr_t)&inbuf, sizeof(inbuf)))
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if (cred->cr_uid != 0 &&
		    shmseg->shm_perm.cuid != cred->cr_uid &&
		    shmseg->shm_perm.uid != cred->cr_uid)
			return EPERM;
		/*
		 * Mark the segment removed; it is torn down once the last
		 * attachment goes away (see shm_delete_mapping()).
		 */
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = segnum;
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED)
		return EBUSY;
	if (error = ipcperm(cred, &shmseg->shm_perm, mode))
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;
	size_t size;
	struct shmid_ds *shmseg;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0)
			return shmget_existing(p, uap, mode, segnum, retval);
	}
	if ((uap->shmflg & IPC_CREAT) == 0)
		return ENOENT;
	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	size = clrnd(btoc(uap->size));
	if (shm_committed + size > shminfo.shmall)
		return ENOMEM;
	return shm_allocate_segment(p, uap->key, size, mode, retval);
}
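
/*
 * Size accounting note: uap->size is in bytes, btoc() converts it to
 * clicks (pages) and clrnd() rounds the click count up to a cluster
 * boundary (a no-op where the cluster size is one page), so shm_committed
 * and shminfo.shmall are both in pages.  As an illustrative example on a
 * machine with 4096-byte pages, a 5000-byte request becomes btoc(5000)
 * == 2 clicks, and shm_allocate_segment() then maps ctob(2) == 8192 bytes
 * of backing store.
 */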

struct shmsys_args {
	u_int which;
};
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

void
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
	    shminfo.shmall * NBPG, FALSE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}