1 /*	$NetBSD: kern_descrip.c,v 1.169 2008/01/05 23:53:21 ad Exp $	*/
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.169 2008/01/05 23:53:21 ad Exp $");
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/filedesc.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <sys/proc.h>
48 #include <sys/file.h>
49 #include <sys/namei.h>
50 #include <sys/socket.h>
51 #include <sys/socketvar.h>
52 #include <sys/stat.h>
53 #include <sys/ioctl.h>
54 #include <sys/fcntl.h>
55 #include <sys/malloc.h>
56 #include <sys/pool.h>
57 #include <sys/syslog.h>
58 #include <sys/unistd.h>
59 #include <sys/resourcevar.h>
60 #include <sys/conf.h>
61 #include <sys/event.h>
62 #include <sys/kauth.h>
63 #include <sys/atomic.h>
#include <sys/kmem.h>		/* kmem_alloc()/kmem_free(), used by fgetdummy()/fputdummy() */
64
65 #include <sys/mount.h>
66 #include <sys/syscallargs.h>
67
68 static int cwdi_ctor(void *, void *, int);
69 static void cwdi_dtor(void *, void *);
70 static int file_ctor(void *, void *, int);
71 static void file_dtor(void *, void *);
72
73 /*
74 * Descriptor management.
75 */
76 struct filelist filehead; /* head of list of open files */
77 u_int nfiles; /* actual number of open files */
78
79 static pool_cache_t cwdi_cache;
80 static pool_cache_t filedesc0_cache;
81 static pool_cache_t file_cache;
82
83 /* Global file list lock */
84 kmutex_t filelist_lock;
85
86 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
87 MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
88 MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
89
90 static inline int
91 find_next_zero(uint32_t *bitmap, int want, u_int bits)
92 {
93 int i, off, maxoff;
94 uint32_t sub;
95
96 if (want > bits)
97 return -1;
98
99 off = want >> NDENTRYSHIFT;
100 i = want & NDENTRYMASK;
101 if (i) {
102 sub = bitmap[off] | ((u_int)~0 >> (NDENTRIES - i));
103 if (sub != ~0)
104 goto found;
105 off++;
106 }
107
108 maxoff = NDLOSLOTS(bits);
109 while (off < maxoff) {
110 if ((sub = bitmap[off]) != ~0)
111 goto found;
112 off++;
113 }
114
115 return (-1);
116
117 found:
118 return (off << NDENTRYSHIFT) + ffs(~sub) - 1;
119 }
120
121 static int
122 find_last_set(struct filedesc *fd, int last)
123 {
124 int off, i;
125 struct file **ofiles = fd->fd_ofiles;
126 uint32_t *bitmap = fd->fd_lomap;
127
128 off = (last - 1) >> NDENTRYSHIFT;
129
130 while (off >= 0 && !bitmap[off])
131 off--;
132
133 if (off < 0)
134 return (-1);
135
136 i = ((off + 1) << NDENTRYSHIFT) - 1;
137 if (i >= last)
138 i = last - 1;
139
140 while (i > 0 && ofiles[i] == NULL)
141 i--;
142
143 return (i);
144 }
145
146 static inline void
147 fd_used(struct filedesc *fdp, int fd)
148 {
149 u_int off = fd >> NDENTRYSHIFT;
150
151 KASSERT(rw_write_held(&fdp->fd_lock));
152 KDASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) == 0);
153
154 fdp->fd_lomap[off] |= 1 << (fd & NDENTRYMASK);
155 if (fdp->fd_lomap[off] == ~0) {
156 KDASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
157 (1 << (off & NDENTRYMASK))) == 0);
158 fdp->fd_himap[off >> NDENTRYSHIFT] |= 1 << (off & NDENTRYMASK);
159 }
160
161 if (fd > fdp->fd_lastfile)
162 fdp->fd_lastfile = fd;
163 }
164
165 static inline void
166 fd_unused(struct filedesc *fdp, int fd)
167 {
168 u_int off = fd >> NDENTRYSHIFT;
169
170 KASSERT(rw_write_held(&fdp->fd_lock));
171 if (fd < fdp->fd_freefile)
172 fdp->fd_freefile = fd;
173
174 if (fdp->fd_lomap[off] == ~0) {
175 KDASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
176 (1 << (off & NDENTRYMASK))) != 0);
177 fdp->fd_himap[off >> NDENTRYSHIFT] &=
178 ~(1 << (off & NDENTRYMASK));
179 }
180 KDASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0);
181 fdp->fd_lomap[off] &= ~(1 << (fd & NDENTRYMASK));
182
183 #ifdef DIAGNOSTIC
184 if (fd > fdp->fd_lastfile)
185 panic("fd_unused: fd_lastfile inconsistent");
186 #endif
187 if (fd == fdp->fd_lastfile)
188 fdp->fd_lastfile = find_last_set(fdp, fd);
189 }
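/*
 * Worked example of the two-level bitmap arithmetic used by fd_used()
 * and fd_unused() above, assuming the usual NDENTRIES of 32 (so
 * NDENTRYSHIFT == 5 and NDENTRYMASK == 31) from <sys/filedesc.h>.
 * For fd 100:
 *
 *	off = 100 >> NDENTRYSHIFT;		off == 3
 *	bit = 100 & NDENTRYMASK;		bit == 4
 *	fdp->fd_lomap[off] |= 1 << bit;		marks fd 100 in use
 *
 * Only when fd_lomap[3] becomes all ones does fd_used() also set bit
 * (3 & NDENTRYMASK) == 3 of fd_himap[3 >> NDENTRYSHIFT] == fd_himap[0].
 * That is what lets fdalloc() below scan fd_himap first and skip whole
 * 32-descriptor blocks that are already fully populated.
 */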
190
191 /*
192 * Lookup the file structure corresponding to a file descriptor
193 * and return it locked.
194 * Note: typical usage is: `fp = fd_getfile(..); FILE_USE(fp);'
195 * The locking strategy has been optimised for this case, i.e.
196 * fd_getfile() returns the file locked while FILE_USE() will increment
197 * the file's use count and unlock.
198 */
199 struct file *
200 fd_getfile(struct filedesc *fdp, int fd)
201 {
202 struct file *fp;
203
204 rw_enter(&fdp->fd_lock, RW_READER);
205 if ((u_int) fd >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL) {
206 rw_exit(&fdp->fd_lock);
207 return (NULL);
208 }
209
210 FILE_LOCK(fp);
211 if (FILE_IS_USABLE(fp) == 0) {
212 FILE_UNLOCK(fp);
213 rw_exit(&fdp->fd_lock);
214 return (NULL);
215 }
216 rw_exit(&fdp->fd_lock);
217
218 return (fp);
219 }
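/*
 * Illustrative caller sketch (it mirrors do_sys_fstat() later in this
 * file): translate and lock with fd_getfile(), convert the lock into a
 * use count with FILE_USE() before doing anything that may sleep, and
 * drop the use count with FILE_UNUSE() when done.
 *
 *	if ((fp = fd_getfile(l->l_proc->p_fd, fd)) == NULL)
 *		return EBADF;
 *	FILE_USE(fp);				(unlocks fp)
 *	error = (*fp->f_ops->fo_stat)(fp, sb, l);
 *	FILE_UNUSE(fp, l);
 */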
220
221 /*
222 * Common code for dup, dup2, and fcntl(F_DUPFD).
223 */
224 static int
225 finishdup(struct lwp *l, int old, int new, register_t *retval)
226 {
227 struct filedesc *fdp;
228 struct file *fp, *delfp;
229
230 fdp = l->l_proc->p_fd;
231
232 /*
233 * If there is a file in the new slot, remember it so we
234 * can close it after we've finished the dup. We need
235 * to do it after the dup is finished, since closing
236 * the file may block.
237 *
238 * Note: `old' is already used for us.
239 * Note: Caller already marked `new' slot "used".
240 */
241 rw_enter(&fdp->fd_lock, RW_WRITER);
242 delfp = fdp->fd_ofiles[new];
243
244 fp = fdp->fd_ofiles[old];
245 KDASSERT(fp != NULL);
246 fdp->fd_ofiles[new] = fp;
247 fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
248 rw_exit(&fdp->fd_lock);
249
250 *retval = new;
251 FILE_LOCK(fp);
252 fp->f_count++;
253 FILE_UNUSE_HAVELOCK(fp, l);
254
255 if (delfp != NULL) {
256 FILE_LOCK(delfp);
257 FILE_USE(delfp);
258 if (new < fdp->fd_knlistsize)
259 knote_fdclose(l, new);
260 (void) closef(delfp, l);
261 }
262 return (0);
263 }
264
265 /*
266 * Initialize the descriptor system.
267 */
268 void
269 filedesc_init(void)
270 {
271
272 mutex_init(&filelist_lock, MUTEX_DEFAULT, IPL_NONE);
273
274 file_cache = pool_cache_init(sizeof(struct file), 0, 0, 0,
275 "filepl", NULL, IPL_NONE, file_ctor, file_dtor, NULL);
276 KASSERT(file_cache != NULL);
277
278 cwdi_cache = pool_cache_init(sizeof(struct cwdinfo), 0, 0, 0,
279 "cwdipl", NULL, IPL_NONE, cwdi_ctor, cwdi_dtor, NULL);
280 KASSERT(cwdi_cache != NULL);
281
282 filedesc0_cache = pool_cache_init(sizeof(struct filedesc0), 0, 0, 0,
283 "fdescpl", NULL, IPL_NONE, NULL, NULL, NULL);
284 KASSERT(filedesc0_cache != NULL);
285 }
286
287 /*
288 * System calls on descriptors.
289 */
290
291 /*
292 * Duplicate a file descriptor.
293 */
294 /* ARGSUSED */
295 int
296 sys_dup(struct lwp *l, const struct sys_dup_args *uap, register_t *retval)
297 {
298 /* {
299 syscallarg(int) fd;
300 } */
301 struct file *fp;
302 struct filedesc *fdp;
303 struct proc *p;
304 int old, new, error;
305
306 p = l->l_proc;
307 fdp = p->p_fd;
308 old = SCARG(uap, fd);
309
310 restart:
311 if ((fp = fd_getfile(fdp, old)) == NULL)
312 return (EBADF);
313
314 FILE_USE(fp);
315
316 if ((error = fdalloc(p, 0, &new)) != 0) {
317 if (error == ENOSPC) {
318 fdexpand(p);
319 FILE_UNUSE(fp, l);
320 goto restart;
321 }
322 FILE_UNUSE(fp, l);
323 return (error);
324 }
325
326 /* finishdup() will unuse the descriptors for us */
327 return (finishdup(l, old, new, retval));
328 }
329
330 /*
331 * Duplicate a file descriptor to a particular value.
332 */
333 /* ARGSUSED */
334 int
335 sys_dup2(struct lwp *l, const struct sys_dup2_args *uap, register_t *retval)
336 {
337 /* {
338 syscallarg(int) from;
339 syscallarg(int) to;
340 } */
341 struct file *fp;
342 struct filedesc *fdp;
343 struct proc *p;
344 int old, new, i, error;
345
346 p = l->l_proc;
347 fdp = p->p_fd;
348 old = SCARG(uap, from);
349 new = SCARG(uap, to);
350
351 restart:
352 if ((fp = fd_getfile(fdp, old)) == NULL)
353 return (EBADF);
354
355 if ((u_int)new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
356 (u_int)new >= maxfiles) {
357 FILE_UNLOCK(fp);
358 return (EBADF);
359 }
360
361 if (old == new) {
362 FILE_UNLOCK(fp);
363 *retval = new;
364 return (0);
365 }
366
367 FILE_USE(fp);
368
369 if (new >= fdp->fd_nfiles) {
370 if ((error = fdalloc(p, new, &i)) != 0) {
371 if (error == ENOSPC) {
372 fdexpand(p);
373 FILE_UNUSE(fp, l);
374 goto restart;
375 }
376 FILE_UNUSE(fp, l);
377 return (error);
378 }
379 if (new != i)
380 panic("dup2: fdalloc");
381 } else {
382 rw_enter(&fdp->fd_lock, RW_WRITER);
383 /*
384 * Mark `new' slot "used" only if it was empty.
385 */
386 if (fdp->fd_ofiles[new] == NULL)
387 fd_used(fdp, new);
388 rw_exit(&fdp->fd_lock);
389 }
390
391 /*
392 * finishdup() will close the file that's in the `new'
393 * slot, if there's one there.
394 */
395
396 /* finishdup() will unuse the descriptors for us */
397 return (finishdup(l, old, new, retval));
398 }
399
400 /*
401 * fcntl call which is being passed to the file's fs.
402 */
403 static int
404 fcntl_forfs(int fd, struct lwp *l, int cmd, void *arg)
405 {
406 struct file *fp;
407 struct filedesc *fdp;
408 int error;
409 u_int size;
410 void *data, *memp;
411 #define STK_PARAMS 128
412 char stkbuf[STK_PARAMS];
413
414 /* fd's value was validated in sys_fcntl before calling this routine */
415 fdp = l->l_proc->p_fd;
416 fp = fdp->fd_ofiles[fd];
417
418 if ((fp->f_flag & (FREAD | FWRITE)) == 0)
419 return (EBADF);
420
421 /*
422 * Interpret high order word to find amount of data to be
423 * copied to/from the user's address space.
424 */
425 size = (size_t)F_PARAM_LEN(cmd);
426 if (size > F_PARAM_MAX)
427 return (EINVAL);
428 memp = NULL;
429 if (size > sizeof(stkbuf)) {
430 memp = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
431 data = memp;
432 } else
433 data = stkbuf;
434 if (cmd & F_FSIN) {
435 if (size) {
436 error = copyin(arg, data, size);
437 if (error) {
438 if (memp)
439 free(memp, M_IOCTLOPS);
440 return (error);
441 }
442 } else
443 *(void **)data = arg;
444 } else if ((cmd & F_FSOUT) && size)
445 /*
446 * Zero the buffer so the user always
447 * gets back something deterministic.
448 */
449 memset(data, 0, size);
450 else if (cmd & F_FSVOID)
451 *(void **)data = arg;
452
453
454 error = (*fp->f_ops->fo_fcntl)(fp, cmd, data, l);
455
456 /*
457 * Copy any data to user, size was
458 * already set and checked above.
459 */
460 if (error == 0 && (cmd & F_FSOUT) && size)
461 error = copyout(data, arg, size);
462 if (memp)
463 free(memp, M_IOCTLOPS);
464 return (error);
465 }
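/*
 * Sketch of what a filesystem's fo_fcntl handler sees for an F_FSCTL
 * command, in terms of the macros tested above (the handler and
 * argument structure names are hypothetical).  For a command encoding
 * F_FSIN|F_FSOUT with a parameter length of sizeof(struct mumble_args),
 * fcntl_forfs() has already copied the argument in, so the handler
 * works purely on kernel memory, and anything it writes back into the
 * buffer is copied out to userland on success:
 *
 *	int
 *	mumblefs_fcntl(struct file *fp, u_int cmd, void *data, struct lwp *l)
 *	{
 *		struct mumble_args *ap = data;	(F_PARAM_LEN(cmd) bytes)
 *
 *		ap->ma_result = ...;
 *		return 0;
 *	}
 */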
466
467 int
468 do_fcntl_lock(struct lwp *l, int fd, int cmd, struct flock *fl)
469 {
470 struct file *fp;
471 struct vnode *vp;
472 struct proc *p = l->l_proc;
473 int error, flg;
474
475 if ((fp = fd_getfile(p->p_fd, fd)) == NULL)
476 return (EBADF);
477
478 FILE_USE(fp);
479
480 if (fp->f_type != DTYPE_VNODE) {
481 error = EINVAL;
482 goto out;
483 }
484 vp = (struct vnode *)fp->f_data;
485 if (fl->l_whence == SEEK_CUR)
486 fl->l_start += fp->f_offset;
487
488 flg = F_POSIX;
489
490 switch (cmd) {
491
492 case F_SETLKW:
493 flg |= F_WAIT;
494 /* Fall into F_SETLK */
495
496 case F_SETLK:
497 switch (fl->l_type) {
498 case F_RDLCK:
499 if ((fp->f_flag & FREAD) == 0) {
500 error = EBADF;
501 goto out;
502 }
503 p->p_flag |= PK_ADVLOCK;
504 error = VOP_ADVLOCK(vp, p, F_SETLK, fl, flg);
505 goto out;
506
507 case F_WRLCK:
508 if ((fp->f_flag & FWRITE) == 0) {
509 error = EBADF;
510 goto out;
511 }
512 p->p_flag |= PK_ADVLOCK;
513 error = VOP_ADVLOCK(vp, p, F_SETLK, fl, flg);
514 goto out;
515
516 case F_UNLCK:
517 error = VOP_ADVLOCK(vp, p, F_UNLCK, fl, F_POSIX);
518 goto out;
519
520 default:
521 error = EINVAL;
522 goto out;
523 }
524
525 case F_GETLK:
526 if (fl->l_type != F_RDLCK &&
527 fl->l_type != F_WRLCK &&
528 fl->l_type != F_UNLCK) {
529 error = EINVAL;
530 goto out;
531 }
532 error = VOP_ADVLOCK(vp, p, F_GETLK, fl, F_POSIX);
533 break;
534
535 default:
536 error = EINVAL;
537 break;
538 }
539
540 out:
541 FILE_UNUSE(fp, l);
542 return error;
543 }
544
545 /*
546 * The file control system call.
547 */
548 /* ARGSUSED */
549 int
550 sys_fcntl(struct lwp *l, const struct sys_fcntl_args *uap, register_t *retval)
551 {
552 /* {
553 syscallarg(int) fd;
554 syscallarg(int) cmd;
555 syscallarg(void *) arg;
556 } */
557 struct filedesc *fdp;
558 struct file *fp;
559 struct proc *p;
560 int fd, i, tmp, error, cmd, newmin;
561 struct flock fl;
562
563 p = l->l_proc;
564 fd = SCARG(uap, fd);
565 cmd = SCARG(uap, cmd);
566 fdp = p->p_fd;
567 error = 0;
568
569 switch (cmd) {
570 case F_CLOSEM:
571 if (fd < 0)
572 return EBADF;
573 while (fdp->fd_lastfile >= fd)
574 fdrelease(l, fdp->fd_lastfile);
575 return 0;
576
577 case F_MAXFD:
578 *retval = fdp->fd_lastfile;
579 return 0;
580
581 case F_SETLKW:
582 case F_SETLK:
583 case F_GETLK:
584 error = copyin(SCARG(uap, arg), &fl, sizeof(fl));
585 if (error)
586 return error;
587 error = do_fcntl_lock(l, fd, cmd, &fl);
588 if (cmd == F_GETLK && error == 0)
589 error = copyout(&fl, SCARG(uap, arg), sizeof(fl));
590 return error;
591
592 default:
593 /* Handled below */
594 break;
595 }
596
597 restart:
598 if ((fp = fd_getfile(fdp, fd)) == NULL)
599 return (EBADF);
600
601 FILE_USE(fp);
602
603 if ((cmd & F_FSCTL)) {
604 error = fcntl_forfs(fd, l, cmd, SCARG(uap, arg));
605 goto out;
606 }
607
608 switch (cmd) {
609
610 case F_DUPFD:
611 newmin = (long)SCARG(uap, arg);
612 if ((u_int)newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
613 (u_int)newmin >= maxfiles) {
614 error = EINVAL;
615 goto out;
616 }
617 if ((error = fdalloc(p, newmin, &i)) != 0) {
618 if (error == ENOSPC) {
619 fdexpand(p);
620 FILE_UNUSE(fp, l);
621 goto restart;
622 }
623 goto out;
624 }
625
626 /* finishdup() will unuse the descriptors for us */
627 return (finishdup(l, fd, i, retval));
628
629 case F_GETFD:
630 *retval = fdp->fd_ofileflags[fd] & UF_EXCLOSE ? 1 : 0;
631 break;
632
633 case F_SETFD:
634 if ((long)SCARG(uap, arg) & 1)
635 fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
636 else
637 fdp->fd_ofileflags[fd] &= ~UF_EXCLOSE;
638 break;
639
640 case F_GETFL:
641 *retval = OFLAGS(fp->f_flag);
642 break;
643
644 case F_SETFL:
645 tmp = FFLAGS((long)SCARG(uap, arg)) & FCNTLFLAGS;
646 error = (*fp->f_ops->fo_fcntl)(fp, F_SETFL, &tmp, l);
647 if (error)
648 break;
649 i = tmp ^ fp->f_flag;
650 if (i & FNONBLOCK) {
651 int flgs = tmp & FNONBLOCK;
652 error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, &flgs, l);
653 if (error)
654 goto reset_fcntl;
655 }
656 if (i & FASYNC) {
657 int flgs = tmp & FASYNC;
658 error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, &flgs, l);
659 if (error) {
660 if (i & FNONBLOCK) {
661 tmp = fp->f_flag & FNONBLOCK;
662 (void)(*fp->f_ops->fo_ioctl)(fp,
663 FIONBIO, &tmp, l);
664 }
665 goto reset_fcntl;
666 }
667 }
668 fp->f_flag = (fp->f_flag & ~FCNTLFLAGS) | tmp;
669 break;
670 reset_fcntl:
671 (void)(*fp->f_ops->fo_fcntl)(fp, F_SETFL, &fp->f_flag, l);
672 break;
673
674 case F_GETOWN:
675 error = (*fp->f_ops->fo_ioctl)(fp, FIOGETOWN, &tmp, l);
676 *retval = tmp;
677 break;
678
679 case F_SETOWN:
680 tmp = (int)(intptr_t) SCARG(uap, arg);
681 error = (*fp->f_ops->fo_ioctl)(fp, FIOSETOWN, &tmp, l);
682 break;
683
684 default:
685 error = EINVAL;
686 }
687
688 out:
689 FILE_UNUSE(fp, l);
690 return (error);
691 }
692
693 void
694 fdremove(struct filedesc *fdp, int fd)
695 {
696
697 rw_enter(&fdp->fd_lock, RW_WRITER);
698 fdp->fd_ofiles[fd] = NULL;
699 fd_unused(fdp, fd);
700 rw_exit(&fdp->fd_lock);
701 }
702
703 int
704 fdrelease(struct lwp *l, int fd)
705 {
706 struct proc *p = l->l_proc;
707 struct filedesc *fdp;
708 struct file **fpp, *fp;
709
710 fdp = p->p_fd;
711 rw_enter(&fdp->fd_lock, RW_WRITER);
712 if (fd < 0 || fd > fdp->fd_lastfile)
713 goto badf;
714 fpp = &fdp->fd_ofiles[fd];
715 fp = *fpp;
716 if (fp == NULL)
717 goto badf;
718
719 FILE_LOCK(fp);
720 if (!FILE_IS_USABLE(fp)) {
721 FILE_UNLOCK(fp);
722 goto badf;
723 }
724
725 FILE_USE(fp);
726
727 *fpp = NULL;
728 fdp->fd_ofileflags[fd] = 0;
729 fd_unused(fdp, fd);
730 rw_exit(&fdp->fd_lock);
731 if (fd < fdp->fd_knlistsize)
732 knote_fdclose(l, fd);
733 return (closef(fp, l));
734
735 badf:
736 rw_exit(&fdp->fd_lock);
737 return (EBADF);
738 }
739
740 /*
741 * Close a file descriptor.
742 */
743 /* ARGSUSED */
744 int
745 sys_close(struct lwp *l, const struct sys_close_args *uap, register_t *retval)
746 {
747 /* {
748 syscallarg(int) fd;
749 } */
750 int fd;
751 struct filedesc *fdp;
752 struct proc *p;
753
754 p = l->l_proc;
755 fd = SCARG(uap, fd);
756 fdp = p->p_fd;
757
758 #if 0
759 if (fd_getfile(fdp, fd) == NULL)
760 return (EBADF);
761 #endif
762
763 return (fdrelease(l, fd));
764 }
765
766 /*
767 * Return status information about a file descriptor.
768 * Common function for compat code.
769 */
770 int
771 do_sys_fstat(struct lwp *l, int fd, struct stat *sb)
772 {
773 struct file *fp;
774 int error;
775
776 fp = fd_getfile(l->l_proc->p_fd, fd);
777 if (fp == NULL)
778 return EBADF;
779
780 FILE_USE(fp);
781 error = (*fp->f_ops->fo_stat)(fp, sb, l);
782 FILE_UNUSE(fp, l);
783
784 return error;
785 }
786
787 /*
788 * Return status information about a file descriptor.
789 */
790 /* ARGSUSED */
791 int
792 sys___fstat30(struct lwp *l, const struct sys___fstat30_args *uap, register_t *retval)
793 {
794 /* {
795 syscallarg(int) fd;
796 syscallarg(struct stat *) sb;
797 } */
798 struct stat sb;
799 int error;
800
801 error = do_sys_fstat(l, SCARG(uap, fd), &sb);
802
803 if (error == 0)
804 error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
805
806 return (error);
807 }
808
809 /*
810 * Return pathconf information about a file descriptor.
811 */
812 /* ARGSUSED */
813 int
814 sys_fpathconf(struct lwp *l, const struct sys_fpathconf_args *uap, register_t *retval)
815 {
816 /* {
817 syscallarg(int) fd;
818 syscallarg(int) name;
819 } */
820 int fd;
821 struct filedesc *fdp;
822 struct file *fp;
823 struct proc *p;
824 struct vnode *vp;
825 int error;
826
827 p = l->l_proc;
828 fd = SCARG(uap, fd);
829 fdp = p->p_fd;
830 error = 0;
831
832 if ((fp = fd_getfile(fdp, fd)) == NULL)
833 return (EBADF);
834
835 FILE_USE(fp);
836
837 switch (fp->f_type) {
838
839 case DTYPE_SOCKET:
840 case DTYPE_PIPE:
841 if (SCARG(uap, name) != _PC_PIPE_BUF)
842 error = EINVAL;
843 else
844 *retval = PIPE_BUF;
845 break;
846
847 case DTYPE_VNODE:
848 vp = (struct vnode *)fp->f_data;
849 error = VOP_PATHCONF(vp, SCARG(uap, name), retval);
850 break;
851
852 case DTYPE_KQUEUE:
853 error = EINVAL;
854 break;
855
856 default:
857 error = EOPNOTSUPP;
858 break;
859 }
860
861 FILE_UNUSE(fp, l);
862 return (error);
863 }
864
865 /*
866 * Allocate a file descriptor for the process.
867 */
868 int fdexpanded; /* XXX: what else uses this? */
869
870 int
871 fdalloc(struct proc *p, int want, int *result)
872 {
873 struct filedesc *fdp;
874 int i, lim, last, error;
875 u_int off, new;
876
877 fdp = p->p_fd;
878 rw_enter(&fdp->fd_lock, RW_WRITER);
879
880 /*
881 * Search for a free descriptor starting at the higher
882 * of want or fd_freefile. If that fails, consider
883 * expanding the ofile array.
884 */
885 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
886 last = min(fdp->fd_nfiles, lim);
887 again:
888 if ((i = want) < fdp->fd_freefile)
889 i = fdp->fd_freefile;
890 off = i >> NDENTRYSHIFT;
891 new = find_next_zero(fdp->fd_himap, off,
892 (last + NDENTRIES - 1) >> NDENTRYSHIFT);
893 if (new != -1) {
894 i = find_next_zero(&fdp->fd_lomap[new],
895 new > off ? 0 : i & NDENTRYMASK, NDENTRIES);
896 if (i == -1) {
897 /*
898 * free file descriptor in this block was
899 * below want, try again with higher want.
900 */
901 want = (new + 1) << NDENTRYSHIFT;
902 goto again;
903 }
904 i += (new << NDENTRYSHIFT);
905 if (i < last) {
906 if (fdp->fd_ofiles[i] == NULL) {
907 fd_used(fdp, i);
908 if (want <= fdp->fd_freefile)
909 fdp->fd_freefile = i;
910 *result = i;
911 error = 0;
912 goto out;
913 }
914 }
915 }
916
917 /* No space in current array. Expand or let the caller do it. */
918 error = (fdp->fd_nfiles >= lim) ? EMFILE : ENOSPC;
919
920 out:
921 rw_exit(&fdp->fd_lock);
922 return (error);
923 }
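/*
 * The contract with callers, exactly as sys_dup(), falloc() and the
 * other callers in this file use it: ENOSPC means "the table itself is
 * out of slots, grow it and retry", while EMFILE is final.
 *
 *	for (;;) {
 *		error = fdalloc(p, want, &fd);
 *		if (error != ENOSPC)
 *			break;
 *		fdexpand(p);
 *	}
 */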
924
925 void
926 fdexpand(struct proc *p)
927 {
928 struct filedesc *fdp;
929 int i, numfiles, oldnfiles;
930 struct file **newofile;
931 char *newofileflags;
932 uint32_t *newhimap = NULL, *newlomap = NULL;
933
934 fdp = p->p_fd;
935
936 restart:
937 oldnfiles = fdp->fd_nfiles;
938
939 if (oldnfiles < NDEXTENT)
940 numfiles = NDEXTENT;
941 else
942 numfiles = 2 * oldnfiles;
943
944 newofile = malloc(numfiles * OFILESIZE, M_FILEDESC, M_WAITOK);
945 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
946 newhimap = malloc(NDHISLOTS(numfiles) * sizeof(uint32_t),
947 M_FILEDESC, M_WAITOK);
948 newlomap = malloc(NDLOSLOTS(numfiles) * sizeof(uint32_t),
949 M_FILEDESC, M_WAITOK);
950 }
951
952 rw_enter(&fdp->fd_lock, RW_WRITER);
953 /* lock fdp */
954 if (fdp->fd_nfiles != oldnfiles) {
955 /* fdp changed; retry */
956 rw_exit(&fdp->fd_lock);
957 free(newofile, M_FILEDESC);
958 if (newhimap != NULL) free(newhimap, M_FILEDESC);
959 if (newlomap != NULL) free(newlomap, M_FILEDESC);
960 goto restart;
961 }
962
963 newofileflags = (char *) &newofile[numfiles];
964 /*
965 * Copy the existing ofile and ofileflags arrays
966 * and zero the new portion of each array.
967 */
968 memcpy(newofile, fdp->fd_ofiles,
969 (i = sizeof(struct file *) * fdp->fd_nfiles));
970 memset((char *)newofile + i, 0,
971 numfiles * sizeof(struct file *) - i);
972 memcpy(newofileflags, fdp->fd_ofileflags,
973 (i = sizeof(char) * fdp->fd_nfiles));
974 memset(newofileflags + i, 0, numfiles * sizeof(char) - i);
975 if (oldnfiles > NDFILE)
976 free(fdp->fd_ofiles, M_FILEDESC);
977
978 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
979 memcpy(newhimap, fdp->fd_himap,
980 (i = NDHISLOTS(oldnfiles) * sizeof(uint32_t)));
981 memset((char *)newhimap + i, 0,
982 NDHISLOTS(numfiles) * sizeof(uint32_t) - i);
983
984 memcpy(newlomap, fdp->fd_lomap,
985 (i = NDLOSLOTS(oldnfiles) * sizeof(uint32_t)));
986 memset((char *)newlomap + i, 0,
987 NDLOSLOTS(numfiles) * sizeof(uint32_t) - i);
988
989 if (NDHISLOTS(oldnfiles) > NDHISLOTS(NDFILE)) {
990 free(fdp->fd_himap, M_FILEDESC);
991 free(fdp->fd_lomap, M_FILEDESC);
992 }
993 fdp->fd_himap = newhimap;
994 fdp->fd_lomap = newlomap;
995 }
996
997 fdp->fd_ofiles = newofile;
998 fdp->fd_ofileflags = newofileflags;
999 fdp->fd_nfiles = numfiles;
1000
1001 rw_exit(&fdp->fd_lock);
1002
1003 fdexpanded++;
1004 }
1005
1006 /*
1007 * Create a new open file structure and allocate
1008 * a file descriptor for the process that refers to it.
1009 */
1010 int
1011 falloc(struct lwp *l, struct file **resultfp, int *resultfd)
1012 {
1013 struct filedesc *fdp;
1014 struct file *fp;
1015 struct proc *p;
1016 int error, i;
1017
1018 p = l->l_proc;
1019 fdp = p->p_fd;
1020
1021 restart:
1022 if ((error = fdalloc(p, 0, &i)) != 0) {
1023 if (error == ENOSPC) {
1024 fdexpand(p);
1025 goto restart;
1026 }
1027 return (error);
1028 }
1029
1030 fp = pool_cache_get(file_cache, PR_WAITOK);
1031
1032 if (atomic_inc_uint_nv(&nfiles) >= maxfiles) {
1033 atomic_dec_uint(&nfiles);
1034 tablefull("file", "increase kern.maxfiles or MAXFILES");
1035 rw_enter(&fdp->fd_lock, RW_WRITER);
1036 fd_unused(fdp, i);
1037 rw_exit(&fdp->fd_lock);
1038 pool_cache_put(file_cache, fp);
1039 return (ENFILE);
1040 }
1041
1042 fp->f_advice = 0;
1043 fp->f_msgcount = 0;
1044 fp->f_offset = 0;
1045
1046 	/*
1047 	 * The file is still larval: give it a reference to the opening
1048 	 * lwp's credentials and a single reference count, then install
1049 	 * it in the table.  It does not become visible to fd_getfile()
1050 	 * until the caller marks it mature with FILE_SET_MATURE().
1051 	 */
1052 fp->f_iflags = FIF_LARVAL;
1053 fp->f_cred = l->l_cred;
1054 kauth_cred_hold(fp->f_cred);
1055
1056 FILE_LOCK(fp);
1057 fp->f_count = 1;
1058 FILE_UNLOCK(fp);
1059
1060 rw_enter(&fdp->fd_lock, RW_WRITER); /* XXXAD check order */
1061 KDASSERT(fdp->fd_ofiles[i] == NULL);
1062 fdp->fd_ofiles[i] = fp;
1063 rw_exit(&fdp->fd_lock);
1064
1065 if (resultfp) {
1066 fp->f_usecount = 1;
1067 *resultfp = fp;
1068 }
1069 if (resultfd)
1070 *resultfd = i;
1071
1072 return (0);
1073 }
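/*
 * Illustrative open-path sketch, following the sequence fdcheckstd()
 * later in this file uses for /dev/null: the larval file returned by
 * falloc() is filled in first and matured last, at which point it
 * becomes visible to fd_getfile() in other threads sharing the
 * descriptor table.
 *
 *	if ((error = falloc(l, &fp, &fd)) != 0)
 *		return error;
 *	if ((error = ...open the underlying object...) != 0) {
 *		FILE_UNUSE(fp, l);
 *		ffree(fp);
 *		fdremove(p->p_fd, fd);
 *		return error;
 *	}
 *	fp->f_type = DTYPE_VNODE;
 *	fp->f_ops = &vnops;
 *	fp->f_data = vp;
 *	fp->f_flag = flags;
 *	FILE_SET_MATURE(fp);
 *	FILE_UNUSE(fp, l);
 */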
1074
1075 /*
1076 * Free a file descriptor.
1077 */
1078 void
1079 ffree(struct file *fp)
1080 {
1081
1082 KASSERT(fp->f_usecount == 0);
1083
1084 atomic_dec_uint(&nfiles);
1085 kauth_cred_free(fp->f_cred);
1086 pool_cache_put(file_cache, fp);
1087 }
1088
1089 /*
1090 * Create an initial cwdinfo structure, using the same current and root
1091 * directories as p.
1092 */
1093 struct cwdinfo *
1094 cwdinit(struct proc *p)
1095 {
1096 struct cwdinfo *cwdi;
1097 struct cwdinfo *copy;
1098
1099 cwdi = pool_cache_get(cwdi_cache, PR_WAITOK);
1100 copy = p->p_cwdi;
1101
1102 	rw_enter(&copy->cwdi_lock, RW_READER);
1103 cwdi->cwdi_cdir = p->p_cwdi->cwdi_cdir;
1104 if (cwdi->cwdi_cdir)
1105 VREF(cwdi->cwdi_cdir);
1106 cwdi->cwdi_rdir = p->p_cwdi->cwdi_rdir;
1107 if (cwdi->cwdi_rdir)
1108 VREF(cwdi->cwdi_rdir);
1109 cwdi->cwdi_edir = p->p_cwdi->cwdi_edir;
1110 if (cwdi->cwdi_edir)
1111 VREF(cwdi->cwdi_edir);
1112 cwdi->cwdi_cmask = p->p_cwdi->cwdi_cmask;
1113 cwdi->cwdi_refcnt = 1;
1114 	rw_exit(&copy->cwdi_lock);
1115
1116 return (cwdi);
1117 }
1118
1119 static int
1120 cwdi_ctor(void *arg, void *obj, int flags)
1121 {
1122 struct cwdinfo *cwdi = obj;
1123
1124 rw_init(&cwdi->cwdi_lock);
1125
1126 return 0;
1127 }
1128
1129 static void
1130 cwdi_dtor(void *arg, void *obj)
1131 {
1132 struct cwdinfo *cwdi = obj;
1133
1134 rw_destroy(&cwdi->cwdi_lock);
1135 }
1136
1137 static int
1138 file_ctor(void *arg, void *obj, int flags)
1139 {
1140 struct file *fp = obj;
1141
1142 memset(fp, 0, sizeof(*fp));
1143 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1144 cv_init(&fp->f_cv, "closef");
1145
1146 mutex_enter(&filelist_lock);
1147 LIST_INSERT_HEAD(&filehead, fp, f_list);
1148 mutex_exit(&filelist_lock);
1149
1150 return 0;
1151 }
1152
1153 static void
1154 file_dtor(void *arg, void *obj)
1155 {
1156 struct file *fp = obj;
1157
1158 mutex_enter(&filelist_lock);
1159 LIST_REMOVE(fp, f_list);
1160 mutex_exit(&filelist_lock);
1161
1162 mutex_destroy(&fp->f_lock);
1163 cv_destroy(&fp->f_cv);
1164 }
1165
1166 struct file *
1167 fgetdummy(void)
1168 {
1169 struct file *fp;
1170
1171 fp = kmem_alloc(sizeof(*fp), KM_SLEEP);
1172 if (fp != NULL) {
1173 memset(fp, 0, sizeof(*fp));
1174 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1175 }
1176 return fp;
1177 }
1178
1179 void
1180 fputdummy(struct file *fp)
1181 {
1182
1183 mutex_destroy(&fp->f_lock);
1184 kmem_free(fp, sizeof(*fp));
1185 }
1186
1187 /*
1188 * Make p2 share p1's cwdinfo.
1189 */
1190 void
1191 cwdshare(struct proc *p1, struct proc *p2)
1192 {
1193 struct cwdinfo *cwdi = p1->p_cwdi;
1194
1195 atomic_inc_uint(&cwdi->cwdi_refcnt);
1196 p2->p_cwdi = cwdi;
1197 }
1198
1199 /*
1200 * Make this process not share its cwdinfo structure, maintaining
1201 * all cwdinfo state.
1202 */
1203 void
1204 cwdunshare(struct proc *p)
1205 {
1206 struct cwdinfo *oldcwdi, *newcwdi;
1207
1208 if (p->p_cwdi->cwdi_refcnt == 1)
1209 return;
1210
1211 newcwdi = cwdinit(p);
1212 oldcwdi = p->p_cwdi;
1213 p->p_cwdi = newcwdi;
1214 cwdfree(oldcwdi);
1215 }
1216
1217 /*
1218 * Release a cwdinfo structure.
1219 */
1220 void
1221 cwdfree(struct cwdinfo *cwdi)
1222 {
1223
1224 if (atomic_dec_uint_nv(&cwdi->cwdi_refcnt) > 0)
1225 return;
1226
1227 vrele(cwdi->cwdi_cdir);
1228 if (cwdi->cwdi_rdir)
1229 vrele(cwdi->cwdi_rdir);
1230 if (cwdi->cwdi_edir)
1231 vrele(cwdi->cwdi_edir);
1232 pool_cache_put(cwdi_cache, cwdi);
1233 }
1234
1235 /*
1236 * Create an initial filedesc structure, using the same current and root
1237 * directories as p.
1238 */
1239 struct filedesc *
1240 fdinit(struct proc *p)
1241 {
1242 struct filedesc0 *newfdp;
1243
1244 newfdp = pool_cache_get(filedesc0_cache, PR_WAITOK);
1245 memset(newfdp, 0, sizeof(struct filedesc0));
1246
1247 fdinit1(newfdp);
1248
1249 return (&newfdp->fd_fd);
1250 }
1251
1252 /*
1253 * Initialize a file descriptor table.
1254 */
1255 void
1256 fdinit1(struct filedesc0 *newfdp)
1257 {
1258
1259 newfdp->fd_fd.fd_refcnt = 1;
1260 newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
1261 newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
1262 newfdp->fd_fd.fd_nfiles = NDFILE;
1263 newfdp->fd_fd.fd_knlistsize = -1;
1264 newfdp->fd_fd.fd_himap = newfdp->fd_dhimap;
1265 newfdp->fd_fd.fd_lomap = newfdp->fd_dlomap;
1266 newfdp->fd_fd.fd_lastfile = -1;
1267 rw_init(&newfdp->fd_fd.fd_lock);
1268 }
1269
1270 /*
1271 * Make p2 share p1's filedesc structure.
1272 */
1273 void
1274 fdshare(struct proc *p1, struct proc *p2)
1275 {
1276 struct filedesc *fdp = p1->p_fd;
1277
1278 p2->p_fd = fdp;
1279 atomic_inc_uint(&fdp->fd_refcnt);
1280 }
1281
1282 /*
1283 * Make this process not share its filedesc structure, maintaining
1284 * all file descriptor state.
1285 */
1286 void
1287 fdunshare(struct lwp *l)
1288 {
1289 struct proc *p = l->l_proc;
1290 struct filedesc *newfd;
1291
1292 if (p->p_fd->fd_refcnt == 1)
1293 return;
1294
1295 newfd = fdcopy(p);
1296 fdfree(l);
1297 p->p_fd = newfd;
1298 }
1299
1300 /*
1301 * Clear a process's fd table.
1302 */
1303 void
1304 fdclear(struct lwp *l)
1305 {
1306 struct proc *p = l->l_proc;
1307 struct filedesc *newfd;
1308
1309 newfd = fdinit(p);
1310 fdfree(l);
1311 p->p_fd = newfd;
1312 }
1313
1314 /*
1315 * Copy a filedesc structure.
1316 */
1317 struct filedesc *
1318 fdcopy(struct proc *p)
1319 {
1320 struct filedesc *newfdp, *fdp;
1321 struct file **fpp, **nfpp;
1322 int i, numfiles, lastfile;
1323
1324 fdp = p->p_fd;
1325 newfdp = pool_cache_get(filedesc0_cache, PR_WAITOK);
1326 newfdp->fd_refcnt = 1;
1327 rw_init(&newfdp->fd_lock);
1328
1329 restart:
1330 numfiles = fdp->fd_nfiles;
1331 lastfile = fdp->fd_lastfile;
1332
1333 /*
1334 * If the number of open files fits in the internal arrays
1335 * of the open file structure, use them, otherwise allocate
1336 * additional memory for the number of descriptors currently
1337 * in use.
1338 */
1339 if (lastfile < NDFILE) {
1340 i = NDFILE;
1341 } else {
1342 /*
1343 * Compute the smallest multiple of NDEXTENT needed
1344 * for the file descriptors currently in use,
1345 * allowing the table to shrink.
1346 */
1347 i = numfiles;
1348 while (i >= 2 * NDEXTENT && i > lastfile * 2)
1349 i /= 2;
1350 newfdp->fd_ofiles = malloc(i * OFILESIZE, M_FILEDESC, M_WAITOK);
1351 }
1352 if (NDHISLOTS(i) > NDHISLOTS(NDFILE)) {
1353 newfdp->fd_himap = malloc(NDHISLOTS(i) * sizeof(uint32_t),
1354 M_FILEDESC, M_WAITOK);
1355 newfdp->fd_lomap = malloc(NDLOSLOTS(i) * sizeof(uint32_t),
1356 M_FILEDESC, M_WAITOK);
1357 }
1358
1359 rw_enter(&fdp->fd_lock, RW_READER);
1360 if (numfiles != fdp->fd_nfiles || lastfile != fdp->fd_lastfile) {
1361 rw_exit(&fdp->fd_lock);
1362 if (i > NDFILE)
1363 free(newfdp->fd_ofiles, M_FILEDESC);
1364 if (NDHISLOTS(i) > NDHISLOTS(NDFILE)) {
1365 free(newfdp->fd_himap, M_FILEDESC);
1366 free(newfdp->fd_lomap, M_FILEDESC);
1367 }
1368 goto restart;
1369 }
1370
1371 if (lastfile < NDFILE) {
1372 newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
1373 newfdp->fd_ofileflags =
1374 ((struct filedesc0 *) newfdp)->fd_dfileflags;
1375 } else {
1376 newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
1377 }
1378 if (NDHISLOTS(i) <= NDHISLOTS(NDFILE)) {
1379 newfdp->fd_himap =
1380 ((struct filedesc0 *) newfdp)->fd_dhimap;
1381 newfdp->fd_lomap =
1382 ((struct filedesc0 *) newfdp)->fd_dlomap;
1383 }
1384
1385 newfdp->fd_nfiles = i;
1386 newfdp->fd_lastfile = lastfile;
1387 newfdp->fd_freefile = fdp->fd_freefile;
1388
1389 /* Clear the entries that will not be copied over.
1390 * Avoid calling memset with 0 size (i.e. when
1391 	 * lastfile == i-1) */
1392 if (lastfile < (i-1))
1393 memset(newfdp->fd_ofiles + lastfile + 1, 0,
1394 (i - lastfile - 1) * sizeof(struct file **));
1395 memcpy(newfdp->fd_ofileflags, fdp->fd_ofileflags, i * sizeof(char));
1396 if (i < NDENTRIES * NDENTRIES)
1397 i = NDENTRIES * NDENTRIES; /* size of inlined bitmaps */
1398 memcpy(newfdp->fd_himap, fdp->fd_himap, NDHISLOTS(i)*sizeof(uint32_t));
1399 memcpy(newfdp->fd_lomap, fdp->fd_lomap, NDLOSLOTS(i)*sizeof(uint32_t));
1400
1401 fpp = fdp->fd_ofiles;
1402 nfpp = newfdp->fd_ofiles;
1403 for (i = 0; i <= lastfile; i++, fpp++, nfpp++) {
1404 if ((*nfpp = *fpp) == NULL)
1405 continue;
1406
1407 if ((*fpp)->f_type == DTYPE_KQUEUE)
1408 /* kq descriptors cannot be copied. */
1409 fdremove(newfdp, i);
1410 else {
1411 FILE_LOCK(*fpp);
1412 (*fpp)->f_count++;
1413 FILE_UNLOCK(*fpp);
1414 }
1415 }
1416
1417 rw_exit(&fdp->fd_lock);
1418
1419 newfdp->fd_knlist = NULL;
1420 newfdp->fd_knlistsize = -1;
1421 newfdp->fd_knhash = NULL;
1422 newfdp->fd_knhashmask = 0;
1423
1424 return (newfdp);
1425 }
1426
1427 /*
1428 * Release a filedesc structure.
1429 */
1430 void
1431 fdfree(struct lwp *l)
1432 {
1433 struct proc *p = l->l_proc;
1434 struct filedesc *fdp;
1435 struct file **fpp, *fp;
1436 int i;
1437
1438 fdp = p->p_fd;
1439 if (atomic_dec_uint_nv(&fdp->fd_refcnt) > 0)
1440 return;
1441
1442 rw_destroy(&fdp->fd_lock);
1443 fpp = fdp->fd_ofiles;
1444 for (i = fdp->fd_lastfile; i >= 0; i--, fpp++) {
1445 fp = *fpp;
1446 if (fp != NULL) {
1447 *fpp = NULL;
1448 FILE_LOCK(fp);
1449 FILE_USE(fp);
1450 if ((fdp->fd_lastfile - i) < fdp->fd_knlistsize)
1451 knote_fdclose(l, fdp->fd_lastfile - i);
1452 (void) closef(fp, l);
1453 }
1454 }
1455 p->p_fd = NULL;
1456 if (fdp->fd_nfiles > NDFILE)
1457 free(fdp->fd_ofiles, M_FILEDESC);
1458 if (NDHISLOTS(fdp->fd_nfiles) > NDHISLOTS(NDFILE)) {
1459 free(fdp->fd_himap, M_FILEDESC);
1460 free(fdp->fd_lomap, M_FILEDESC);
1461 }
1462 if (fdp->fd_knlist)
1463 free(fdp->fd_knlist, M_KEVENT);
1464 if (fdp->fd_knhash)
1465 hashdone(fdp->fd_knhash, M_KEVENT);
1466 pool_cache_put(filedesc0_cache, fdp);
1467 }
1468
1469 /*
1470 * Internal form of close.
1471 * Decrement reference count on file structure.
1472 * Note: p may be NULL when closing a file
1473 * that was being passed in a message.
1474 *
1475 * Note: we expect the caller is holding a usecount, and expects us
1476 * to drop it (the caller thinks the file is going away forever).
1477 */
1478 int
1479 closef(struct file *fp, struct lwp *l)
1480 {
1481 struct proc *p = l ? l->l_proc : NULL;
1482 struct vnode *vp;
1483 struct flock lf;
1484 int error;
1485
1486 if (fp == NULL)
1487 return (0);
1488
1489 /*
1490 * POSIX record locking dictates that any close releases ALL
1491 * locks owned by this process. This is handled by setting
1492 * a flag in the unlock to free ONLY locks obeying POSIX
1493 * semantics, and not to free BSD-style file locks.
1494 * If the descriptor was in a message, POSIX-style locks
1495 * aren't passed with the descriptor.
1496 */
1497 if (p && (p->p_flag & PK_ADVLOCK) && fp->f_type == DTYPE_VNODE) {
1498 lf.l_whence = SEEK_SET;
1499 lf.l_start = 0;
1500 lf.l_len = 0;
1501 lf.l_type = F_UNLCK;
1502 vp = (struct vnode *)fp->f_data;
1503 (void) VOP_ADVLOCK(vp, p, F_UNLCK, &lf, F_POSIX);
1504 }
1505
1506 /*
1507 * If WANTCLOSE is set, then the reference count on the file
1508 * is 0, but there were multiple users of the file. This can
1509 * happen if a filedesc structure is shared by multiple
1510 * processes.
1511 */
1512 FILE_LOCK(fp);
1513 if (fp->f_iflags & FIF_WANTCLOSE) {
1514 /*
1515 * Another user of the file is already closing, and is
1516 * simply waiting for other users of the file to drain.
1517 * Release our usecount, and wake up the closer if it
1518 * is the only remaining use.
1519 */
1520 #ifdef DIAGNOSTIC
1521 if (fp->f_count != 0)
1522 panic("closef: wantclose and count != 0");
1523 if (fp->f_usecount < 2)
1524 panic("closef: wantclose and usecount < 2");
1525 #endif
1526 if (--fp->f_usecount == 1)
1527 cv_broadcast(&fp->f_cv);
1528 FILE_UNLOCK(fp);
1529 return (0);
1530 } else {
1531 /*
1532 * Decrement the reference count. If we were not the
1533 * last reference, then release our use and just
1534 * return.
1535 */
1536 if (--fp->f_count > 0) {
1537 #ifdef DIAGNOSTIC
1538 if (fp->f_usecount < 1)
1539 panic("closef: no wantclose and usecount < 1");
1540 #endif
1541 fp->f_usecount--;
1542 FILE_UNLOCK(fp);
1543 return (0);
1544 }
1545 }
1546
1547 /*
1548 * The reference count is now 0. However, there may be
1549 * multiple potential users of this file. This can happen
1550 * if multiple processes shared a single filedesc structure.
1551 *
1552 * Notify these potential users that the file is closing.
1553 * This will prevent them from adding additional uses to
1554 * the file.
1555 */
1556 fp->f_iflags |= FIF_WANTCLOSE;
1557
1558 /*
1559 * We expect the caller to add a use to the file. So, if we
1560 * are the last user, usecount will be 1. If it is not, we
1561 * must wait for the usecount to drain. When it drains back
1562 * to 1, we will be awakened so that we may proceed with the
1563 * close.
1564 */
1565 #ifdef DIAGNOSTIC
1566 if (fp->f_usecount < 1)
1567 panic("closef: usecount < 1");
1568 #endif
1569 while (fp->f_usecount > 1)
1570 cv_wait(&fp->f_cv, &fp->f_lock);
1571 #ifdef DIAGNOSTIC
1572 if (fp->f_usecount != 1)
1573 panic("closef: usecount != 1");
1574 #endif
1575
1576 FILE_UNLOCK(fp);
1577 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
1578 lf.l_whence = SEEK_SET;
1579 lf.l_start = 0;
1580 lf.l_len = 0;
1581 lf.l_type = F_UNLCK;
1582 vp = (struct vnode *)fp->f_data;
1583 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
1584 }
1585 if (fp->f_ops)
1586 error = (*fp->f_ops->fo_close)(fp, l);
1587 else
1588 error = 0;
1589
1590 /* Nothing references the file now, drop the final use (us). */
1591 fp->f_usecount--;
1592
1593 ffree(fp);
1594 return (error);
1595 }
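/*
 * The caller's side of that contract, for reference (this is the
 * sequence fdrelease() above uses): the use reference handed to
 * closef() is what keeps the file alive while the descriptor slot is
 * being cleared, and closef() drops it.
 *
 *	FILE_LOCK(fp);
 *	FILE_USE(fp);			(take a use count, drop f_lock)
 *	... detach fp from the descriptor table ...
 *	error = closef(fp, l);
 */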
1596
1597 /*
1598 * Apply an advisory lock on a file descriptor.
1599 *
1600 * Just attempt to get a record lock of the requested type on
1601 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
1602 */
1603 /* ARGSUSED */
1604 int
1605 sys_flock(struct lwp *l, const struct sys_flock_args *uap, register_t *retval)
1606 {
1607 /* {
1608 syscallarg(int) fd;
1609 syscallarg(int) how;
1610 } */
1611 int fd, how, error;
1612 struct proc *p;
1613 struct filedesc *fdp;
1614 struct file *fp;
1615 struct vnode *vp;
1616 struct flock lf;
1617
1618 p = l->l_proc;
1619 fd = SCARG(uap, fd);
1620 how = SCARG(uap, how);
1621 fdp = p->p_fd;
1622 error = 0;
1623
1624 if ((fp = fd_getfile(fdp, fd)) == NULL)
1625 return (EBADF);
1626
1627 FILE_USE(fp);
1628
1629 if (fp->f_type != DTYPE_VNODE) {
1630 error = EOPNOTSUPP;
1631 goto out;
1632 }
1633
1634 vp = (struct vnode *)fp->f_data;
1635 lf.l_whence = SEEK_SET;
1636 lf.l_start = 0;
1637 lf.l_len = 0;
1638 if (how & LOCK_UN) {
1639 lf.l_type = F_UNLCK;
1640 fp->f_flag &= ~FHASLOCK;
1641 error = VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
1642 goto out;
1643 }
1644 if (how & LOCK_EX)
1645 lf.l_type = F_WRLCK;
1646 else if (how & LOCK_SH)
1647 lf.l_type = F_RDLCK;
1648 else {
1649 error = EINVAL;
1650 goto out;
1651 }
1652 fp->f_flag |= FHASLOCK;
1653 if (how & LOCK_NB)
1654 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, F_FLOCK);
1655 else
1656 error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf,
1657 F_FLOCK|F_WAIT);
1658 out:
1659 FILE_UNUSE(fp, l);
1660 return (error);
1661 }
1662
1663 /* ARGSUSED */
1664 int
1665 sys_posix_fadvise(struct lwp *l, const struct sys_posix_fadvise_args *uap, register_t *retval)
1666 {
1667 /* {
1668 syscallarg(int) fd;
1669 syscallarg(off_t) offset;
1670 syscallarg(off_t) len;
1671 syscallarg(int) advice;
1672 } */
1673 const int fd = SCARG(uap, fd);
1674 const int advice = SCARG(uap, advice);
1675 struct proc *p = l->l_proc;
1676 struct file *fp;
1677 int error = 0;
1678
1679 fp = fd_getfile(p->p_fd, fd);
1680 if (fp == NULL) {
1681 error = EBADF;
1682 goto out;
1683 }
1684 FILE_USE(fp);
1685
1686 if (fp->f_type != DTYPE_VNODE) {
1687 if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1688 error = ESPIPE;
1689 } else {
1690 error = EOPNOTSUPP;
1691 }
1692 goto out;
1693 }
1694
1695 switch (advice) {
1696 case POSIX_FADV_NORMAL:
1697 case POSIX_FADV_RANDOM:
1698 case POSIX_FADV_SEQUENTIAL:
1699 KASSERT(POSIX_FADV_NORMAL == UVM_ADV_NORMAL);
1700 KASSERT(POSIX_FADV_RANDOM == UVM_ADV_RANDOM);
1701 KASSERT(POSIX_FADV_SEQUENTIAL == UVM_ADV_SEQUENTIAL);
1702
1703 /*
1704 * we ignore offset and size.
1705 */
1706
1707 fp->f_advice = advice;
1708 break;
1709
1710 case POSIX_FADV_WILLNEED:
1711 case POSIX_FADV_DONTNEED:
1712 case POSIX_FADV_NOREUSE:
1713
1714 /*
1715 * not implemented yet.
1716 */
1717
1718 break;
1719 default:
1720 error = EINVAL;
1721 break;
1722 }
1723 out:
1724 if (fp != NULL) {
1725 FILE_UNUSE(fp, l);
1726 }
1727 *retval = error;
1728 return 0;
1729 }
1730
1731 /*
1732 * File Descriptor pseudo-device driver (/dev/fd/).
1733 *
1734 * Opening minor device N dup()s the file (if any) connected to file
1735 * descriptor N belonging to the calling process. Note that this driver
1736 * consists of only the ``open()'' routine, because all subsequent
1737 * references to this file will be direct to the other driver.
1738 */
1739 /* ARGSUSED */
1740 static int
1741 filedescopen(dev_t dev, int mode, int type, struct lwp *l)
1742 {
1743
1744 /*
1745 	 * XXX Kludge: set dupfd to contain the value of
1746 * the file descriptor being sought for duplication. The error
1747 * return ensures that the vnode for this device will be released
1748 * by vn_open. Open will detect this special error and take the
1749 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1750 * will simply report the error.
1751 */
1752 l->l_dupfd = minor(dev); /* XXX */
1753 return EDUPFD;
1754 }
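/*
 * Seen from userland the effect is that of a dup(), subject to the
 * mode check in dupfdopen():
 *
 *	fd = open("/dev/fd/0", O_RDONLY);
 *
 * duplicates descriptor 0 rather than opening a new vnode, because the
 * EDUPFD returned here comes back out of vn_open() and is routed to
 * dupfdopen() with l->l_dupfd == 0.
 */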
1755
1756 const struct cdevsw filedesc_cdevsw = {
1757 filedescopen, noclose, noread, nowrite, noioctl,
1758 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
1759 };
1760
1761 /*
1762 * Duplicate the specified descriptor to a free descriptor.
1763 *
1764 * 'indx' has been fdalloc'ed (and will be fdremove'ed on error) by the caller.
1765 */
1766 int
1767 dupfdopen(struct lwp *l, int indx, int dfd, int mode, int error)
1768 {
1769 struct proc *p = l->l_proc;
1770 struct filedesc *fdp;
1771 struct file *wfp;
1772
1773 fdp = p->p_fd;
1774
1775 /* should be cleared by the caller */
1776 KASSERT(fdp->fd_ofiles[indx] == NULL);
1777
1778 /*
1779 * If the to-be-dup'd fd number is greater than the allowed number
1780 * of file descriptors, or the fd to be dup'd has already been
1781 * closed, reject.
1782 */
1783
1784 /*
1785 * Note, in the case of indx == dfd, fd_getfile below returns NULL.
1786 */
1787 if ((wfp = fd_getfile(fdp, dfd)) == NULL)
1788 return (EBADF);
1789
1790 FILE_USE(wfp);
1791
1792 /*
1793 * There are two cases of interest here.
1794 *
1795 * For EDUPFD simply dup (dfd) to file descriptor
1796 * (indx) and return.
1797 *
1798 * For EMOVEFD steal away the file structure from (dfd) and
1799 * store it in (indx). (dfd) is effectively closed by
1800 * this operation.
1801 *
1802 * Any other error code is just returned.
1803 */
1804 switch (error) {
1805 case EDUPFD:
1806 /*
1807 * Check that the mode the file is being opened for is a
1808 * subset of the mode of the existing descriptor.
1809 */
1810 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
1811 FILE_UNUSE(wfp, l);
1812 return (EACCES);
1813 }
1814 rw_enter(&fdp->fd_lock, RW_WRITER);
1815 fdp->fd_ofiles[indx] = wfp;
1816 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1817 rw_exit(&fdp->fd_lock);
1818 FILE_LOCK(wfp);
1819 wfp->f_count++;
1820 /* 'indx' has been fd_used'ed by caller */
1821 FILE_UNUSE_HAVELOCK(wfp, l);
1822 return (0);
1823
1824 case EMOVEFD:
1825 /*
1826 * Steal away the file pointer from dfd, and stuff it into indx.
1827 */
1828 rw_enter(&fdp->fd_lock, RW_WRITER);
1829 fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
1830 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1831 fdp->fd_ofiles[dfd] = NULL;
1832 fdp->fd_ofileflags[dfd] = 0;
1833 /*
1834 * Complete the clean up of the filedesc structure by
1835 * recomputing the various hints.
1836 */
1837 /* 'indx' has been fd_used'ed by caller */
1838 fd_unused(fdp, dfd);
1839 rw_exit(&fdp->fd_lock);
1840 FILE_UNUSE(wfp, l);
1841 return (0);
1842
1843 default:
1844 FILE_UNUSE(wfp, l);
1845 return (error);
1846 }
1847 /* NOTREACHED */
1848 }
1849
1850 /*
1851 * Close any files on exec?
1852 */
1853 void
1854 fdcloseexec(struct lwp *l)
1855 {
1856 struct proc *p = l->l_proc;
1857 struct filedesc *fdp;
1858 int fd;
1859
1860 fdunshare(l);
1861 cwdunshare(p);
1862
1863 if (p->p_cwdi->cwdi_edir)
1864 vrele(p->p_cwdi->cwdi_edir);
1865
1866 fdp = p->p_fd;
1867 for (fd = 0; fd <= fdp->fd_lastfile; fd++)
1868 if (fdp->fd_ofileflags[fd] & UF_EXCLOSE)
1869 (void) fdrelease(l, fd);
1870 }
1871
1872 /*
1873 * It is unsafe for set[ug]id processes to be started with file
1874 * descriptors 0..2 closed, as these descriptors are given implicit
1875 * significance in the Standard C library. fdcheckstd() will create a
1876 * descriptor referencing /dev/null for each of stdin, stdout, and
1877 * stderr that is not already open.
1878 */
1879 #define CHECK_UPTO 3
1880 int
1881 fdcheckstd(struct lwp *l)
1882 {
1883 struct proc *p;
1884 struct nameidata nd;
1885 struct filedesc *fdp;
1886 struct file *fp;
1887 struct file *devnullfp = NULL; /* Quell compiler warning */
1888 struct proc *pp;
1889 register_t retval;
1890 int fd, i, error, flags = FREAD|FWRITE, devnull = -1;
1891 char closed[CHECK_UPTO * 3 + 1], which[3 + 1];
1892
1893 p = l->l_proc;
1894 closed[0] = '\0';
1895 if ((fdp = p->p_fd) == NULL)
1896 return (0);
1897 for (i = 0; i < CHECK_UPTO; i++) {
1898 if (fdp->fd_ofiles[i] != NULL)
1899 continue;
1900 snprintf(which, sizeof(which), ",%d", i);
1901 strlcat(closed, which, sizeof(closed));
1902 if (devnullfp == NULL) {
1903 if ((error = falloc(l, &fp, &fd)) != 0)
1904 return (error);
1905 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/dev/null");
1906 if ((error = vn_open(&nd, flags, 0)) != 0) {
1907 FILE_UNUSE(fp, l);
1908 ffree(fp);
1909 fdremove(p->p_fd, fd);
1910 return (error);
1911 }
1912 fp->f_data = nd.ni_vp;
1913 fp->f_flag = flags;
1914 fp->f_ops = &vnops;
1915 fp->f_type = DTYPE_VNODE;
1916 VOP_UNLOCK(nd.ni_vp, 0);
1917 devnull = fd;
1918 devnullfp = fp;
1919 FILE_SET_MATURE(fp);
1920 } else {
1921 restart:
1922 if ((error = fdalloc(p, 0, &fd)) != 0) {
1923 if (error == ENOSPC) {
1924 fdexpand(p);
1925 goto restart;
1926 }
1927 return (error);
1928 }
1929
1930 FILE_LOCK(devnullfp);
1931 FILE_USE(devnullfp);
1932 /* finishdup() will unuse the descriptors for us */
1933 if ((error = finishdup(l, devnull, fd, &retval)) != 0)
1934 return (error);
1935 }
1936 }
1937 if (devnullfp)
1938 FILE_UNUSE(devnullfp, l);
1939 if (closed[0] != '\0') {
1940 mutex_enter(&proclist_lock);
1941 pp = p->p_pptr;
1942 mutex_enter(&pp->p_mutex);
1943 log(LOG_WARNING, "set{u,g}id pid %d (%s) "
1944 "was invoked by uid %d ppid %d (%s) "
1945 "with fd %s closed\n",
1946 p->p_pid, p->p_comm, kauth_cred_geteuid(pp->p_cred),
1947 pp->p_pid, pp->p_comm, &closed[1]);
1948 mutex_exit(&pp->p_mutex);
1949 mutex_exit(&proclist_lock);
1950 }
1951 return (0);
1952 }
1953 #undef CHECK_UPTO
1954
1955 /*
1956  * Set the descriptor owner. If the owner is a process, 'pgid' is
1957  * set to the (positive) process ID. If the owner is a process
1958  * group, 'pgid' is set to -pg_id.
1959 */
1960 int
1961 fsetown(struct proc *p, pid_t *pgid, int cmd, const void *data)
1962 {
1963 int id = *(const int *)data;
1964 int error;
1965
1966 switch (cmd) {
1967 case TIOCSPGRP:
1968 if (id < 0)
1969 return (EINVAL);
1970 id = -id;
1971 break;
1972 default:
1973 break;
1974 }
1975
1976 if (id > 0 && !pfind(id))
1977 return (ESRCH);
1978 else if (id < 0 && (error = pgid_in_session(p, -id)))
1979 return (error);
1980
1981 *pgid = id;
1982 return (0);
1983 }
1984
1985 /*
1986  * Return descriptor owner information. If the value is positive,
1987  * it is a process ID. If it is negative, it is a process group ID
1988  * and needs the sign removed before use.
1989 */
1990 int
1991 fgetown(struct proc *p, pid_t pgid, int cmd, void *data)
1992 {
1993 switch (cmd) {
1994 case TIOCGPGRP:
1995 *(int *)data = -pgid;
1996 break;
1997 default:
1998 *(int *)data = pgid;
1999 break;
2000 }
2001 return (0);
2002 }
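/*
 * Worked example of the sign convention shared by fsetown() and
 * fgetown(): TIOCSPGRP with process group 123 stores *pgid = -123,
 * while the F_SETOWN/FIOSETOWN path passes the value through unchanged
 * (a caller wanting process group 123 passes -123 itself, a caller
 * wanting process 123 passes 123).  On the way back out, TIOCGPGRP
 * negates the stored value and returns 123, whereas F_GETOWN/FIOGETOWN
 * returns the raw -123.
 */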
2003
2004 /*
2005 * Send signal to descriptor owner, either process or process group.
2006 */
2007 void
2008 fownsignal(pid_t pgid, int signo, int code, int band, void *fdescdata)
2009 {
2010 struct proc *p1;
2011 struct pgrp *pgrp;
2012 ksiginfo_t ksi;
2013
2014 KSI_INIT(&ksi);
2015 ksi.ksi_signo = signo;
2016 ksi.ksi_code = code;
2017 ksi.ksi_band = band;
2018
2019 /*
2020 * Since we may be called from an interrupt context, we must use
2021 * the proclist_mutex.
2022 */
2023 mutex_enter(&proclist_mutex);
2024 if (pgid > 0 && (p1 = p_find(pgid, PFIND_LOCKED)))
2025 kpsignal(p1, &ksi, fdescdata);
2026 else if (pgid < 0 && (pgrp = pg_find(-pgid, PFIND_LOCKED)))
2027 kpgsignal(pgrp, &ksi, fdescdata, 0);
2028 mutex_exit(&proclist_mutex);
2029 }
2030
2031 int
2032 fdclone(struct lwp *l, struct file *fp, int fd, int flag,
2033 const struct fileops *fops, void *data)
2034 {
2035 fp->f_flag = flag;
2036 fp->f_type = DTYPE_MISC;
2037 fp->f_ops = fops;
2038 fp->f_data = data;
2039
2040 l->l_dupfd = fd;
2041
2042 FILE_SET_MATURE(fp);
2043 FILE_UNUSE(fp, l);
2044 return EMOVEFD;
2045 }
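/*
 * Sketch of a cloning device's open routine built on fdclone() (all
 * "mumble" names are hypothetical; only falloc() and fdclone() are
 * real).  The EMOVEFD propagated back through vn_open() makes
 * dupfdopen() move the freshly constructed file into the descriptor
 * that the application finally receives:
 *
 *	static int
 *	mumbleopen(dev_t dev, int flag, int mode, struct lwp *l)
 *	{
 *		struct mumble_softc *sc;
 *		struct file *fp;
 *		int fd, error;
 *
 *		if ((error = falloc(l, &fp, &fd)) != 0)
 *			return error;
 *		sc = mumble_new_instance(dev);
 *		return fdclone(l, fp, fd, flag, &mumble_fileops, sc);
 *	}
 */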
2046
2047 /* ARGSUSED */
2048 int
2049 fnullop_fcntl(struct file *fp, u_int cmd, void *data, struct lwp *l)
2050 {
2051
2052 if (cmd == F_SETFL)
2053 return 0;
2054
2055 return EOPNOTSUPP;
2056 }
2057
2058 /* ARGSUSED */
2059 int
2060 fnullop_poll(struct file *fp, int which, struct lwp *l)
2061 {
2062
2063 return 0;
2064 }
2065
2066
2067 /* ARGSUSED */
2068 int
2069 fnullop_kqfilter(struct file *fp, struct knote *kn)
2070 {
2071
2072 return 0;
2073 }
2074
2075 /* ARGSUSED */
2076 int
2077 fbadop_read(struct file *fp, off_t *offset, struct uio *uio,
2078 kauth_cred_t cred, int flags)
2079 {
2080
2081 return EOPNOTSUPP;
2082 }
2083
2084 /* ARGSUSED */
2085 int
2086 fbadop_write(struct file *fp, off_t *offset, struct uio *uio,
2087 kauth_cred_t cred, int flags)
2088 {
2089
2090 return EOPNOTSUPP;
2091 }
2092
2093 /* ARGSUSED */
2094 int
2095 fbadop_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
2096 {
2097
2098 return EOPNOTSUPP;
2099 }
2100
2101 /* ARGSUSED */
2102 int
2103 fbadop_stat(struct file *fp, struct stat *sb, struct lwp *l)
2104 {
2105
2106 return EOPNOTSUPP;
2107 }
2108
2109 /* ARGSUSED */
2110 int
2111 fbadop_close(struct file *fp, struct lwp *l)
2112 {
2113
2114 return EOPNOTSUPP;
2115 }
2116