/*	$NetBSD: kern_descrip.c,v 1.191 2009/05/23 18:28:05 ad Exp $	*/
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1982, 1986, 1989, 1991, 1993
34 * The Regents of the University of California. All rights reserved.
35 * (c) UNIX System Laboratories, Inc.
36 * All or some portions of this file are derived from material licensed
37 * to the University of California by American Telephone and Telegraph
38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39 * the permission of UNIX System Laboratories, Inc.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95
66 */
67
68 /*
69 * File descriptor management.
70 */
71
72 #include <sys/cdefs.h>
73 __KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.191 2009/05/23 18:28:05 ad Exp $");
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/proc.h>
80 #include <sys/file.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/stat.h>
84 #include <sys/ioctl.h>
85 #include <sys/fcntl.h>
86 #include <sys/pool.h>
87 #include <sys/unistd.h>
88 #include <sys/resourcevar.h>
89 #include <sys/conf.h>
90 #include <sys/event.h>
91 #include <sys/kauth.h>
92 #include <sys/atomic.h>
93 #include <sys/syscallargs.h>
94 #include <sys/cpu.h>
95 #include <sys/kmem.h>
96 #include <sys/vnode.h>
97
/* Pool cache constructors/destructors for the three object types below. */
static int	file_ctor(void *, void *, int);
static void	file_dtor(void *, void *);
static int	fdfile_ctor(void *, void *, int);
static void	fdfile_dtor(void *, void *);
static int	filedesc_ctor(void *, void *, int);
static void	filedesc_dtor(void *, void *);
static int	filedescopen(dev_t, int, int, lwp_t *);

kmutex_t	filelist_lock;	/* lock on filehead */
struct filelist	filehead;	/* head of list of open files */
u_int		nfiles;		/* actual number of open files */

/* Caches for file_t, fdfile_t and filedesc_t; created in fd_sys_init(). */
static pool_cache_t filedesc_cache;
static pool_cache_t file_cache;
static pool_cache_t fdfile_cache;

/* Device switch for /dev/fd; only open is supported. */
const struct cdevsw filedesc_cdevsw = {
	filedescopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER | D_MPSAFE,
};

/* For ease of reading. */
__strong_alias(fd_putvnode,fd_putfile)
__strong_alias(fd_putsock,fd_putfile)
122
/*
 * Initialize the descriptor system: the global file list lock and the
 * three pool caches.  Called once at boot, before any descriptors are
 * created.
 */
void
fd_sys_init(void)
{

	mutex_init(&filelist_lock, MUTEX_DEFAULT, IPL_NONE);

	file_cache = pool_cache_init(sizeof(file_t), coherency_unit, 0,
	    0, "file", NULL, IPL_NONE, file_ctor, file_dtor, NULL);
	KASSERT(file_cache != NULL);

	/* PR_LARGECACHE: fdfile_t objects are allocated/freed frequently. */
	fdfile_cache = pool_cache_init(sizeof(fdfile_t), coherency_unit, 0,
	    PR_LARGECACHE, "fdfile", NULL, IPL_NONE, fdfile_ctor, fdfile_dtor,
	    NULL);
	KASSERT(fdfile_cache != NULL);

	filedesc_cache = pool_cache_init(sizeof(filedesc_t), coherency_unit,
	    0, 0, "filedesc", NULL, IPL_NONE, filedesc_ctor, filedesc_dtor,
	    NULL);
	KASSERT(filedesc_cache != NULL);
}
146
/*
 * Find the index of the first zero bit >= `want' in the given bitmap
 * (where a zero bit marks a free slot).  `bits' is the total number of
 * bits represented by the map.  Returns -1 if no free slot was found.
 * Used on both fd_lomap (descriptors) and fd_himap (lomap words).
 */
static int
fd_next_zero(filedesc_t *fdp, uint32_t *bitmap, int want, u_int bits)
{
	int i, off, maxoff;
	uint32_t sub;

	KASSERT(mutex_owned(&fdp->fd_lock));

	if (want > bits)
		return -1;

	/* Word containing `want', and bit offset of `want' within it. */
	off = want >> NDENTRYSHIFT;
	i = want & NDENTRYMASK;
	if (i) {
		/*
		 * Partial first word: treat bits below `want' as taken
		 * by OR-ing in a low-order mask.
		 */
		sub = bitmap[off] | ((u_int)~0 >> (NDENTRIES - i));
		if (sub != ~0)
			goto found;
		off++;
	}

	/* Scan whole words for one that is not completely full. */
	maxoff = NDLOSLOTS(bits);
	while (off < maxoff) {
		if ((sub = bitmap[off]) != ~0)
			goto found;
		off++;
	}

	return (-1);

 found:
	/* ffs() is 1-based; ~sub turns the first zero into the first one. */
	return (off << NDENTRYSHIFT) + ffs(~sub) - 1;
}
179
/*
 * Return the highest allocated descriptor number strictly below `last',
 * or -1 if none are allocated.  Used to recompute fd_lastfile after a
 * descriptor is released.
 */
static int
fd_last_set(filedesc_t *fd, int last)
{
	int off, i;
	fdfile_t **ofiles = fd->fd_ofiles;
	uint32_t *bitmap = fd->fd_lomap;

	KASSERT(mutex_owned(&fd->fd_lock));

	off = (last - 1) >> NDENTRYSHIFT;

	/* Skip completely empty bitmap words from the top down. */
	while (off >= 0 && !bitmap[off])
		off--;

	if (off < 0)
		return (-1);

	/* Highest possible index within the non-empty word, capped at last-1. */
	i = ((off + 1) << NDENTRYSHIFT) - 1;
	if (i >= last)
		i = last - 1;

	/* XXX should use bitmap */
	/* XXXAD does not work for fd_copy() */
	while (i > 0 && (ofiles[i] == NULL || !ofiles[i]->ff_allocated))
		i--;

	return (i);
}
208
/*
 * Mark descriptor `fd' as allocated in fdp: set ff_allocated, update
 * the two-level bitmap and fd_lastfile/fd_nused bookkeeping.  The slot's
 * fdfile_t must already be present and empty.  Caller holds fd_lock.
 */
void
fd_used(filedesc_t *fdp, unsigned fd)
{
	u_int off = fd >> NDENTRYSHIFT;
	fdfile_t *ff;

	ff = fdp->fd_ofiles[fd];

	KASSERT(mutex_owned(&fdp->fd_lock));
	KASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) == 0);
	KASSERT(ff != NULL);
	KASSERT(ff->ff_file == NULL);
	KASSERT(!ff->ff_allocated);

	ff->ff_allocated = 1;
	fdp->fd_lomap[off] |= 1 << (fd & NDENTRYMASK);
	if (fdp->fd_lomap[off] == ~0) {
		/* Low map word now full: propagate to the high map. */
		KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
		    (1 << (off & NDENTRYMASK))) == 0);
		fdp->fd_himap[off >> NDENTRYSHIFT] |= 1 << (off & NDENTRYMASK);
	}

	if ((int)fd > fdp->fd_lastfile) {
		fdp->fd_lastfile = fd;
	}

	if (fd >= NDFDFILE) {
		/* fd_nused counts only dynamically allocated slots. */
		fdp->fd_nused++;
	} else {
		/* Built-in slots always use the embedded fdfile_t. */
		KASSERT(ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
	}
}
241
/*
 * Mark descriptor `fd' as free in fdp: the inverse of fd_used().  The
 * file must already have been detached from the slot (ff_file == NULL).
 * Updates the bitmaps, fd_freefile, fd_lastfile and fd_nused.
 */
void
fd_unused(filedesc_t *fdp, unsigned fd)
{
	u_int off = fd >> NDENTRYSHIFT;
	fdfile_t *ff;

	ff = fdp->fd_ofiles[fd];

	/*
	 * Don't assert the lock is held here, as we may be copying
	 * the table during exec() and it is not needed there.
	 * procfs and sysctl are locked out by proc::p_reflock.
	 *
	 * KASSERT(mutex_owned(&fdp->fd_lock));
	 */
	KASSERT(ff != NULL);
	KASSERT(ff->ff_file == NULL);
	KASSERT(ff->ff_allocated);

	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}

	/* If the low map word was full, clear its bit in the high map. */
	if (fdp->fd_lomap[off] == ~0) {
		KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
		    (1 << (off & NDENTRYMASK))) != 0);
		fdp->fd_himap[off >> NDENTRYSHIFT] &=
		    ~(1 << (off & NDENTRYMASK));
	}
	KASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0);
	fdp->fd_lomap[off] &= ~(1 << (fd & NDENTRYMASK));
	ff->ff_allocated = 0;

	KASSERT(fd <= fdp->fd_lastfile);
	if (fd == fdp->fd_lastfile) {
		/* Released the highest descriptor: scan for the new high. */
		fdp->fd_lastfile = fd_last_set(fdp, fd);
	}

	if (fd >= NDFDFILE) {
		KASSERT(fdp->fd_nused > 0);
		fdp->fd_nused--;
	} else {
		KASSERT(ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
	}
}
287
/*
 * Custom version of fd_unused() for fd_copy(), where the descriptor
 * table is not yet fully initialized.  Only updates fd_freefile and
 * the bitmaps; skips the ff_allocated/fd_lastfile/fd_nused bookkeeping
 * that fd_unused() performs.
 */
static inline void
fd_zap(filedesc_t *fdp, unsigned fd)
{
	u_int off = fd >> NDENTRYSHIFT;

	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}

	/* If the low map word was full, clear its bit in the high map. */
	if (fdp->fd_lomap[off] == ~0) {
		KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
		    (1 << (off & NDENTRYMASK))) != 0);
		fdp->fd_himap[off >> NDENTRYSHIFT] &=
		    ~(1 << (off & NDENTRYMASK));
	}
	KASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0);
	fdp->fd_lomap[off] &= ~(1 << (fd & NDENTRYMASK));
}
310
311 bool
312 fd_isused(filedesc_t *fdp, unsigned fd)
313 {
314 u_int off = fd >> NDENTRYSHIFT;
315
316 KASSERT(fd < fdp->fd_nfiles);
317
318 return (fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0;
319 }
320
/*
 * Look up the file structure corresponding to a file descriptor
 * and return the file, holding a reference on the descriptor.
 * Returns NULL if the descriptor is out of range, unallocated, or
 * closing.  This is the lock-free fast path; release the reference
 * with fd_putfile().
 */
inline file_t *
fd_getfile(unsigned fd)
{
	filedesc_t *fdp;
	fdfile_t *ff;
	file_t *fp;

	fdp = curlwp->l_fd;

	/*
	 * Look up the fdfile structure representing this descriptor.
	 * Ensure that we see fd_nfiles before fd_ofiles since we
	 * are doing this unlocked.  See fd_tryexpand().
	 */
	if (__predict_false(fd >= fdp->fd_nfiles)) {
		return NULL;
	}
	membar_consumer();
	ff = fdp->fd_ofiles[fd];
	KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
	if (__predict_false(ff == NULL)) {
		return NULL;
	}

	/* Now get a reference to the descriptor. */
	if (fdp->fd_refcnt == 1) {
		/*
		 * Single threaded: don't need to worry about concurrent
		 * access (other than earlier calls to kqueue, which may
		 * hold a reference to the descriptor).
		 */
		ff->ff_refcnt++;
	} else {
		/*
		 * Issue a memory barrier to ensure that we acquire the file
		 * pointer _after_ adding a reference.  If no memory
		 * barrier, we could fetch a stale pointer.
		 */
		atomic_inc_uint(&ff->ff_refcnt);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_enter();
#endif
	}

	/*
	 * If the file is not open or is being closed then put the
	 * reference back.
	 */
	fp = ff->ff_file;
	if (__predict_true(fp != NULL)) {
		return fp;
	}
	fd_putfile(fd);
	return NULL;
}
380
/*
 * Release a reference to a file descriptor acquired with fd_getfile().
 * If the descriptor is being closed and this was the last reference,
 * joins the close via fd_close().
 */
void
fd_putfile(unsigned fd)
{
	filedesc_t *fdp;
	fdfile_t *ff;
	u_int u, v;

	fdp = curlwp->l_fd;
	ff = fdp->fd_ofiles[fd];

	KASSERT(fd < fdp->fd_nfiles);
	KASSERT(ff != NULL);
	KASSERT((ff->ff_refcnt & FR_MASK) > 0);
	KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);

	if (fdp->fd_refcnt == 1) {
		/*
		 * Single threaded: don't need to worry about concurrent
		 * access (other than earlier calls to kqueue, which may
		 * hold a reference to the descriptor).
		 */
		if (__predict_false((ff->ff_refcnt & FR_CLOSING) != 0)) {
			fd_close(fd);
			return;
		}
		ff->ff_refcnt--;
		return;
	}

	/*
	 * Ensure that any use of the file is complete and globally
	 * visible before dropping the final reference.  If no membar,
	 * the current CPU could still access memory associated with
	 * the file after it has been freed or recycled by another
	 * CPU.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/*
	 * Be optimistic and start out with the assumption that no other
	 * threads are trying to close the descriptor.  If the CAS fails,
	 * we lost a race and/or it's being closed.
	 */
	for (u = ff->ff_refcnt & FR_MASK;; u = v) {
		v = atomic_cas_uint(&ff->ff_refcnt, u, u - 1);
		if (__predict_true(u == v)) {
			/* CAS succeeded: reference dropped, no close racing. */
			return;
		}
		if (__predict_false((v & FR_CLOSING) != 0)) {
			break;
		}
		/* Lost a benign race; retry with the freshly read value. */
	}

	/* Another thread is waiting to close the file: join it. */
	(void)fd_close(fd);
}
442
443 /*
444 * Convenience wrapper around fd_getfile() that returns reference
445 * to a vnode.
446 */
447 int
448 fd_getvnode(unsigned fd, file_t **fpp)
449 {
450 vnode_t *vp;
451 file_t *fp;
452
453 fp = fd_getfile(fd);
454 if (__predict_false(fp == NULL)) {
455 return EBADF;
456 }
457 if (__predict_false(fp->f_type != DTYPE_VNODE)) {
458 fd_putfile(fd);
459 return EINVAL;
460 }
461 vp = fp->f_data;
462 if (__predict_false(vp->v_type == VBAD)) {
463 /* XXX Is this case really necessary? */
464 fd_putfile(fd);
465 return EBADF;
466 }
467 *fpp = fp;
468 return 0;
469 }
470
471 /*
472 * Convenience wrapper around fd_getfile() that returns reference
473 * to a socket.
474 */
475 int
476 fd_getsock(unsigned fd, struct socket **sop)
477 {
478 file_t *fp;
479
480 fp = fd_getfile(fd);
481 if (__predict_false(fp == NULL)) {
482 return EBADF;
483 }
484 if (__predict_false(fp->f_type != DTYPE_SOCKET)) {
485 fd_putfile(fd);
486 return ENOTSOCK;
487 }
488 *sop = fp->f_data;
489 return 0;
490 }
491
492 /*
493 * Look up the file structure corresponding to a file descriptor
494 * and return it with a reference held on the file, not the
495 * descriptor.
496 *
497 * This is heavyweight and only used when accessing descriptors
498 * from a foreign process. The caller must ensure that `p' does
499 * not exit or fork across this call.
500 *
501 * To release the file (not descriptor) reference, use closef().
502 */
503 file_t *
504 fd_getfile2(proc_t *p, unsigned fd)
505 {
506 filedesc_t *fdp;
507 fdfile_t *ff;
508 file_t *fp;
509
510 fdp = p->p_fd;
511 mutex_enter(&fdp->fd_lock);
512 if (fd > fdp->fd_nfiles) {
513 mutex_exit(&fdp->fd_lock);
514 return NULL;
515 }
516 if ((ff = fdp->fd_ofiles[fd]) == NULL) {
517 mutex_exit(&fdp->fd_lock);
518 return NULL;
519 }
520 mutex_enter(&ff->ff_lock);
521 if ((fp = ff->ff_file) == NULL) {
522 mutex_exit(&ff->ff_lock);
523 mutex_exit(&fdp->fd_lock);
524 return NULL;
525 }
526 mutex_enter(&fp->f_lock);
527 fp->f_count++;
528 mutex_exit(&fp->f_lock);
529 mutex_exit(&ff->ff_lock);
530 mutex_exit(&fdp->fd_lock);
531
532 return fp;
533 }
534
/*
 * Internal form of close.  Must be called with a reference to the
 * descriptor, and will drop the reference.  When all descriptor
 * references are dropped, releases the descriptor slot and a single
 * reference to the file structure.
 */
int
fd_close(unsigned fd)
{
	struct flock lf;
	filedesc_t *fdp;
	fdfile_t *ff;
	file_t *fp;
	proc_t *p;
	lwp_t *l;

	l = curlwp;
	p = l->l_proc;
	fdp = l->l_fd;
	ff = fdp->fd_ofiles[fd];

	KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);

	mutex_enter(&ff->ff_lock);
	KASSERT((ff->ff_refcnt & FR_MASK) > 0);
	if (ff->ff_file == NULL) {
		/*
		 * Another user of the file is already closing, and is
		 * waiting for other users of the file to drain.  Release
		 * our reference, and wake up the closer.
		 */
		atomic_dec_uint(&ff->ff_refcnt);
		cv_broadcast(&ff->ff_closing);
		mutex_exit(&ff->ff_lock);

		/*
		 * An application error, so pretend that the descriptor
		 * was already closed.  We can't safely wait for it to
		 * be closed without potentially deadlocking.
		 */
		return (EBADF);
	}
	KASSERT((ff->ff_refcnt & FR_CLOSING) == 0);

	/*
	 * There may be multiple users of this file within the process.
	 * Notify existing and new users that the file is closing.  This
	 * will prevent them from adding additional uses to this file
	 * while we are closing it.
	 */
	fp = ff->ff_file;
	ff->ff_file = NULL;
	ff->ff_exclose = false;

	/*
	 * We expect the caller to hold a descriptor reference - drop it.
	 * The reference count may increase beyond zero at this point due
	 * to an erroneous descriptor reference by an application, but
	 * fd_getfile() will notice that the file is being closed and drop
	 * the reference again.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_producer();
#endif
	if (__predict_false(atomic_dec_uint_nv(&ff->ff_refcnt) != 0)) {
		/*
		 * Wait for other references to drain.  This is typically
		 * an application error - the descriptor is being closed
		 * while still in use.
		 *
		 */
		atomic_or_uint(&ff->ff_refcnt, FR_CLOSING);

		/*
		 * Remove any knotes attached to the file.  A knote
		 * attached to the descriptor can hold references on it.
		 */
		mutex_exit(&ff->ff_lock);
		if (!SLIST_EMPTY(&ff->ff_knlist)) {
			knote_fdclose(fd);
		}

		/* Try to drain out descriptor references. */
		(*fp->f_ops->fo_drain)(fp);
		mutex_enter(&ff->ff_lock);

		/*
		 * We need to see the count drop to zero at least once,
		 * in order to ensure that all pre-existing references
		 * have been drained.  New references past this point are
		 * of no interest.
		 */
		while ((ff->ff_refcnt & FR_MASK) != 0) {
			cv_wait(&ff->ff_closing, &ff->ff_lock);
		}
		atomic_and_uint(&ff->ff_refcnt, ~FR_CLOSING);
	} else {
		/* If no references, there must be no knotes. */
		KASSERT(SLIST_EMPTY(&ff->ff_knlist));
	}
	mutex_exit(&ff->ff_lock);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if ((p->p_flag & PK_ADVLOCK) != 0 && fp->f_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void)VOP_ADVLOCK(fp->f_data, p, F_UNLCK, &lf, F_POSIX);
	}


	/* Free descriptor slot. */
	mutex_enter(&fdp->fd_lock);
	fd_unused(fdp, fd);
	mutex_exit(&fdp->fd_lock);

	/* Now drop reference to the file itself. */
	return closef(fp);
}
662
663 /*
664 * Duplicate a file descriptor.
665 */
666 int
667 fd_dup(file_t *fp, int minfd, int *newp, bool exclose)
668 {
669 proc_t *p;
670 int error;
671
672 p = curproc;
673
674 while ((error = fd_alloc(p, minfd, newp)) != 0) {
675 if (error != ENOSPC) {
676 return error;
677 }
678 fd_tryexpand(p);
679 }
680
681 curlwp->l_fd->fd_ofiles[*newp]->ff_exclose = exclose;
682 fd_affix(p, fp, *newp);
683 return 0;
684 }
685
/*
 * dup2 operation: install `fp' at the exact descriptor number `new'
 * in the current process, closing any file already open there.
 */
int
fd_dup2(file_t *fp, unsigned new)
{
	filedesc_t *fdp;
	fdfile_t *ff;

	fdp = curlwp->l_fd;

	/*
	 * Ensure there are enough slots in the descriptor table,
	 * and allocate an fdfile_t up front in case we need it.
	 */
	while (new >= fdp->fd_nfiles) {
		fd_tryexpand(curproc);
	}
	ff = pool_cache_get(fdfile_cache, PR_WAITOK);

	/*
	 * If there is already a file open, close it.  If the file is
	 * half open, wait for it to be constructed before closing it.
	 * XXX Potential for deadlock here?
	 */
	mutex_enter(&fdp->fd_lock);
	while (fd_isused(fdp, new)) {
		mutex_exit(&fdp->fd_lock);
		if (fd_getfile(new) != NULL) {
			(void)fd_close(new);
		} else {
			/* XXX Crummy, but unlikely to happen. */
			kpause("dup2", false, 1, NULL);
		}
		mutex_enter(&fdp->fd_lock);
	}
	if (fdp->fd_ofiles[new] == NULL) {
		/* Use the pre-allocated fdfile_t for this slot. */
		KASSERT(new >= NDFDFILE);
		fdp->fd_ofiles[new] = ff;
		ff = NULL;
	}
	fd_used(fdp, new);
	mutex_exit(&fdp->fd_lock);

	/* Slot is now allocated.  Insert copy of the file. */
	fd_affix(curproc, fp, new);
	if (ff != NULL) {
		/* Pre-allocated fdfile_t was not needed; return it. */
		pool_cache_put(fdfile_cache, ff);
	}
	return 0;
}
737
/*
 * Drop reference to a file structure.  When the last reference goes,
 * releases any BSD-style (flock) locks, calls the file's close method
 * and frees the file.  Returns the error from the close method, if any.
 */
int
closef(file_t *fp)
{
	struct flock lf;
	int error;

	/*
	 * Drop reference.  If referenced elsewhere it's still open
	 * and we have nothing more to do.
	 */
	mutex_enter(&fp->f_lock);
	KASSERT(fp->f_count > 0);
	if (--fp->f_count > 0) {
		mutex_exit(&fp->f_lock);
		return 0;
	}
	KASSERT(fp->f_count == 0);
	mutex_exit(&fp->f_lock);

	/* We held the last reference - release locks, close and free. */
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void)VOP_ADVLOCK(fp->f_data, fp, F_UNLCK, &lf, F_FLOCK);
	}
	if (fp->f_ops != NULL) {
		error = (*fp->f_ops->fo_close)(fp);
	} else {
		/* Half-constructed file with no ops: nothing to close. */
		error = 0;
	}
	KASSERT(fp->f_count == 0);
	KASSERT(fp->f_cred != NULL);
	pool_cache_put(file_cache, fp);

	return error;
}
779
/*
 * Allocate a file descriptor for the process.  On success the new
 * descriptor number is stored in *result and the slot is marked used
 * (but no file is attached yet - see fd_affix()).  Returns ENOSPC if
 * the table must be expanded first (caller calls fd_tryexpand() and
 * retries), or EMFILE at the resource limit.
 */
int
fd_alloc(proc_t *p, int want, int *result)
{
	filedesc_t *fdp;
	int i, lim, last, error;
	u_int off, new;

	KASSERT(p == curproc || p == &proc0);

	fdp = p->p_fd;

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.
	 */
	mutex_enter(&fdp->fd_lock);
	KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
	last = min(fdp->fd_nfiles, lim);
	for (;;) {
		if ((i = want) < fdp->fd_freefile)
			i = fdp->fd_freefile;
		/* First level: find a himap word with a free lomap slot. */
		off = i >> NDENTRYSHIFT;
		new = fd_next_zero(fdp, fdp->fd_himap, off,
		    (last + NDENTRIES - 1) >> NDENTRYSHIFT);
		if (new == -1)
			break;
		/* Second level: find the free bit within that lomap word. */
		i = fd_next_zero(fdp, &fdp->fd_lomap[new],
		    new > off ? 0 : i & NDENTRYMASK, NDENTRIES);
		if (i == -1) {
			/*
			 * Free file descriptor in this block was
			 * below want, try again with higher want.
			 */
			want = (new + 1) << NDENTRYSHIFT;
			continue;
		}
		i += (new << NDENTRYSHIFT);
		if (i >= last) {
			break;
		}
		if (fdp->fd_ofiles[i] == NULL) {
			/* Dynamic slot: allocate its fdfile_t on demand. */
			KASSERT(i >= NDFDFILE);
			fdp->fd_ofiles[i] =
			    pool_cache_get(fdfile_cache, PR_WAITOK);
		}
		KASSERT(fdp->fd_ofiles[i]->ff_refcnt == 0);
		KASSERT(fdp->fd_ofiles[i]->ff_file == NULL);
		fd_used(fdp, i);
		if (want <= fdp->fd_freefile) {
			fdp->fd_freefile = i;
		}
		*result = i;
		mutex_exit(&fdp->fd_lock);
		KASSERT(i >= NDFDFILE ||
		    fdp->fd_ofiles[i] == (fdfile_t *)fdp->fd_dfdfile[i]);
		return 0;
	}

	/* No space in current array.  Let the caller expand and retry. */
	error = (fdp->fd_nfiles >= lim) ? EMFILE : ENOSPC;
	mutex_exit(&fdp->fd_lock);
	return error;
}
847
/*
 * Allocate memory for the open files array.
 *
 * Two hidden uintptr_t slots are placed in front of the array proper:
 * slot [0] is used as the fd_discard list linkage (see fd_tryexpand()),
 * and slot [1] records the allocation size, which fd_ofile_free()
 * cross-checks.  The caller receives a pointer past the hidden header.
 */
static fdfile_t **
fd_ofile_alloc(int n)
{
	uintptr_t *ptr, sz;

	KASSERT(n > NDFILE);

	sz = (n + 2) * sizeof(uintptr_t);
	ptr = kmem_alloc((size_t)sz, KM_SLEEP);
	ptr[1] = sz;

	return (fdfile_t **)(ptr + 2);
}
864
/*
 * Free an open files array previously obtained from fd_ofile_alloc().
 * `of' points past the two hidden header slots; step back over them
 * and verify the recorded size before freeing.
 */
static void
fd_ofile_free(int n, fdfile_t **of)
{
	uintptr_t *ptr, sz;

	KASSERT(n > NDFILE);

	sz = (n + 2) * sizeof(uintptr_t);
	ptr = (uintptr_t *)of - 2;
	KASSERT(ptr[1] == sz);
	kmem_free(ptr, sz);
}
880
881 /*
882 * Allocate descriptor bitmap.
883 */
884 static void
885 fd_map_alloc(int n, uint32_t **lo, uint32_t **hi)
886 {
887 uint8_t *ptr;
888 size_t szlo, szhi;
889
890 KASSERT(n > NDENTRIES);
891
892 szlo = NDLOSLOTS(n) * sizeof(uint32_t);
893 szhi = NDHISLOTS(n) * sizeof(uint32_t);
894 ptr = kmem_alloc(szlo + szhi, KM_SLEEP);
895 *lo = (uint32_t *)ptr;
896 *hi = (uint32_t *)(ptr + szlo);
897 }
898
899 /*
900 * Free descriptor bitmap.
901 */
902 static void
903 fd_map_free(int n, uint32_t *lo, uint32_t *hi)
904 {
905 size_t szlo, szhi;
906
907 KASSERT(n > NDENTRIES);
908
909 szlo = NDLOSLOTS(n) * sizeof(uint32_t);
910 szhi = NDHISLOTS(n) * sizeof(uint32_t);
911 KASSERT(hi == (uint32_t *)((uint8_t *)lo + szlo));
912 kmem_free(lo, szlo + szhi);
913 }
914
/*
 * Expand a process' descriptor table.  Allocates the new ofiles array
 * (and, when crossing a himap boundary, new bitmaps) without the lock,
 * then swaps them in under fd_lock.  If the table changed size while
 * unlocked, the new allocation is discarded and the caller retries.
 */
void
fd_tryexpand(proc_t *p)
{
	filedesc_t *fdp;
	int i, numfiles, oldnfiles;
	fdfile_t **newofile;
	uint32_t *newhimap, *newlomap;

	KASSERT(p == curproc || p == &proc0);

	fdp = p->p_fd;
	newhimap = NULL;
	newlomap = NULL;
	oldnfiles = fdp->fd_nfiles;

	/* Grow to NDEXTENT first, then double each time. */
	if (oldnfiles < NDEXTENT)
		numfiles = NDEXTENT;
	else
		numfiles = 2 * oldnfiles;

	newofile = fd_ofile_alloc(numfiles);
	if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
		fd_map_alloc(numfiles, &newlomap, &newhimap);
	}

	mutex_enter(&fdp->fd_lock);
	KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
	if (fdp->fd_nfiles != oldnfiles) {
		/* fdp changed; caller must retry */
		mutex_exit(&fdp->fd_lock);
		fd_ofile_free(numfiles, newofile);
		if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
			fd_map_free(numfiles, newlomap, newhimap);
		}
		return;
	}

	/* Copy the existing ofile array and zero the new portion. */
	i = sizeof(fdfile_t *) * fdp->fd_nfiles;
	memcpy(newofile, fdp->fd_ofiles, i);
	memset((uint8_t *)newofile + i, 0, numfiles * sizeof(fdfile_t *) - i);

	/*
	 * Link old ofiles array into list to be discarded.  We defer
	 * freeing until process exit if the descriptor table is visible
	 * to other threads.
	 */
	if (oldnfiles > NDFILE) {
		if (fdp->fd_refcnt > 1) {
			/* Hidden header slot [-2] is the discard linkage. */
			fdp->fd_ofiles[-2] = (void *)fdp->fd_discard;
			fdp->fd_discard = fdp->fd_ofiles - 2;
		} else {
			fd_ofile_free(oldnfiles, fdp->fd_ofiles);
		}
	}

	if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
		/* Copy both bitmaps and zero their new tails. */
		i = NDHISLOTS(oldnfiles) * sizeof(uint32_t);
		memcpy(newhimap, fdp->fd_himap, i);
		memset((uint8_t *)newhimap + i, 0,
		    NDHISLOTS(numfiles) * sizeof(uint32_t) - i);

		i = NDLOSLOTS(oldnfiles) * sizeof(uint32_t);
		memcpy(newlomap, fdp->fd_lomap, i);
		memset((uint8_t *)newlomap + i, 0,
		    NDLOSLOTS(numfiles) * sizeof(uint32_t) - i);

		if (NDHISLOTS(oldnfiles) > NDHISLOTS(NDFILE)) {
			fd_map_free(oldnfiles, fdp->fd_lomap, fdp->fd_himap);
		}
		fdp->fd_himap = newhimap;
		fdp->fd_lomap = newlomap;
	}

	/*
	 * All other modifications must become globally visible before
	 * the change to fd_nfiles.  See fd_getfile().
	 */
	fdp->fd_ofiles = newofile;
	membar_producer();
	fdp->fd_nfiles = numfiles;
	mutex_exit(&fdp->fd_lock);

	KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
}
1003
/*
 * Create a new open file structure and allocate a file descriptor
 * for the current process.  On success *resultfp holds a file with
 * no descriptor reference yet; the caller finishes with fd_affix()
 * or backs out with fd_abort().  Returns ENFILE if the system file
 * limit is hit (see file_ctor()).
 */
int
fd_allocfile(file_t **resultfp, int *resultfd)
{
	kauth_cred_t cred;
	file_t *fp;
	proc_t *p;
	int error;

	p = curproc;

	/* Keep expanding the table until fd_alloc() finds room. */
	while ((error = fd_alloc(p, 0, resultfd)) != 0) {
		if (error != ENOSPC) {
			return error;
		}
		fd_tryexpand(p);
	}

	fp = pool_cache_get(file_cache, PR_WAITOK);
	if (fp == NULL) {
		/* file_ctor() failed: at the system-wide file limit. */
		return ENFILE;
	}
	KASSERT(fp->f_count == 0);
	KASSERT(fp->f_msgcount == 0);
	KASSERT(fp->f_unpcount == 0);

	/* Replace cached credentials if not what we need. */
	cred = curlwp->l_cred;
	if (__predict_false(cred != fp->f_cred)) {
		kauth_cred_free(fp->f_cred);
		kauth_cred_hold(cred);
		fp->f_cred = cred;
	}

	/*
	 * Don't allow recycled files to be scanned.
	 * See uipc_usrreq.c.
	 */
	if (__predict_false((fp->f_flag & FSCAN) != 0)) {
		mutex_enter(&fp->f_lock);
		atomic_and_uint(&fp->f_flag, ~FSCAN);
		mutex_exit(&fp->f_lock);
	}

	fp->f_advice = 0;
	fp->f_offset = 0;
	*resultfp = fp;

	return 0;
}
1057
/*
 * Successful creation of a new descriptor: make visible to the process.
 * Adds a file reference and installs `fp' into the already-allocated
 * slot `fd' (see fd_alloc()/fd_allocfile()).
 */
void
fd_affix(proc_t *p, file_t *fp, unsigned fd)
{
	fdfile_t *ff;
	filedesc_t *fdp;

	KASSERT(p == curproc || p == &proc0);

	/* Add a reference to the file structure. */
	mutex_enter(&fp->f_lock);
	fp->f_count++;
	mutex_exit(&fp->f_lock);

	/*
	 * Insert the new file into the descriptor slot.
	 *
	 * The memory barriers provided by lock activity in this routine
	 * ensure that any updates to the file structure become globally
	 * visible before the file becomes visible to other LWPs in the
	 * current process.
	 */
	fdp = p->p_fd;
	ff = fdp->fd_ofiles[fd];

	KASSERT(ff != NULL);
	KASSERT(ff->ff_file == NULL);
	KASSERT(ff->ff_allocated);
	KASSERT(fd_isused(fdp, fd));
	KASSERT(fd >= NDFDFILE ||
	    fdp->fd_ofiles[fd] == (fdfile_t *)fdp->fd_dfdfile[fd]);

	/* No need to lock in order to make file initially visible. */
	ff->ff_file = fp;
}
1095
/*
 * Abort creation of a new descriptor: free descriptor slot and file.
 * `fp' may be NULL when only the slot needs releasing; if non-NULL it
 * must have no references (never made visible via fd_affix()).
 */
void
fd_abort(proc_t *p, file_t *fp, unsigned fd)
{
	filedesc_t *fdp;
	fdfile_t *ff;

	KASSERT(p == curproc || p == &proc0);

	fdp = p->p_fd;
	ff = fdp->fd_ofiles[fd];

	KASSERT(fd >= NDFDFILE ||
	    fdp->fd_ofiles[fd] == (fdfile_t *)fdp->fd_dfdfile[fd]);

	mutex_enter(&fdp->fd_lock);
	KASSERT(fd_isused(fdp, fd));
	fd_unused(fdp, fd);
	mutex_exit(&fdp->fd_lock);

	if (fp != NULL) {
		KASSERT(fp->f_count == 0);
		KASSERT(fp->f_cred != NULL);
		pool_cache_put(file_cache, fp);
	}
}
1124
/*
 * Pool cache constructor for file_t.  Enforces the system-wide open
 * file limit (maxfiles), links the new file onto the global list and
 * caches the creating LWP's credentials.  Returns ENFILE when the
 * table is full, which makes pool_cache_get() return NULL.
 */
static int
file_ctor(void *arg, void *obj, int flags)
{
	file_t *fp = obj;

	memset(fp, 0, sizeof(*fp));

	mutex_enter(&filelist_lock);
	if (__predict_false(nfiles >= maxfiles)) {
		mutex_exit(&filelist_lock);
		tablefull("file", "increase kern.maxfiles or MAXFILES");
		return ENFILE;
	}
	nfiles++;
	LIST_INSERT_HEAD(&filehead, fp, f_list);
	mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
	fp->f_cred = curlwp->l_cred;
	kauth_cred_hold(fp->f_cred);
	mutex_exit(&filelist_lock);

	return 0;
}
1147
/*
 * Pool cache destructor for file_t: unlink from the global file list
 * and release the cached credential reference.
 */
static void
file_dtor(void *arg, void *obj)
{
	file_t *fp = obj;

	mutex_enter(&filelist_lock);
	nfiles--;
	LIST_REMOVE(fp, f_list);
	mutex_exit(&filelist_lock);

	kauth_cred_free(fp->f_cred);
	mutex_destroy(&fp->f_lock);
}
1161
1162 static int
1163 fdfile_ctor(void *arg, void *obj, int flags)
1164 {
1165 fdfile_t *ff = obj;
1166
1167 memset(ff, 0, sizeof(*ff));
1168 mutex_init(&ff->ff_lock, MUTEX_DEFAULT, IPL_NONE);
1169 cv_init(&ff->ff_closing, "fdclose");
1170
1171 return 0;
1172 }
1173
1174 static void
1175 fdfile_dtor(void *arg, void *obj)
1176 {
1177 fdfile_t *ff = obj;
1178
1179 mutex_destroy(&ff->ff_lock);
1180 cv_destroy(&ff->ff_closing);
1181 }
1182
1183 file_t *
1184 fgetdummy(void)
1185 {
1186 file_t *fp;
1187
1188 fp = kmem_alloc(sizeof(*fp), KM_SLEEP);
1189 if (fp != NULL) {
1190 memset(fp, 0, sizeof(*fp));
1191 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1192 }
1193 return fp;
1194 }
1195
/*
 * Release a dummy file_t obtained from fgetdummy().
 */
void
fputdummy(file_t *fp)
{

	mutex_destroy(&fp->f_lock);
	kmem_free(fp, sizeof(*fp));
}
1203
1204 /*
1205 * Create an initial filedesc structure.
1206 */
1207 filedesc_t *
1208 fd_init(filedesc_t *fdp)
1209 {
1210 unsigned fd;
1211
1212 if (fdp == NULL) {
1213 fdp = pool_cache_get(filedesc_cache, PR_WAITOK);
1214 } else {
1215 filedesc_ctor(NULL, fdp, PR_WAITOK);
1216 }
1217
1218 fdp->fd_refcnt = 1;
1219 fdp->fd_ofiles = fdp->fd_dfiles;
1220 fdp->fd_nfiles = NDFILE;
1221 fdp->fd_himap = fdp->fd_dhimap;
1222 fdp->fd_lomap = fdp->fd_dlomap;
1223 KASSERT(fdp->fd_lastfile == -1);
1224 KASSERT(fdp->fd_lastkqfile == -1);
1225 KASSERT(fdp->fd_knhash == NULL);
1226
1227 memset(&fdp->fd_startzero, 0, sizeof(*fdp) -
1228 offsetof(filedesc_t, fd_startzero));
1229 for (fd = 0; fd < NDFDFILE; fd++) {
1230 fdp->fd_ofiles[fd] = (fdfile_t *)fdp->fd_dfdfile[fd];
1231 }
1232
1233 return fdp;
1234 }
1235
1236 /*
1237 * Initialize a file descriptor table.
1238 */
1239 static int
1240 filedesc_ctor(void *arg, void *obj, int flag)
1241 {
1242 filedesc_t *fdp = obj;
1243 int i;
1244
1245 memset(fdp, 0, sizeof(*fdp));
1246 mutex_init(&fdp->fd_lock, MUTEX_DEFAULT, IPL_NONE);
1247 fdp->fd_lastfile = -1;
1248 fdp->fd_lastkqfile = -1;
1249
1250 CTASSERT(sizeof(fdp->fd_dfdfile[0]) >= sizeof(fdfile_t));
1251 for (i = 0; i < NDFDFILE; i++) {
1252 fdfile_ctor(NULL, fdp->fd_dfdfile[i], PR_WAITOK);
1253 }
1254
1255 return 0;
1256 }
1257
1258 static void
1259 filedesc_dtor(void *arg, void *obj)
1260 {
1261 filedesc_t *fdp = obj;
1262 int i;
1263
1264 for (i = 0; i < NDFDFILE; i++) {
1265 fdfile_dtor(NULL, fdp->fd_dfdfile[i]);
1266 }
1267
1268 mutex_destroy(&fdp->fd_lock);
1269 }
1270
1271 /*
1272 * Make p2 share p1's filedesc structure.
1273 */
1274 void
1275 fd_share(struct proc *p2)
1276 {
1277 filedesc_t *fdp;
1278
1279 fdp = curlwp->l_fd;
1280 p2->p_fd = fdp;
1281 atomic_inc_uint(&fdp->fd_refcnt);
1282 }
1283
1284 /*
1285 * Acquire a hold on a filedesc structure.
1286 */
1287 void
1288 fd_hold(void)
1289 {
1290
1291 atomic_inc_uint(&curlwp->l_fd->fd_refcnt);
1292 }
1293
1294 /*
1295 * Copy a filedesc structure.
1296 */
1297 filedesc_t *
1298 fd_copy(void)
1299 {
1300 filedesc_t *newfdp, *fdp;
1301 fdfile_t *ff, *fflist, **ffp, **nffp, *ff2;
1302 int i, nused, numfiles, lastfile, j, newlast;
1303 file_t *fp;
1304
1305 fdp = curproc->p_fd;
1306 newfdp = pool_cache_get(filedesc_cache, PR_WAITOK);
1307 newfdp->fd_refcnt = 1;
1308
1309 KASSERT(newfdp->fd_knhash == NULL);
1310 KASSERT(newfdp->fd_knhashmask == 0);
1311 KASSERT(newfdp->fd_discard == NULL);
1312
1313 for (;;) {
1314 numfiles = fdp->fd_nfiles;
1315 lastfile = fdp->fd_lastfile;
1316
1317 /*
1318 * If the number of open files fits in the internal arrays
1319 * of the open file structure, use them, otherwise allocate
1320 * additional memory for the number of descriptors currently
1321 * in use.
1322 */
1323 if (lastfile < NDFILE) {
1324 i = NDFILE;
1325 newfdp->fd_ofiles = newfdp->fd_dfiles;
1326 } else {
1327 /*
1328 * Compute the smallest multiple of NDEXTENT needed
1329 * for the file descriptors currently in use,
1330 * allowing the table to shrink.
1331 */
1332 i = numfiles;
1333 while (i >= 2 * NDEXTENT && i > lastfile * 2) {
1334 i /= 2;
1335 }
1336 newfdp->fd_ofiles = fd_ofile_alloc(i);
1337 KASSERT(i > NDFILE);
1338 }
1339 if (NDHISLOTS(i) <= NDHISLOTS(NDFILE)) {
1340 newfdp->fd_himap = newfdp->fd_dhimap;
1341 newfdp->fd_lomap = newfdp->fd_dlomap;
1342 } else {
1343 fd_map_alloc(i, &newfdp->fd_lomap,
1344 &newfdp->fd_himap);
1345 }
1346
1347 /*
1348 * Allocate and string together fdfile structures.
1349 * We abuse fdfile_t::ff_file here, but it will be
1350 * cleared before this routine returns.
1351 */
1352 nused = fdp->fd_nused;
1353 fflist = NULL;
1354 for (j = nused; j != 0; j--) {
1355 ff = pool_cache_get(fdfile_cache, PR_WAITOK);
1356 ff->ff_file = (void *)fflist;
1357 fflist = ff;
1358 }
1359
1360 mutex_enter(&fdp->fd_lock);
1361 if (numfiles == fdp->fd_nfiles && nused == fdp->fd_nused &&
1362 lastfile == fdp->fd_lastfile) {
1363 break;
1364 }
1365 mutex_exit(&fdp->fd_lock);
1366 if (i > NDFILE) {
1367 fd_ofile_free(i, newfdp->fd_ofiles);
1368 }
1369 if (NDHISLOTS(i) > NDHISLOTS(NDFILE)) {
1370 fd_map_free(i, newfdp->fd_lomap, newfdp->fd_himap);
1371 }
1372 while (fflist != NULL) {
1373 ff = fflist;
1374 fflist = (void *)ff->ff_file;
1375 ff->ff_file = NULL;
1376 pool_cache_put(fdfile_cache, ff);
1377 }
1378 }
1379
1380 newfdp->fd_nfiles = i;
1381 newfdp->fd_freefile = fdp->fd_freefile;
1382 newfdp->fd_exclose = fdp->fd_exclose;
1383
1384 /*
1385 * Clear the entries that will not be copied over.
1386 * Avoid calling memset with 0 size.
1387 */
1388 if (lastfile < (i-1)) {
1389 memset(newfdp->fd_ofiles + lastfile + 1, 0,
1390 (i - lastfile - 1) * sizeof(file_t **));
1391 }
1392 if (i < NDENTRIES * NDENTRIES) {
1393 i = NDENTRIES * NDENTRIES; /* size of inlined bitmaps */
1394 }
1395 memcpy(newfdp->fd_himap, fdp->fd_himap, NDHISLOTS(i)*sizeof(uint32_t));
1396 memcpy(newfdp->fd_lomap, fdp->fd_lomap, NDLOSLOTS(i)*sizeof(uint32_t));
1397
1398 ffp = fdp->fd_ofiles;
1399 nffp = newfdp->fd_ofiles;
1400 j = imax(lastfile, (NDFDFILE - 1));
1401 newlast = -1;
1402 KASSERT(j < fdp->fd_nfiles);
1403 for (i = 0; i <= j; i++, ffp++, *nffp++ = ff2) {
1404 ff = *ffp;
1405 /* Install built-in fdfiles even if unused here. */
1406 if (i < NDFDFILE) {
1407 ff2 = (fdfile_t *)newfdp->fd_dfdfile[i];
1408 } else {
1409 ff2 = NULL;
1410 }
1411 /* Determine if descriptor is active in parent. */
1412 if (ff == NULL || !fd_isused(fdp, i)) {
1413 KASSERT(ff != NULL || i >= NDFDFILE);
1414 continue;
1415 }
1416 mutex_enter(&ff->ff_lock);
1417 fp = ff->ff_file;
1418 if (fp == NULL) {
1419 /* Descriptor is half-open: free slot. */
1420 fd_zap(newfdp, i);
1421 mutex_exit(&ff->ff_lock);
1422 continue;
1423 }
1424 if (fp->f_type == DTYPE_KQUEUE) {
1425 /* kqueue descriptors cannot be copied. */
1426 fd_zap(newfdp, i);
1427 mutex_exit(&ff->ff_lock);
1428 continue;
1429 }
1430 /* It's active: add a reference to the file. */
1431 mutex_enter(&fp->f_lock);
1432 fp->f_count++;
1433 mutex_exit(&fp->f_lock);
1434 /* Consume one fdfile_t to represent it. */
1435 if (i >= NDFDFILE) {
1436 ff2 = fflist;
1437 fflist = (void *)ff2->ff_file;
1438 }
1439 ff2->ff_file = fp;
1440 ff2->ff_exclose = ff->ff_exclose;
1441 ff2->ff_allocated = true;
1442 mutex_exit(&ff->ff_lock);
1443 if (i > newlast) {
1444 newlast = i;
1445 }
1446 }
1447 mutex_exit(&fdp->fd_lock);
1448
1449 /* Discard unused fdfile_t structures. */
1450 while (__predict_false(fflist != NULL)) {
1451 ff = fflist;
1452 fflist = (void *)ff->ff_file;
1453 ff->ff_file = NULL;
1454 pool_cache_put(fdfile_cache, ff);
1455 nused--;
1456 }
1457 KASSERT(nused >= 0);
1458 KASSERT(newfdp->fd_ofiles[0] == (fdfile_t *)newfdp->fd_dfdfile[0]);
1459
1460 newfdp->fd_nused = nused;
1461 newfdp->fd_lastfile = newlast;
1462
1463 return (newfdp);
1464 }
1465
1466 /*
1467 * Release a filedesc structure.
1468 */
1469 void
1470 fd_free(void)
1471 {
1472 filedesc_t *fdp;
1473 fdfile_t *ff;
1474 file_t *fp;
1475 int fd, lastfd;
1476 void **discard;
1477
1478 fdp = curlwp->l_fd;
1479
1480 KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
1481
1482 if (atomic_dec_uint_nv(&fdp->fd_refcnt) > 0)
1483 return;
1484
1485 /*
1486 * Close any files that the process holds open.
1487 */
1488 for (fd = 0, lastfd = fdp->fd_nfiles - 1; fd <= lastfd; fd++) {
1489 ff = fdp->fd_ofiles[fd];
1490 KASSERT(fd >= NDFDFILE ||
1491 ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
1492 if ((ff = fdp->fd_ofiles[fd]) == NULL)
1493 continue;
1494 if ((fp = ff->ff_file) != NULL) {
1495 /*
1496 * Must use fd_close() here as kqueue holds
1497 * long term references to descriptors.
1498 */
1499 ff->ff_refcnt++;
1500 fd_close(fd);
1501 }
1502 KASSERT(ff->ff_refcnt == 0);
1503 KASSERT(ff->ff_file == NULL);
1504 KASSERT(!ff->ff_exclose);
1505 KASSERT(!ff->ff_allocated);
1506 if (fd >= NDFDFILE) {
1507 pool_cache_put(fdfile_cache, ff);
1508 }
1509 }
1510
1511 /*
1512 * Clean out the descriptor table for the next user and return
1513 * to the cache.
1514 */
1515 while ((discard = fdp->fd_discard) != NULL) {
1516 fdp->fd_discard = discard[0];
1517 kmem_free(discard, (uintptr_t)discard[1]);
1518 }
1519 if (NDHISLOTS(fdp->fd_nfiles) > NDHISLOTS(NDFILE)) {
1520 KASSERT(fdp->fd_himap != fdp->fd_dhimap);
1521 KASSERT(fdp->fd_lomap != fdp->fd_dlomap);
1522 fd_map_free(fdp->fd_nfiles, fdp->fd_lomap, fdp->fd_himap);
1523 }
1524 if (fdp->fd_nfiles > NDFILE) {
1525 KASSERT(fdp->fd_ofiles != fdp->fd_dfiles);
1526 fd_ofile_free(fdp->fd_nfiles, fdp->fd_ofiles);
1527 }
1528 if (fdp->fd_knhash != NULL) {
1529 hashdone(fdp->fd_knhash, HASH_LIST, fdp->fd_knhashmask);
1530 fdp->fd_knhash = NULL;
1531 fdp->fd_knhashmask = 0;
1532 } else {
1533 KASSERT(fdp->fd_knhashmask == 0);
1534 }
1535 fdp->fd_lastkqfile = -1;
1536 pool_cache_put(filedesc_cache, fdp);
1537 }
1538
1539 /*
1540 * File Descriptor pseudo-device driver (/dev/fd/).
1541 *
1542 * Opening minor device N dup()s the file (if any) connected to file
1543 * descriptor N belonging to the calling process. Note that this driver
1544 * consists of only the ``open()'' routine, because all subsequent
1545 * references to this file will be direct to the other driver.
1546 */
1547 static int
1548 filedescopen(dev_t dev, int mode, int type, lwp_t *l)
1549 {
1550
1551 /*
1552 * XXX Kludge: set dupfd to contain the value of the
1553 * the file descriptor being sought for duplication. The error
1554 * return ensures that the vnode for this device will be released
1555 * by vn_open. Open will detect this special error and take the
1556 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1557 * will simply report the error.
1558 */
1559 l->l_dupfd = minor(dev); /* XXX */
1560 return EDUPFD;
1561 }
1562
1563 /*
1564 * Duplicate the specified descriptor to a free descriptor.
1565 */
1566 int
1567 fd_dupopen(int old, int *new, int mode, int error)
1568 {
1569 filedesc_t *fdp;
1570 fdfile_t *ff;
1571 file_t *fp;
1572
1573 if ((fp = fd_getfile(old)) == NULL) {
1574 return EBADF;
1575 }
1576 fdp = curlwp->l_fd;
1577 ff = fdp->fd_ofiles[old];
1578
1579 /*
1580 * There are two cases of interest here.
1581 *
1582 * For EDUPFD simply dup (dfd) to file descriptor
1583 * (indx) and return.
1584 *
1585 * For EMOVEFD steal away the file structure from (dfd) and
1586 * store it in (indx). (dfd) is effectively closed by
1587 * this operation.
1588 *
1589 * Any other error code is just returned.
1590 */
1591 switch (error) {
1592 case EDUPFD:
1593 /*
1594 * Check that the mode the file is being opened for is a
1595 * subset of the mode of the existing descriptor.
1596 */
1597 if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
1598 error = EACCES;
1599 break;
1600 }
1601
1602 /* Copy it. */
1603 error = fd_dup(fp, 0, new, fdp->fd_ofiles[old]->ff_exclose);
1604 break;
1605
1606 case EMOVEFD:
1607 /* Copy it. */
1608 error = fd_dup(fp, 0, new, fdp->fd_ofiles[old]->ff_exclose);
1609 if (error != 0) {
1610 break;
1611 }
1612
1613 /* Steal away the file pointer from 'old'. */
1614 (void)fd_close(old);
1615 return 0;
1616 }
1617
1618 fd_putfile(old);
1619 return error;
1620 }
1621
1622 /*
1623 * Sets descriptor owner. If the owner is a process, 'pgid'
1624 * is set to positive value, process ID. If the owner is process group,
1625 * 'pgid' is set to -pg_id.
1626 */
1627 int
1628 fsetown(pid_t *pgid, u_long cmd, const void *data)
1629 {
1630 int id = *(const int *)data;
1631 int error;
1632
1633 switch (cmd) {
1634 case TIOCSPGRP:
1635 if (id < 0)
1636 return (EINVAL);
1637 id = -id;
1638 break;
1639 default:
1640 break;
1641 }
1642
1643 if (id > 0 && !pfind(id))
1644 return (ESRCH);
1645 else if (id < 0 && (error = pgid_in_session(curproc, -id)))
1646 return (error);
1647
1648 *pgid = id;
1649 return (0);
1650 }
1651
1652 /*
1653 * Return descriptor owner information. If the value is positive,
1654 * it's process ID. If it's negative, it's process group ID and
1655 * needs the sign removed before use.
1656 */
1657 int
1658 fgetown(pid_t pgid, u_long cmd, void *data)
1659 {
1660
1661 switch (cmd) {
1662 case TIOCGPGRP:
1663 *(int *)data = -pgid;
1664 break;
1665 default:
1666 *(int *)data = pgid;
1667 break;
1668 }
1669 return (0);
1670 }
1671
1672 /*
1673 * Send signal to descriptor owner, either process or process group.
1674 */
1675 void
1676 fownsignal(pid_t pgid, int signo, int code, int band, void *fdescdata)
1677 {
1678 ksiginfo_t ksi;
1679
1680 KASSERT(!cpu_intr_p());
1681
1682 if (pgid == 0) {
1683 return;
1684 }
1685
1686 KSI_INIT(&ksi);
1687 ksi.ksi_signo = signo;
1688 ksi.ksi_code = code;
1689 ksi.ksi_band = band;
1690
1691 mutex_enter(proc_lock);
1692 if (pgid > 0) {
1693 struct proc *p1;
1694
1695 p1 = p_find(pgid, PFIND_LOCKED);
1696 if (p1 != NULL) {
1697 kpsignal(p1, &ksi, fdescdata);
1698 }
1699 } else {
1700 struct pgrp *pgrp;
1701
1702 KASSERT(pgid < 0);
1703 pgrp = pg_find(-pgid, PFIND_LOCKED);
1704 if (pgrp != NULL) {
1705 kpgsignal(pgrp, &ksi, fdescdata, 0);
1706 }
1707 }
1708 mutex_exit(proc_lock);
1709 }
1710
1711 int
1712 fd_clone(file_t *fp, unsigned fd, int flag, const struct fileops *fops,
1713 void *data)
1714 {
1715
1716 fp->f_flag = flag;
1717 fp->f_type = DTYPE_MISC;
1718 fp->f_ops = fops;
1719 fp->f_data = data;
1720 curlwp->l_dupfd = fd;
1721 fd_affix(curproc, fp, fd);
1722
1723 return EMOVEFD;
1724 }
1725
1726 int
1727 fnullop_fcntl(file_t *fp, u_int cmd, void *data)
1728 {
1729
1730 if (cmd == F_SETFL)
1731 return 0;
1732
1733 return EOPNOTSUPP;
1734 }
1735
/*
 * Null poll method: always returns 0 (no conditions reported).
 */
int
fnullop_poll(file_t *fp, int which)
{

	return 0;
}
1742
/*
 * Null kqfilter method: always succeeds without attaching anything.
 */
int
fnullop_kqfilter(file_t *fp, struct knote *kn)
{

	return 0;
}
1749
/*
 * Null drain method: nothing to do.
 */
void
fnullop_drain(file_t *fp)
{

}
1755
/*
 * "Bad operation" read method: always fails with EOPNOTSUPP.
 */
int
fbadop_read(file_t *fp, off_t *offset, struct uio *uio,
    kauth_cred_t cred, int flags)
{

	return EOPNOTSUPP;
}
1763
/*
 * "Bad operation" write method: always fails with EOPNOTSUPP.
 */
int
fbadop_write(file_t *fp, off_t *offset, struct uio *uio,
    kauth_cred_t cred, int flags)
{

	return EOPNOTSUPP;
}
1771
/*
 * "Bad operation" ioctl method: always fails with EOPNOTSUPP.
 */
int
fbadop_ioctl(file_t *fp, u_long com, void *data)
{

	return EOPNOTSUPP;
}
1778
/*
 * "Bad operation" stat method: always fails with EOPNOTSUPP.
 */
int
fbadop_stat(file_t *fp, struct stat *sb)
{

	return EOPNOTSUPP;
}
1785
/*
 * "Bad operation" close method: always fails with EOPNOTSUPP.
 */
int
fbadop_close(file_t *fp)
{

	return EOPNOTSUPP;
}
1792