sys_pipe.c revision 1.102
      1 /*	$NetBSD: sys_pipe.c,v 1.102 2008/09/06 23:05:52 rmind Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1996 John S. Dyson
     34  * All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice immediately at the beginning of the file, without modification,
     41  *    this list of conditions, and the following disclaimer.
     42  * 2. Redistributions in binary form must reproduce the above copyright
     43  *    notice, this list of conditions and the following disclaimer in the
     44  *    documentation and/or other materials provided with the distribution.
     45  * 3. Absolutely no warranty of function or purpose is made by the author
     46  *    John S. Dyson.
     47  * 4. Modifications may be freely made to this file if the above conditions
     48  *    are met.
     49  *
     50  * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
     51  */
     52 
     53 /*
     54  * This file contains a high-performance replacement for the socket-based
     55  * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
     56  * all features of sockets, but does do everything that pipes normally
     57  * do.
     58  *
      59  * Adaptation for NetBSD UVM, including uvm_loan() based direct write, was
     60  * written by Jaromir Dolecek.
     61  */
     62 
     63 /*
     64  * This code has two modes of operation, a small write mode and a large
     65  * write mode.  The small write mode acts like conventional pipes with
      66  * a kernel buffer.  If the write is smaller than PIPE_MINDIRECT, the
      67  * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
      68  * and PIPE_SIZE in size, the writer's pages are mapped read-only into the
      69  * kernel address space using the UVM page loan facility, and the receiving
      70  * process copies the data directly from the sending process's pages.
     71  *
     72  * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
     73  * happen for small transfers so that the system will not spend all of
     74  * its time context switching.  PIPE_SIZE is constrained by the
     75  * amount of kernel virtual memory.
     76  */
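
         /*
          * Illustrative sketch (not part of the build): the choice between the
          * two modes is made per iovec in pipe_write() below, roughly:
          *
          *	if (iov_len >= PIPE_MINDIRECT && (f_flag & FNONBLOCK) == 0 &&
          *	    (kva already mapped || amountpipekva < limitpipekva))
          *		use pipe_direct_write()		(page loan, zero copy)
          *	else
          *		copy into the circular kernel buffer
          *
          * See pipe_write() for the exact conditions.
          */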
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.102 2008/09/06 23:05:52 rmind Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/systm.h>
     83 #include <sys/proc.h>
     84 #include <sys/fcntl.h>
     85 #include <sys/file.h>
     86 #include <sys/filedesc.h>
     87 #include <sys/filio.h>
     88 #include <sys/kernel.h>
     89 #include <sys/ttycom.h>
     90 #include <sys/stat.h>
     91 #include <sys/poll.h>
     92 #include <sys/signalvar.h>
     93 #include <sys/vnode.h>
     94 #include <sys/uio.h>
     95 #include <sys/select.h>
     96 #include <sys/mount.h>
     97 #include <sys/syscallargs.h>
     98 #include <sys/sysctl.h>
     99 #include <sys/kauth.h>
    100 #include <sys/atomic.h>
    101 #include <sys/pipe.h>
    102 
    103 #include <uvm/uvm.h>
    104 
    105 /*
    106  * Use this define if you want to disable *fancy* VM things.  Expect an
    107  * approx 30% decrease in transfer rate.
    108  */
    109 /* #define PIPE_NODIRECT */
    110 
    111 /*
    112  * interfaces to the outside world
    113  */
    114 static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
    115 		kauth_cred_t cred, int flags);
    116 static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
    117 		kauth_cred_t cred, int flags);
    118 static int pipe_close(struct file *fp);
    119 static int pipe_poll(struct file *fp, int events);
    120 static int pipe_kqfilter(struct file *fp, struct knote *kn);
    121 static int pipe_stat(struct file *fp, struct stat *sb);
    122 static int pipe_ioctl(struct file *fp, u_long cmd, void *data);
    123 
    124 static const struct fileops pipeops = {
    125 	pipe_read, pipe_write, pipe_ioctl, fnullop_fcntl, pipe_poll,
    126 	pipe_stat, pipe_close, pipe_kqfilter
    127 };
    128 
    129 /*
    130  * Single mutex shared between both ends of the pipe.
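          * The reference count tracks how many pipe ends still use it; the last
          * pipeclose() returns the structure to pipe_mutex_cache.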
    131  */
    132 
    133 struct pipe_mutex {
    134 	kmutex_t	pm_mutex;
    135 	u_int		pm_refcnt;
    136 };
    137 
    138 /*
     139  * Default pipe buffer size(s); this can be fairly large now because pipe
    140  * space is pageable.  The pipe code will try to maintain locality of
    141  * reference for performance reasons, so small amounts of outstanding I/O
    142  * will not wipe the cache.
    143  */
    144 #define MINPIPESIZE (PIPE_SIZE/3)
    145 #define MAXPIPESIZE (2*PIPE_SIZE/3)
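
         /*
          * For example, assuming the usual 16 KB PIPE_SIZE (see sys/pipe.h),
          * MINPIPESIZE is about 5.3 KB: pipe_read() below wakes a waiting
          * writer once the buffer has drained below that mark.
          */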
    146 
    147 /*
     148  * Maximum amount of kva for pipes -- this is a soft limit, but it is
     149  * there so that we don't exhaust kernel virtual memory on large systems.
    150  */
    151 #define MAXPIPEKVA (8*1024*1024)
    152 static u_int maxpipekva = MAXPIPEKVA;
    153 
    154 /*
     155  * Limit for direct transfers; we cannot, of course, limit
     156  * the total amount of kva for pipes this way.
    157  */
    158 #define LIMITPIPEKVA (16*1024*1024)
    159 static u_int limitpipekva = LIMITPIPEKVA;
    160 
    161 /*
    162  * Limit the number of "big" pipes
    163  */
    164 #define LIMITBIGPIPES  32
    165 static u_int maxbigpipes = LIMITBIGPIPES;
    166 static u_int nbigpipe = 0;
    167 
    168 /*
    169  * Amount of KVA consumed by pipe buffers.
    170  */
    171 static u_int amountpipekva = 0;
    172 
    173 MALLOC_DEFINE(M_PIPE, "pipe", "Pipe structures");
    174 
    175 static void pipeclose(struct file *fp, struct pipe *pipe);
    176 static void pipe_free_kmem(struct pipe *pipe);
    177 static int pipe_create(struct pipe **pipep, int allockva, struct pipe_mutex *);
    178 static int pipelock(struct pipe *pipe, int catch);
    179 static inline void pipeunlock(struct pipe *pipe);
    180 static void pipeselwakeup(struct pipe *pipe, struct pipe *sigp, int code);
    181 #ifndef PIPE_NODIRECT
    182 static int pipe_direct_write(struct file *fp, struct pipe *wpipe,
    183     struct uio *uio);
    184 #endif
    185 static int pipespace(struct pipe *pipe, int size);
    186 
    187 #ifndef PIPE_NODIRECT
    188 static int pipe_loan_alloc(struct pipe *, int);
    189 static void pipe_loan_free(struct pipe *);
    190 #endif /* PIPE_NODIRECT */
    191 
    192 static int pipe_mutex_ctor(void *, void *, int);
    193 static void pipe_mutex_dtor(void *, void *);
    194 
    195 static pool_cache_t pipe_cache;
    196 static pool_cache_t pipe_mutex_cache;
    197 
    198 void
    199 pipe_init(void)
    200 {
    201 
    202 	pipe_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipepl",
    203 	    NULL, IPL_NONE, NULL, NULL, NULL);
    204 	KASSERT(pipe_cache != NULL);
    205 
    206 	pipe_mutex_cache = pool_cache_init(sizeof(struct pipe_mutex),
    207 	    coherency_unit, 0, 0, "pipemtxpl", NULL, IPL_NONE, pipe_mutex_ctor,
    208 	    pipe_mutex_dtor, NULL);
     209 	KASSERT(pipe_mutex_cache != NULL);
    210 }
    211 
    212 static int
    213 pipe_mutex_ctor(void *arg, void *obj, int flag)
    214 {
    215 	struct pipe_mutex *pm = obj;
    216 
    217 	mutex_init(&pm->pm_mutex, MUTEX_DEFAULT, IPL_NONE);
    218 	pm->pm_refcnt = 0;
    219 
    220 	return 0;
    221 }
    222 
    223 static void
    224 pipe_mutex_dtor(void *arg, void *obj)
    225 {
    226 	struct pipe_mutex *pm = obj;
    227 
    228 	KASSERT(pm->pm_refcnt == 0);
    229 
    230 	mutex_destroy(&pm->pm_mutex);
    231 }
    232 
    233 /*
    234  * The pipe system call for the DTYPE_PIPE type of pipes
    235  */
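
         /*
          * Minimal userland usage sketch (illustrative only, assuming the
          * standard pipe(2) wrapper for this system call):
          *
          *	int fds[2];
          *	char buf[6];
          *
          *	if (pipe(fds) == -1)
          *		err(EXIT_FAILURE, "pipe");
          *	(void)write(fds[1], "hello", 5);
          *	(void)read(fds[0], buf, sizeof(buf));
          *
          * retval[0] below becomes fds[0], the read end; retval[1] becomes
          * fds[1], the write end.
          */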
    236 
    237 /* ARGSUSED */
    238 int
    239 sys_pipe(struct lwp *l, const void *v, register_t *retval)
    240 {
    241 	struct file *rf, *wf;
    242 	struct pipe *rpipe, *wpipe;
    243 	struct pipe_mutex *mutex;
    244 	int fd, error;
    245 	proc_t *p;
    246 
    247 	p = curproc;
    248 	rpipe = wpipe = NULL;
    249 	mutex = pool_cache_get(pipe_mutex_cache, PR_WAITOK);
    250 	if (mutex == NULL)
    251 		return (ENOMEM);
    252 	if (pipe_create(&rpipe, 1, mutex) || pipe_create(&wpipe, 0, mutex)) {
    253 		pipeclose(NULL, rpipe);
    254 		pipeclose(NULL, wpipe);
    255 		return (ENFILE);
    256 	}
    257 
    258 	error = fd_allocfile(&rf, &fd);
    259 	if (error)
    260 		goto free2;
    261 	retval[0] = fd;
    262 	rf->f_flag = FREAD;
    263 	rf->f_type = DTYPE_PIPE;
    264 	rf->f_data = (void *)rpipe;
    265 	rf->f_ops = &pipeops;
    266 
    267 	error = fd_allocfile(&wf, &fd);
    268 	if (error)
    269 		goto free3;
    270 	retval[1] = fd;
    271 	wf->f_flag = FWRITE;
    272 	wf->f_type = DTYPE_PIPE;
    273 	wf->f_data = (void *)wpipe;
    274 	wf->f_ops = &pipeops;
    275 
    276 	rpipe->pipe_peer = wpipe;
    277 	wpipe->pipe_peer = rpipe;
    278 
    279 	fd_affix(p, rf, (int)retval[0]);
    280 	fd_affix(p, wf, (int)retval[1]);
    281 	return (0);
    282 free3:
    283 	fd_abort(p, rf, (int)retval[0]);
    284 free2:
    285 	pipeclose(NULL, wpipe);
    286 	pipeclose(NULL, rpipe);
    287 
    288 	return (error);
    289 }
    290 
    291 /*
     292  * Allocate kva for the pipe circular buffer; the space is pageable.
     293  * This routine will 'realloc' the size of a pipe safely: if it fails,
     294  * it will retain the old buffer.
     295  * On failure it returns ENOMEM.
    296  */
    297 static int
    298 pipespace(struct pipe *pipe, int size)
    299 {
    300 	void *buffer;
    301 	/*
    302 	 * Allocate pageable virtual address space. Physical memory is
    303 	 * allocated on demand.
    304 	 */
    305 	buffer = (void *) uvm_km_alloc(kernel_map, round_page(size), 0,
    306 	    UVM_KMF_PAGEABLE);
    307 	if (buffer == NULL)
    308 		return (ENOMEM);
    309 
    310 	/* free old resources if we're resizing */
    311 	pipe_free_kmem(pipe);
    312 	pipe->pipe_buffer.buffer = buffer;
    313 	pipe->pipe_buffer.size = size;
    314 	pipe->pipe_buffer.in = 0;
    315 	pipe->pipe_buffer.out = 0;
    316 	pipe->pipe_buffer.cnt = 0;
    317 	atomic_add_int(&amountpipekva, pipe->pipe_buffer.size);
    318 	return (0);
    319 }
    320 
    321 /*
    322  * Initialize and allocate VM and memory for pipe.
    323  */
    324 static int
    325 pipe_create(struct pipe **pipep, int allockva, struct pipe_mutex *mutex)
    326 {
    327 	struct pipe *pipe;
    328 	int error;
    329 
    330 	pipe = *pipep = pool_cache_get(pipe_cache, PR_WAITOK);
    331 	mutex->pm_refcnt++;
    332 
    333 	/* Initialize */
    334 	memset(pipe, 0, sizeof(struct pipe));
    335 	pipe->pipe_state = PIPE_SIGNALR;
    336 
    337 	getmicrotime(&pipe->pipe_ctime);
    338 	pipe->pipe_atime = pipe->pipe_ctime;
    339 	pipe->pipe_mtime = pipe->pipe_ctime;
    340 	pipe->pipe_lock = &mutex->pm_mutex;
    341 	cv_init(&pipe->pipe_rcv, "piperd");
    342 	cv_init(&pipe->pipe_wcv, "pipewr");
    343 	cv_init(&pipe->pipe_draincv, "pipedrain");
    344 	cv_init(&pipe->pipe_lkcv, "pipelk");
    345 	selinit(&pipe->pipe_sel);
    346 
    347 	if (allockva && (error = pipespace(pipe, PIPE_SIZE)))
    348 		return (error);
    349 
    350 	return (0);
    351 }
    352 
    353 
    354 /*
     355  * Lock a pipe for exclusive I/O, blocking other access.
     356  * Called with the pipe mutex held; returns with the mutex still held
     357  * and the long-term lock (PIPE_LOCKFL) acquired on success.
    358  */
    359 static int
    360 pipelock(struct pipe *pipe, int catch)
    361 {
    362 	int error;
    363 
    364 	KASSERT(mutex_owned(pipe->pipe_lock));
    365 
    366 	while (pipe->pipe_state & PIPE_LOCKFL) {
    367 		pipe->pipe_state |= PIPE_LWANT;
    368 		if (catch) {
    369 			error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
    370 			if (error != 0)
    371 				return error;
    372 		} else
    373 			cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
    374 	}
    375 
    376 	pipe->pipe_state |= PIPE_LOCKFL;
    377 
    378 	return 0;
    379 }
    380 
    381 /*
    382  * unlock a pipe I/O lock
    383  */
    384 static inline void
    385 pipeunlock(struct pipe *pipe)
    386 {
    387 
    388 	KASSERT(pipe->pipe_state & PIPE_LOCKFL);
    389 
    390 	pipe->pipe_state &= ~PIPE_LOCKFL;
    391 	if (pipe->pipe_state & PIPE_LWANT) {
    392 		pipe->pipe_state &= ~PIPE_LWANT;
    393 		cv_broadcast(&pipe->pipe_lkcv);
    394 	}
    395 }
    396 
    397 /*
     398  * Select/poll wakeup.  This also sends SIGIO to the process or process
     399  * group registered on the 'sigp' side of the pipe.
    400  */
    401 static void
    402 pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
    403 {
    404 	int band;
    405 
    406 	switch (code) {
    407 	case POLL_IN:
    408 		band = POLLIN|POLLRDNORM;
    409 		break;
    410 	case POLL_OUT:
    411 		band = POLLOUT|POLLWRNORM;
    412 		break;
    413 	case POLL_HUP:
    414 		band = POLLHUP;
    415 		break;
    416 #if POLL_HUP != POLL_ERR
    417 	case POLL_ERR:
    418 		band = POLLERR;
    419 		break;
    420 #endif
    421 	default:
    422 		band = 0;
    423 #ifdef DIAGNOSTIC
    424 		printf("bad siginfo code %d in pipe notification.\n", code);
    425 #endif
    426 		break;
    427 	}
    428 
    429 	selnotify(&selp->pipe_sel, band, NOTE_SUBMIT);
    430 
    431 	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
    432 		return;
    433 
    434 	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
    435 }
    436 
    437 /* ARGSUSED */
    438 static int
    439 pipe_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    440     int flags)
    441 {
    442 	struct pipe *rpipe = (struct pipe *) fp->f_data;
    443 	struct pipebuf *bp = &rpipe->pipe_buffer;
    444 	kmutex_t *lock = rpipe->pipe_lock;
    445 	int error;
    446 	size_t nread = 0;
    447 	size_t size;
    448 	size_t ocnt;
    449 
    450 	mutex_enter(lock);
    451 	++rpipe->pipe_busy;
    452 	ocnt = bp->cnt;
    453 
    454 again:
    455 	error = pipelock(rpipe, 1);
    456 	if (error)
    457 		goto unlocked_error;
    458 
    459 	while (uio->uio_resid) {
    460 		/*
    461 		 * normal pipe buffer receive
    462 		 */
    463 		if (bp->cnt > 0) {
    464 			size = bp->size - bp->out;
    465 			if (size > bp->cnt)
    466 				size = bp->cnt;
    467 			if (size > uio->uio_resid)
    468 				size = uio->uio_resid;
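         			/* size = min(contiguous space before wrap, bytes in buffer, caller's residual). */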
    469 
    470 			mutex_exit(lock);
    471 			error = uiomove((char *)bp->buffer + bp->out, size, uio);
    472 			mutex_enter(lock);
    473 			if (error)
    474 				break;
    475 
    476 			bp->out += size;
    477 			if (bp->out >= bp->size)
    478 				bp->out = 0;
    479 
    480 			bp->cnt -= size;
    481 
    482 			/*
    483 			 * If there is no more to read in the pipe, reset
    484 			 * its pointers to the beginning.  This improves
    485 			 * cache hit stats.
    486 			 */
    487 			if (bp->cnt == 0) {
    488 				bp->in = 0;
    489 				bp->out = 0;
    490 			}
    491 			nread += size;
    492 			continue;
    493 		}
    494 
    495 #ifndef PIPE_NODIRECT
    496 		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
    497 			/*
    498 			 * Direct copy, bypassing a kernel buffer.
    499 			 */
    500 			void *	va;
    501 
    502 			KASSERT(rpipe->pipe_state & PIPE_DIRECTW);
    503 
    504 			size = rpipe->pipe_map.cnt;
    505 			if (size > uio->uio_resid)
    506 				size = uio->uio_resid;
    507 
    508 			va = (char *)rpipe->pipe_map.kva + rpipe->pipe_map.pos;
    509 			mutex_exit(lock);
    510 			error = uiomove(va, size, uio);
    511 			mutex_enter(lock);
    512 			if (error)
    513 				break;
    514 			nread += size;
    515 			rpipe->pipe_map.pos += size;
    516 			rpipe->pipe_map.cnt -= size;
    517 			if (rpipe->pipe_map.cnt == 0) {
    518 				rpipe->pipe_state &= ~PIPE_DIRECTR;
    519 				cv_broadcast(&rpipe->pipe_wcv);
    520 			}
    521 			continue;
    522 		}
    523 #endif
    524 		/*
    525 		 * Break if some data was read.
    526 		 */
    527 		if (nread > 0)
    528 			break;
    529 
    530 		/*
    531 		 * detect EOF condition
    532 		 * read returns 0 on EOF, no need to set error
    533 		 */
    534 		if (rpipe->pipe_state & PIPE_EOF)
    535 			break;
    536 
    537 		/*
    538 		 * don't block on non-blocking I/O
    539 		 */
    540 		if (fp->f_flag & FNONBLOCK) {
    541 			error = EAGAIN;
    542 			break;
    543 		}
    544 
    545 		/*
    546 		 * Unlock the pipe buffer for our remaining processing.
    547 		 * We will either break out with an error or we will
    548 		 * sleep and relock to loop.
    549 		 */
    550 		pipeunlock(rpipe);
    551 
    552 		/*
    553 		 * Re-check to see if more direct writes are pending.
    554 		 */
    555 		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
    556 			goto again;
    557 
    558 		/*
    559 		 * We want to read more, wake up select/poll.
    560 		 */
    561 		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_IN);
    562 
    563 		/*
    564 		 * If the "write-side" is blocked, wake it up now.
    565 		 */
    566 		cv_broadcast(&rpipe->pipe_wcv);
    567 
    568 		/* Now wait until the pipe is filled */
    569 		error = cv_wait_sig(&rpipe->pipe_rcv, lock);
    570 		if (error != 0)
    571 			goto unlocked_error;
    572 		goto again;
    573 	}
    574 
    575 	if (error == 0)
    576 		getmicrotime(&rpipe->pipe_atime);
    577 	pipeunlock(rpipe);
    578 
    579 unlocked_error:
    580 	--rpipe->pipe_busy;
    581 	if (rpipe->pipe_busy == 0) {
    582 		cv_broadcast(&rpipe->pipe_draincv);
    583 	}
    584 	if (bp->cnt < MINPIPESIZE) {
    585 		cv_broadcast(&rpipe->pipe_wcv);
    586 	}
    587 
    588 	/*
     589 	 * If anything was read off the buffer, signal to the writer that it is
     590 	 * possible to write more data.  Also send the signal if this is the
     591 	 * first read since the last write.
    592 	 */
    593 	if ((bp->size - bp->cnt) >= PIPE_BUF
    594 	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
    595 		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
    596 		rpipe->pipe_state &= ~PIPE_SIGNALR;
    597 	}
    598 
    599 	mutex_exit(lock);
    600 	return (error);
    601 }
    602 
    603 #ifndef PIPE_NODIRECT
    604 /*
    605  * Allocate structure for loan transfer.
    606  */
    607 static int
    608 pipe_loan_alloc(struct pipe *wpipe, int npages)
    609 {
    610 	vsize_t len;
    611 
    612 	len = (vsize_t)npages << PAGE_SHIFT;
    613 	atomic_add_int(&amountpipekva, len);
    614 	wpipe->pipe_map.kva = uvm_km_alloc(kernel_map, len, 0,
    615 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    616 	if (wpipe->pipe_map.kva == 0) {
    617 		atomic_add_int(&amountpipekva, -len);
    618 		return (ENOMEM);
    619 	}
    620 
    621 	wpipe->pipe_map.npages = npages;
    622 	wpipe->pipe_map.pgs = kmem_alloc(npages * sizeof(struct vm_page *),
    623 	    KM_SLEEP);
    624 	return (0);
    625 }
    626 
    627 /*
    628  * Free resources allocated for loan transfer.
    629  */
    630 static void
    631 pipe_loan_free(struct pipe *wpipe)
    632 {
    633 	vsize_t len;
    634 
    635 	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
    636 	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len, UVM_KMF_VAONLY);
    637 	wpipe->pipe_map.kva = 0;
    638 	atomic_add_int(&amountpipekva, -len);
    639 	kmem_free(wpipe->pipe_map.pgs,
    640 	    wpipe->pipe_map.npages * sizeof(struct vm_page *));
    641 	wpipe->pipe_map.pgs = NULL;
    642 }
    643 
    644 /*
     645  * NetBSD direct write, using the uvm_loan() mechanism.
     646  * This implements the direct (zero-copy) write path.  Note that only
     647  * a direct write OR a normal pipe write can be pending at any given time.
     648  * If there are any characters in the pipe buffer, the direct write will
     649  * be deferred until the receiving process grabs all of the bytes from
     650  * the pipe buffer.  Then the direct mapping write is set up.
    651  *
    652  * Called with the long-term pipe lock held.
    653  */
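         /*
          * In outline (see the code below): take at most PIPE_DIRECT_CHUNK bytes
          * of the writer's iovec, loan the pages with uvm_loan(), enter them
          * read-only into the pipe's kva window, then wait for the reader to
          * consume them directly before unmapping and unloaning.
          */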
    654 static int
    655 pipe_direct_write(struct file *fp, struct pipe *wpipe, struct uio *uio)
    656 {
    657 	int error, npages, j;
    658 	struct vm_page **pgs;
    659 	vaddr_t bbase, kva, base, bend;
    660 	vsize_t blen, bcnt;
    661 	voff_t bpos;
    662 	kmutex_t *lock = wpipe->pipe_lock;
    663 
    664 	KASSERT(mutex_owned(wpipe->pipe_lock));
    665 	KASSERT(wpipe->pipe_map.cnt == 0);
    666 
    667 	mutex_exit(lock);
    668 
    669 	/*
     670 	 * Handle the first PIPE_DIRECT_CHUNK bytes of the buffer.  Deal with
     671 	 * buffers not aligned to PAGE_SIZE.
    672 	 */
    673 	bbase = (vaddr_t)uio->uio_iov->iov_base;
    674 	base = trunc_page(bbase);
    675 	bend = round_page(bbase + uio->uio_iov->iov_len);
    676 	blen = bend - base;
    677 	bpos = bbase - base;
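         	/*
         	 * Worked example (illustrative): a 100-byte iovec starting at user
         	 * address 0x1003 with 4 KB pages gives base 0x1000, bpos 3, and
         	 * blen one page.
         	 */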
    678 
    679 	if (blen > PIPE_DIRECT_CHUNK) {
    680 		blen = PIPE_DIRECT_CHUNK;
    681 		bend = base + blen;
    682 		bcnt = PIPE_DIRECT_CHUNK - bpos;
    683 	} else {
    684 		bcnt = uio->uio_iov->iov_len;
    685 	}
    686 	npages = blen >> PAGE_SHIFT;
    687 
    688 	/*
    689 	 * Free the old kva if we need more pages than we have
    690 	 * allocated.
    691 	 */
    692 	if (wpipe->pipe_map.kva != 0 && npages > wpipe->pipe_map.npages)
    693 		pipe_loan_free(wpipe);
    694 
    695 	/* Allocate new kva. */
    696 	if (wpipe->pipe_map.kva == 0) {
    697 		error = pipe_loan_alloc(wpipe, npages);
    698 		if (error) {
    699 			mutex_enter(lock);
    700 			return (error);
    701 		}
    702 	}
    703 
    704 	/* Loan the write buffer memory from writer process */
    705 	pgs = wpipe->pipe_map.pgs;
    706 	error = uvm_loan(&uio->uio_vmspace->vm_map, base, blen,
    707 			 pgs, UVM_LOAN_TOPAGE);
    708 	if (error) {
    709 		pipe_loan_free(wpipe);
    710 		mutex_enter(lock);
     711 		return (ENOMEM); /* so that the caller falls back to ordinary write */
    712 	}
    713 
    714 	/* Enter the loaned pages to kva */
    715 	kva = wpipe->pipe_map.kva;
    716 	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
    717 		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
    718 	}
    719 	pmap_update(pmap_kernel());
    720 
    721 	/* Now we can put the pipe in direct write mode */
    722 	wpipe->pipe_map.pos = bpos;
    723 	wpipe->pipe_map.cnt = bcnt;
    724 
    725 	/*
    726 	 * But before we can let someone do a direct read, we
    727 	 * have to wait until the pipe is drained.  Release the
    728 	 * pipe lock while we wait.
    729 	 */
    730 	mutex_enter(lock);
    731 	wpipe->pipe_state |= PIPE_DIRECTW;
    732 	pipeunlock(wpipe);
    733 
    734 	while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
    735 		cv_broadcast(&wpipe->pipe_rcv);
    736 		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
    737 		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
    738 			error = EPIPE;
    739 	}
    740 
     741 	/* Pipe is drained; the next read will come off the direct buffer */
    742 	wpipe->pipe_state |= PIPE_DIRECTR;
    743 
    744 	/* Wait until the reader is done */
    745 	while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
    746 		cv_broadcast(&wpipe->pipe_rcv);
    747 		pipeselwakeup(wpipe, wpipe, POLL_IN);
    748 		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
    749 		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
    750 			error = EPIPE;
    751 	}
    752 
    753 	/* Take pipe out of direct write mode */
    754 	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);
    755 
    756 	/* Acquire the pipe lock and cleanup */
    757 	(void)pipelock(wpipe, 0);
    758 	mutex_exit(lock);
    759 
    760 	if (pgs != NULL) {
    761 		pmap_kremove(wpipe->pipe_map.kva, blen);
    762 		pmap_update(pmap_kernel());
    763 		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
    764 	}
    765 	if (error || amountpipekva > maxpipekva)
    766 		pipe_loan_free(wpipe);
    767 
    768 	mutex_enter(lock);
    769 	if (error) {
    770 		pipeselwakeup(wpipe, wpipe, POLL_ERR);
    771 
    772 		/*
     773 		 * If nothing was read from what we offered, return the error
     774 		 * straight away.  Otherwise update uio resid first.  The caller
    775 		 * will deal with the error condition, returning short
    776 		 * write, error, or restarting the write(2) as appropriate.
    777 		 */
    778 		if (wpipe->pipe_map.cnt == bcnt) {
    779 			wpipe->pipe_map.cnt = 0;
    780 			cv_broadcast(&wpipe->pipe_wcv);
    781 			return (error);
    782 		}
    783 
    784 		bcnt -= wpipe->pipe_map.cnt;
    785 	}
    786 
    787 	uio->uio_resid -= bcnt;
    788 	/* uio_offset not updated, not set/used for write(2) */
    789 	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
    790 	uio->uio_iov->iov_len -= bcnt;
    791 	if (uio->uio_iov->iov_len == 0) {
    792 		uio->uio_iov++;
    793 		uio->uio_iovcnt--;
    794 	}
    795 
    796 	wpipe->pipe_map.cnt = 0;
    797 	return (error);
    798 }
    799 #endif /* !PIPE_NODIRECT */
    800 
    801 static int
    802 pipe_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    803     int flags)
    804 {
    805 	struct pipe *wpipe, *rpipe;
    806 	struct pipebuf *bp;
    807 	kmutex_t *lock;
    808 	int error;
    809 
    810 	/* We want to write to our peer */
    811 	rpipe = (struct pipe *) fp->f_data;
    812 	lock = rpipe->pipe_lock;
    813 	error = 0;
    814 
    815 	mutex_enter(lock);
    816 	wpipe = rpipe->pipe_peer;
    817 
    818 	/*
    819 	 * Detect loss of pipe read side, issue SIGPIPE if lost.
    820 	 */
    821 	if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) != 0) {
    822 		mutex_exit(lock);
    823 		return EPIPE;
    824 	}
    825 	++wpipe->pipe_busy;
    826 
     827 	/* Acquire the long-term pipe lock */
    828 	if ((error = pipelock(wpipe, 1)) != 0) {
    829 		--wpipe->pipe_busy;
    830 		if (wpipe->pipe_busy == 0) {
    831 			cv_broadcast(&wpipe->pipe_draincv);
    832 		}
    833 		mutex_exit(lock);
    834 		return (error);
    835 	}
    836 
    837 	bp = &wpipe->pipe_buffer;
    838 
    839 	/*
    840 	 * If it is advantageous to resize the pipe buffer, do so.
    841 	 */
    842 	if ((uio->uio_resid > PIPE_SIZE) &&
    843 	    (nbigpipe < maxbigpipes) &&
    844 #ifndef PIPE_NODIRECT
    845 	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
    846 #endif
    847 	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {
    848 
    849 		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
    850 			atomic_inc_uint(&nbigpipe);
    851 	}
    852 
    853 	while (uio->uio_resid) {
    854 		size_t space;
    855 
    856 #ifndef PIPE_NODIRECT
    857 		/*
     858 		 * Pipe buffered writes cannot coincide with
    859 		 * direct writes.  Also, only one direct write can be
    860 		 * in progress at any one time.  We wait until the currently
    861 		 * executing direct write is completed before continuing.
    862 		 *
    863 		 * We break out if a signal occurs or the reader goes away.
    864 		 */
    865 		while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
    866 			cv_broadcast(&wpipe->pipe_rcv);
    867 			pipeunlock(wpipe);
    868 			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
    869 			(void)pipelock(wpipe, 0);
    870 			if (wpipe->pipe_state & PIPE_EOF)
    871 				error = EPIPE;
    872 		}
    873 		if (error)
    874 			break;
    875 
    876 		/*
    877 		 * If the transfer is large, we can gain performance if
    878 		 * we do process-to-process copies directly.
    879 		 * If the write is non-blocking, we don't use the
    880 		 * direct write mechanism.
    881 		 *
    882 		 * The direct write mechanism will detect the reader going
    883 		 * away on us.
    884 		 */
    885 		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
    886 		    (fp->f_flag & FNONBLOCK) == 0 &&
    887 		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
    888 			error = pipe_direct_write(fp, wpipe, uio);
    889 
    890 			/*
     891 			 * Break out if an error occurred, unless it's ENOMEM.
     892 			 * ENOMEM means we failed to allocate some resources
     893 			 * for the direct write, so we just fall back to the
     894 			 * ordinary write.  If the direct write was successful,
     895 			 * process the rest of the data via the ordinary write.
    896 			 */
    897 			if (error == 0)
    898 				continue;
    899 
    900 			if (error != ENOMEM)
    901 				break;
    902 		}
    903 #endif /* PIPE_NODIRECT */
    904 
    905 		space = bp->size - bp->cnt;
    906 
    907 		/* Writes of size <= PIPE_BUF must be atomic. */
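         		/*
         		 * Pretend the buffer is full so we block (or fail with EAGAIN
         		 * for non-blocking writes) until the whole request fits,
         		 * rather than splitting it.
         		 */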
    908 		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
    909 			space = 0;
    910 
    911 		if (space > 0) {
    912 			int size;	/* Transfer size */
    913 			int segsize;	/* first segment to transfer */
    914 
    915 			/*
    916 			 * Transfer size is minimum of uio transfer
    917 			 * and free space in pipe buffer.
    918 			 */
    919 			if (space > uio->uio_resid)
    920 				size = uio->uio_resid;
    921 			else
    922 				size = space;
    923 			/*
    924 			 * First segment to transfer is minimum of
    925 			 * transfer size and contiguous space in
    926 			 * pipe buffer.  If first segment to transfer
    927 			 * is less than the transfer size, we've got
    928 			 * a wraparound in the buffer.
    929 			 */
    930 			segsize = bp->size - bp->in;
    931 			if (segsize > size)
    932 				segsize = size;
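         			/*
         			 * Worked example: with size 300 and only 100 bytes left
         			 * before the end of the buffer, the first uiomove() copies
         			 * 100 bytes to the tail and the second copies the remaining
         			 * 200 to the start of the buffer (wraparound).
         			 */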
    933 
    934 			/* Transfer first segment */
    935 			mutex_exit(lock);
    936 			error = uiomove((char *)bp->buffer + bp->in, segsize,
    937 			    uio);
    938 
    939 			if (error == 0 && segsize < size) {
    940 				/*
    941 				 * Transfer remaining part now, to
    942 				 * support atomic writes.  Wraparound
    943 				 * happened.
    944 				 */
    945 #ifdef DEBUG
    946 				if (bp->in + segsize != bp->size)
    947 					panic("Expected pipe buffer wraparound disappeared");
    948 #endif
    949 
    950 				error = uiomove(bp->buffer,
    951 				    size - segsize, uio);
    952 			}
    953 			mutex_enter(lock);
    954 			if (error)
    955 				break;
    956 
    957 			bp->in += size;
    958 			if (bp->in >= bp->size) {
    959 #ifdef DEBUG
    960 				if (bp->in != size - segsize + bp->size)
    961 					panic("Expected wraparound bad");
    962 #endif
    963 				bp->in = size - segsize;
    964 			}
    965 
    966 			bp->cnt += size;
    967 #ifdef DEBUG
    968 			if (bp->cnt > bp->size)
    969 				panic("Pipe buffer overflow");
    970 #endif
    971 		} else {
    972 			/*
    973 			 * If the "read-side" has been blocked, wake it up now.
    974 			 */
    975 			cv_broadcast(&wpipe->pipe_rcv);
    976 
    977 			/*
    978 			 * don't block on non-blocking I/O
    979 			 */
    980 			if (fp->f_flag & FNONBLOCK) {
    981 				error = EAGAIN;
    982 				break;
    983 			}
    984 
    985 			/*
    986 			 * We have no more space and have something to offer,
    987 			 * wake up select/poll.
    988 			 */
    989 			if (bp->cnt)
    990 				pipeselwakeup(wpipe, wpipe, POLL_OUT);
    991 
    992 			pipeunlock(wpipe);
    993 			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
    994 			(void)pipelock(wpipe, 0);
    995 			if (error != 0)
    996 				break;
    997 			/*
     998 			 * If the read side wants to go away, we just issue a signal
    999 			 * to ourselves.
   1000 			 */
   1001 			if (wpipe->pipe_state & PIPE_EOF) {
   1002 				error = EPIPE;
   1003 				break;
   1004 			}
   1005 		}
   1006 	}
   1007 
   1008 	--wpipe->pipe_busy;
   1009 	if (wpipe->pipe_busy == 0) {
   1010 		cv_broadcast(&wpipe->pipe_draincv);
   1011 	}
   1012 	if (bp->cnt > 0) {
   1013 		cv_broadcast(&wpipe->pipe_rcv);
   1014 	}
   1015 
   1016 	/*
   1017 	 * Don't return EPIPE if I/O was successful
   1018 	 */
   1019 	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
   1020 		error = 0;
   1021 
   1022 	if (error == 0)
   1023 		getmicrotime(&wpipe->pipe_mtime);
   1024 
   1025 	/*
   1026 	 * We have something to offer, wake up select/poll.
    1027 	 * wpipe->pipe_map.cnt is always 0 at this point (the direct write
    1028 	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
   1029 	 */
   1030 	if (bp->cnt)
   1031 		pipeselwakeup(wpipe, wpipe, POLL_OUT);
   1032 
   1033 	/*
   1034 	 * Arrange for next read(2) to do a signal.
   1035 	 */
   1036 	wpipe->pipe_state |= PIPE_SIGNALR;
   1037 
   1038 	pipeunlock(wpipe);
   1039 	mutex_exit(lock);
   1040 	return (error);
   1041 }
   1042 
   1043 /*
   1044  * we implement a very minimal set of ioctls for compatibility with sockets.
   1045  */
   1046 int
   1047 pipe_ioctl(struct file *fp, u_long cmd, void *data)
   1048 {
   1049 	struct pipe *pipe = fp->f_data;
   1050 	kmutex_t *lock = pipe->pipe_lock;
   1051 
   1052 	switch (cmd) {
   1053 
   1054 	case FIONBIO:
   1055 		return (0);
   1056 
   1057 	case FIOASYNC:
   1058 		mutex_enter(lock);
   1059 		if (*(int *)data) {
   1060 			pipe->pipe_state |= PIPE_ASYNC;
   1061 		} else {
   1062 			pipe->pipe_state &= ~PIPE_ASYNC;
   1063 		}
   1064 		mutex_exit(lock);
   1065 		return (0);
   1066 
   1067 	case FIONREAD:
   1068 		mutex_enter(lock);
   1069 #ifndef PIPE_NODIRECT
   1070 		if (pipe->pipe_state & PIPE_DIRECTW)
   1071 			*(int *)data = pipe->pipe_map.cnt;
   1072 		else
   1073 #endif
   1074 			*(int *)data = pipe->pipe_buffer.cnt;
   1075 		mutex_exit(lock);
   1076 		return (0);
   1077 
   1078 	case FIONWRITE:
   1079 		/* Look at other side */
   1080 		pipe = pipe->pipe_peer;
   1081 		mutex_enter(lock);
   1082 #ifndef PIPE_NODIRECT
   1083 		if (pipe->pipe_state & PIPE_DIRECTW)
   1084 			*(int *)data = pipe->pipe_map.cnt;
   1085 		else
   1086 #endif
   1087 			*(int *)data = pipe->pipe_buffer.cnt;
   1088 		mutex_exit(lock);
   1089 		return (0);
   1090 
   1091 	case FIONSPACE:
   1092 		/* Look at other side */
   1093 		pipe = pipe->pipe_peer;
   1094 		mutex_enter(lock);
   1095 #ifndef PIPE_NODIRECT
   1096 		/*
   1097 		 * If we're in direct-mode, we don't really have a
   1098 		 * send queue, and any other write will block. Thus
   1099 		 * zero seems like the best answer.
   1100 		 */
   1101 		if (pipe->pipe_state & PIPE_DIRECTW)
   1102 			*(int *)data = 0;
   1103 		else
   1104 #endif
   1105 			*(int *)data = pipe->pipe_buffer.size -
   1106 			    pipe->pipe_buffer.cnt;
   1107 		mutex_exit(lock);
   1108 		return (0);
   1109 
   1110 	case TIOCSPGRP:
   1111 	case FIOSETOWN:
   1112 		return fsetown(&pipe->pipe_pgid, cmd, data);
   1113 
   1114 	case TIOCGPGRP:
   1115 	case FIOGETOWN:
   1116 		return fgetown(pipe->pipe_pgid, cmd, data);
   1117 
   1118 	}
   1119 	return (EPASSTHROUGH);
   1120 }
   1121 
   1122 int
   1123 pipe_poll(struct file *fp, int events)
   1124 {
   1125 	struct pipe *rpipe = fp->f_data;
   1126 	struct pipe *wpipe;
   1127 	int eof = 0;
   1128 	int revents = 0;
   1129 
   1130 	mutex_enter(rpipe->pipe_lock);
   1131 	wpipe = rpipe->pipe_peer;
   1132 
   1133 	if (events & (POLLIN | POLLRDNORM))
   1134 		if ((rpipe->pipe_buffer.cnt > 0) ||
   1135 #ifndef PIPE_NODIRECT
   1136 		    (rpipe->pipe_state & PIPE_DIRECTR) ||
   1137 #endif
   1138 		    (rpipe->pipe_state & PIPE_EOF))
   1139 			revents |= events & (POLLIN | POLLRDNORM);
   1140 
   1141 	eof |= (rpipe->pipe_state & PIPE_EOF);
   1142 
   1143 	if (wpipe == NULL)
   1144 		revents |= events & (POLLOUT | POLLWRNORM);
   1145 	else {
   1146 		if (events & (POLLOUT | POLLWRNORM))
   1147 			if ((wpipe->pipe_state & PIPE_EOF) || (
   1148 #ifndef PIPE_NODIRECT
   1149 			     (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
   1150 #endif
   1151 			     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
   1152 				revents |= events & (POLLOUT | POLLWRNORM);
   1153 
   1154 		eof |= (wpipe->pipe_state & PIPE_EOF);
   1155 	}
   1156 
   1157 	if (wpipe == NULL || eof)
   1158 		revents |= POLLHUP;
   1159 
   1160 	if (revents == 0) {
   1161 		if (events & (POLLIN | POLLRDNORM))
   1162 			selrecord(curlwp, &rpipe->pipe_sel);
   1163 
   1164 		if (events & (POLLOUT | POLLWRNORM))
   1165 			selrecord(curlwp, &wpipe->pipe_sel);
   1166 	}
   1167 	mutex_exit(rpipe->pipe_lock);
   1168 
   1169 	return (revents);
   1170 }
   1171 
   1172 static int
   1173 pipe_stat(struct file *fp, struct stat *ub)
   1174 {
   1175 	struct pipe *pipe = fp->f_data;
   1176 
   1177 	memset((void *)ub, 0, sizeof(*ub));
   1178 	ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
   1179 	ub->st_blksize = pipe->pipe_buffer.size;
   1180 	if (ub->st_blksize == 0 && pipe->pipe_peer)
   1181 		ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
   1182 	ub->st_size = pipe->pipe_buffer.cnt;
   1183 	ub->st_blocks = (ub->st_size) ? 1 : 0;
   1184 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
   1185 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
   1186 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
   1187 	ub->st_uid = kauth_cred_geteuid(fp->f_cred);
   1188 	ub->st_gid = kauth_cred_getegid(fp->f_cred);
   1189 
   1190 	/*
   1191 	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
   1192 	 * XXX (st_dev, st_ino) should be unique.
   1193 	 */
   1194 	return (0);
   1195 }
   1196 
   1197 /* ARGSUSED */
   1198 static int
   1199 pipe_close(struct file *fp)
   1200 {
   1201 	struct pipe *pipe = fp->f_data;
   1202 
   1203 	fp->f_data = NULL;
   1204 	pipeclose(fp, pipe);
   1205 	return (0);
   1206 }
   1207 
   1208 static void
   1209 pipe_free_kmem(struct pipe *pipe)
   1210 {
   1211 
   1212 	if (pipe->pipe_buffer.buffer != NULL) {
   1213 		if (pipe->pipe_buffer.size > PIPE_SIZE)
   1214 			atomic_dec_uint(&nbigpipe);
   1215 		uvm_km_free(kernel_map,
   1216 			(vaddr_t)pipe->pipe_buffer.buffer,
   1217 			pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
   1218 		atomic_add_int(&amountpipekva, -pipe->pipe_buffer.size);
   1219 		pipe->pipe_buffer.buffer = NULL;
   1220 	}
   1221 #ifndef PIPE_NODIRECT
   1222 	if (pipe->pipe_map.kva != 0) {
   1223 		pipe_loan_free(pipe);
   1224 		pipe->pipe_map.cnt = 0;
   1225 		pipe->pipe_map.kva = 0;
   1226 		pipe->pipe_map.pos = 0;
   1227 		pipe->pipe_map.npages = 0;
   1228 	}
   1229 #endif /* !PIPE_NODIRECT */
   1230 }
   1231 
   1232 /*
   1233  * shutdown the pipe
   1234  */
   1235 static void
   1236 pipeclose(struct file *fp, struct pipe *pipe)
   1237 {
   1238 	struct pipe_mutex *mutex;
   1239 	kmutex_t *lock;
   1240 	struct pipe *ppipe;
   1241 	u_int refcnt;
   1242 
   1243 	if (pipe == NULL)
   1244 		return;
   1245 
   1246 	KASSERT(cv_is_valid(&pipe->pipe_rcv));
   1247 	KASSERT(cv_is_valid(&pipe->pipe_wcv));
   1248 	KASSERT(cv_is_valid(&pipe->pipe_draincv));
   1249 	KASSERT(cv_is_valid(&pipe->pipe_lkcv));
   1250 
   1251 	lock = pipe->pipe_lock;
   1252 	mutex_enter(lock);
   1253 	pipeselwakeup(pipe, pipe, POLL_HUP);
   1254 
   1255 	/*
   1256 	 * If the other side is blocked, wake it up saying that
   1257 	 * we want to close it down.
   1258 	 */
   1259 	pipe->pipe_state |= PIPE_EOF;
   1260 	if (pipe->pipe_busy) {
   1261 		while (pipe->pipe_busy) {
   1262 			cv_broadcast(&pipe->pipe_wcv);
   1263 			cv_wait_sig(&pipe->pipe_draincv, lock);
   1264 		}
   1265 	}
   1266 
   1267 	/*
   1268 	 * Disconnect from peer
   1269 	 */
   1270 	if ((ppipe = pipe->pipe_peer) != NULL) {
   1271 		pipeselwakeup(ppipe, ppipe, POLL_HUP);
   1272 		ppipe->pipe_state |= PIPE_EOF;
   1273 		cv_broadcast(&ppipe->pipe_rcv);
   1274 		ppipe->pipe_peer = NULL;
   1275 	}
   1276 
   1277 	KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
   1278 
   1279 	mutex = (struct pipe_mutex *)lock;
   1280 	refcnt = --(mutex->pm_refcnt);
   1281 	KASSERT(refcnt == 0 || refcnt == 1);
   1282 	mutex_exit(lock);
   1283 
   1284 	/*
   1285 	 * free resources
   1286 	 */
   1287 	pipe_free_kmem(pipe);
   1288 	cv_destroy(&pipe->pipe_rcv);
   1289 	cv_destroy(&pipe->pipe_wcv);
   1290 	cv_destroy(&pipe->pipe_draincv);
   1291 	cv_destroy(&pipe->pipe_lkcv);
   1292 	seldestroy(&pipe->pipe_sel);
   1293 	pool_cache_put(pipe_cache, pipe);
   1294 	if (refcnt == 0)
   1295 		pool_cache_put(pipe_mutex_cache, mutex);
   1296 }
   1297 
   1298 static void
   1299 filt_pipedetach(struct knote *kn)
   1300 {
   1301 	struct pipe *pipe;
   1302 	kmutex_t *lock;
   1303 
   1304 	pipe = ((file_t *)kn->kn_obj)->f_data;
   1305 	lock = pipe->pipe_lock;
   1306 
   1307 	mutex_enter(lock);
   1308 
   1309 	switch(kn->kn_filter) {
   1310 	case EVFILT_WRITE:
   1311 		/* need the peer structure, not our own */
   1312 		pipe = pipe->pipe_peer;
   1313 
   1314 		/* if reader end already closed, just return */
   1315 		if (pipe == NULL) {
   1316 			mutex_exit(lock);
   1317 			return;
   1318 		}
   1319 
   1320 		break;
   1321 	default:
   1322 		/* nothing to do */
   1323 		break;
   1324 	}
   1325 
   1326 #ifdef DIAGNOSTIC
   1327 	if (kn->kn_hook != pipe)
   1328 		panic("filt_pipedetach: inconsistent knote");
   1329 #endif
   1330 
   1331 	SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
   1332 	mutex_exit(lock);
   1333 }
   1334 
   1335 /*ARGSUSED*/
   1336 static int
   1337 filt_piperead(struct knote *kn, long hint)
   1338 {
   1339 	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
   1340 	struct pipe *wpipe;
   1341 
   1342 	if ((hint & NOTE_SUBMIT) == 0) {
   1343 		mutex_enter(rpipe->pipe_lock);
   1344 	}
   1345 	wpipe = rpipe->pipe_peer;
   1346 	kn->kn_data = rpipe->pipe_buffer.cnt;
   1347 
   1348 	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
   1349 		kn->kn_data = rpipe->pipe_map.cnt;
   1350 
   1351 	if ((rpipe->pipe_state & PIPE_EOF) ||
   1352 	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
   1353 		kn->kn_flags |= EV_EOF;
   1354 		if ((hint & NOTE_SUBMIT) == 0) {
   1355 			mutex_exit(rpipe->pipe_lock);
   1356 		}
   1357 		return (1);
   1358 	}
   1359 
   1360 	if ((hint & NOTE_SUBMIT) == 0) {
   1361 		mutex_exit(rpipe->pipe_lock);
   1362 	}
   1363 	return (kn->kn_data > 0);
   1364 }
   1365 
   1366 /*ARGSUSED*/
   1367 static int
   1368 filt_pipewrite(struct knote *kn, long hint)
   1369 {
   1370 	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
   1371 	struct pipe *wpipe;
   1372 
   1373 	if ((hint & NOTE_SUBMIT) == 0) {
   1374 		mutex_enter(rpipe->pipe_lock);
   1375 	}
   1376 	wpipe = rpipe->pipe_peer;
   1377 
   1378 	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
   1379 		kn->kn_data = 0;
   1380 		kn->kn_flags |= EV_EOF;
   1381 		if ((hint & NOTE_SUBMIT) == 0) {
   1382 			mutex_exit(rpipe->pipe_lock);
   1383 		}
   1384 		return (1);
   1385 	}
   1386 	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
   1387 	if (wpipe->pipe_state & PIPE_DIRECTW)
   1388 		kn->kn_data = 0;
   1389 
   1390 	if ((hint & NOTE_SUBMIT) == 0) {
   1391 		mutex_exit(rpipe->pipe_lock);
   1392 	}
   1393 	return (kn->kn_data >= PIPE_BUF);
   1394 }
   1395 
   1396 static const struct filterops pipe_rfiltops =
   1397 	{ 1, NULL, filt_pipedetach, filt_piperead };
   1398 static const struct filterops pipe_wfiltops =
   1399 	{ 1, NULL, filt_pipedetach, filt_pipewrite };
   1400 
   1401 /*ARGSUSED*/
   1402 static int
   1403 pipe_kqfilter(struct file *fp, struct knote *kn)
   1404 {
   1405 	struct pipe *pipe;
   1406 	kmutex_t *lock;
   1407 
   1408 	pipe = ((file_t *)kn->kn_obj)->f_data;
   1409 	lock = pipe->pipe_lock;
   1410 
   1411 	mutex_enter(lock);
   1412 
   1413 	switch (kn->kn_filter) {
   1414 	case EVFILT_READ:
   1415 		kn->kn_fop = &pipe_rfiltops;
   1416 		break;
   1417 	case EVFILT_WRITE:
   1418 		kn->kn_fop = &pipe_wfiltops;
   1419 		pipe = pipe->pipe_peer;
   1420 		if (pipe == NULL) {
   1421 			/* other end of pipe has been closed */
   1422 			mutex_exit(lock);
   1423 			return (EBADF);
   1424 		}
   1425 		break;
   1426 	default:
   1427 		mutex_exit(lock);
   1428 		return (EINVAL);
   1429 	}
   1430 
   1431 	kn->kn_hook = pipe;
   1432 	SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
   1433 	mutex_exit(lock);
   1434 
   1435 	return (0);
   1436 }
   1437 
   1438 /*
   1439  * Handle pipe sysctls.
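          * These appear under the kern.pipe node; e.g. (illustrative sysctl(8)
          * usage, not part of this file):
          *
          *	sysctl kern.pipe.maxbigpipes
          *	sysctl -w kern.pipe.maxbigpipes=64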
   1440  */
   1441 SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
   1442 {
   1443 
   1444 	sysctl_createv(clog, 0, NULL, NULL,
   1445 		       CTLFLAG_PERMANENT,
   1446 		       CTLTYPE_NODE, "kern", NULL,
   1447 		       NULL, 0, NULL, 0,
   1448 		       CTL_KERN, CTL_EOL);
   1449 	sysctl_createv(clog, 0, NULL, NULL,
   1450 		       CTLFLAG_PERMANENT,
   1451 		       CTLTYPE_NODE, "pipe",
   1452 		       SYSCTL_DESCR("Pipe settings"),
   1453 		       NULL, 0, NULL, 0,
   1454 		       CTL_KERN, KERN_PIPE, CTL_EOL);
   1455 
   1456 	sysctl_createv(clog, 0, NULL, NULL,
   1457 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1458 		       CTLTYPE_INT, "maxkvasz",
   1459 		       SYSCTL_DESCR("Maximum amount of kernel memory to be "
   1460 				    "used for pipes"),
   1461 		       NULL, 0, &maxpipekva, 0,
   1462 		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
   1463 	sysctl_createv(clog, 0, NULL, NULL,
   1464 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1465 		       CTLTYPE_INT, "maxloankvasz",
   1466 		       SYSCTL_DESCR("Limit for direct transfers via page loan"),
   1467 		       NULL, 0, &limitpipekva, 0,
   1468 		       CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
   1469 	sysctl_createv(clog, 0, NULL, NULL,
   1470 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1471 		       CTLTYPE_INT, "maxbigpipes",
   1472 		       SYSCTL_DESCR("Maximum number of \"big\" pipes"),
   1473 		       NULL, 0, &maxbigpipes, 0,
   1474 		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
   1475 	sysctl_createv(clog, 0, NULL, NULL,
   1476 		       CTLFLAG_PERMANENT,
   1477 		       CTLTYPE_INT, "nbigpipes",
   1478 		       SYSCTL_DESCR("Number of \"big\" pipes"),
   1479 		       NULL, 0, &nbigpipe, 0,
   1480 		       CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
   1481 	sysctl_createv(clog, 0, NULL, NULL,
   1482 		       CTLFLAG_PERMANENT,
   1483 		       CTLTYPE_INT, "kvasize",
   1484 		       SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
   1485 				    "buffers"),
   1486 		       NULL, 0, &amountpipekva, 0,
   1487 		       CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
   1488 }
   1489