/*	$NetBSD: sys_pipe.c,v 1.104 2009/01/20 14:50:22 yamt Exp $	*/

/*-
 * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
 */
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 *
 * Adaptation for NetBSD UVM, including the uvm_loan() based direct write,
 * was written by Jaromir Dolecek.
 */

/*
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is smaller than PIPE_MINDIRECT, the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is mapped read-only into the kernel address
 * space using the UVM page loan facility, and the receiving process copies
 * the data directly from the pages of the sending process.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers, so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
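
/*
 * Illustrative userland sketch (not part of this kernel source): the
 * mode selection above is transparent to applications.  A small write
 * goes through the kernel buffer; a large blocking write may be page
 * loaned instead.  The 512 KB size below is an assumption, chosen only
 * to exceed PIPE_MINDIRECT on typical configurations.
 *
 *	#include <err.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		static char big[512 * 1024];
 *		char buf[8192];
 *		int fds[2];
 *
 *		if (pipe(fds) == -1)
 *			err(1, "pipe");
 *		if (fork() == 0) {
 *			close(fds[1]);
 *			while (read(fds[0], buf, sizeof(buf)) > 0)
 *				continue;
 *			_exit(0);
 *		}
 *		close(fds[0]);
 *		memset(big, 'x', sizeof(big));
 *		write(fds[1], big, 64);			// buffered path
 *		write(fds[1], big, sizeof(big));	// may use page loan
 *		close(fds[1]);				// reader sees EOF
 *		return 0;
 *	}
 */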

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.104 2009/01/20 14:50:22 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/pipe.h>

#include <uvm/uvm.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
		kauth_cred_t cred, int flags);
static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
		kauth_cred_t cred, int flags);
static int pipe_close(struct file *fp);
static int pipe_poll(struct file *fp, int events);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb);
static int pipe_ioctl(struct file *fp, u_long cmd, void *data);

static const struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, fnullop_fcntl, pipe_poll,
	pipe_stat, pipe_close, pipe_kqfilter
};

/*
 * Single mutex shared between both ends of the pipe.
 */

struct pipe_mutex {
	kmutex_t	pm_mutex;
	u_int		pm_refcnt;
};

/*
 * Default pipe buffer size(s); this can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is a soft limit, but
 * is there so that on large systems we don't exhaust kernel
 * virtual memory.
 */
#define MAXPIPEKVA (8*1024*1024)
static u_int maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit the
 * amount of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)
static u_int limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES  32
static u_int maxbigpipes = LIMITBIGPIPES;
static u_int nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static u_int amountpipekva = 0;

static void pipeclose(struct file *fp, struct pipe *pipe);
static void pipe_free_kmem(struct pipe *pipe);
static int pipe_create(struct pipe **pipep, int allockva, struct pipe_mutex *);
static int pipelock(struct pipe *pipe, int catch);
static inline void pipeunlock(struct pipe *pipe);
static void pipeselwakeup(struct pipe *pipe, struct pipe *sigp, int code);
#ifndef PIPE_NODIRECT
static int pipe_direct_write(struct file *fp, struct pipe *wpipe,
    struct uio *uio);
#endif
static int pipespace(struct pipe *pipe, int size);

#ifndef PIPE_NODIRECT
static int pipe_loan_alloc(struct pipe *, int);
static void pipe_loan_free(struct pipe *);
#endif /* PIPE_NODIRECT */

static int pipe_mutex_ctor(void *, void *, int);
static void pipe_mutex_dtor(void *, void *);

static pool_cache_t pipe_cache;
static pool_cache_t pipe_mutex_cache;

void
pipe_init(void)
{

	pipe_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipepl",
	    NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(pipe_cache != NULL);

	pipe_mutex_cache = pool_cache_init(sizeof(struct pipe_mutex),
	    coherency_unit, 0, 0, "pipemtxpl", NULL, IPL_NONE, pipe_mutex_ctor,
	    pipe_mutex_dtor, NULL);
	KASSERT(pipe_mutex_cache != NULL);
}

static int
pipe_mutex_ctor(void *arg, void *obj, int flag)
{
	struct pipe_mutex *pm = obj;

	mutex_init(&pm->pm_mutex, MUTEX_DEFAULT, IPL_NONE);
	pm->pm_refcnt = 0;

	return 0;
}

static void
pipe_mutex_dtor(void *arg, void *obj)
{
	struct pipe_mutex *pm = obj;

	KASSERT(pm->pm_refcnt == 0);

	mutex_destroy(&pm->pm_mutex);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
sys_pipe(struct lwp *l, const void *v, register_t *retval)
{
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	struct pipe_mutex *mutex;
	int fd, error;
	proc_t *p;

	p = curproc;
	rpipe = wpipe = NULL;
	mutex = pool_cache_get(pipe_mutex_cache, PR_WAITOK);
	if (mutex == NULL)
		return (ENOMEM);
	if (pipe_create(&rpipe, 1, mutex) || pipe_create(&wpipe, 0, mutex)) {
		pipeclose(NULL, rpipe);
		pipeclose(NULL, wpipe);
		return (ENFILE);
	}

	error = fd_allocfile(&rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (void *)rpipe;
	rf->f_ops = &pipeops;

	error = fd_allocfile(&wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (void *)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	fd_affix(p, rf, (int)retval[0]);
	fd_affix(p, wf, (int)retval[1]);
	return (0);
free3:
	fd_abort(p, rf, (int)retval[0]);
free2:
	pipeclose(NULL, wpipe);
	pipeclose(NULL, rpipe);

	return (error);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *pipe, int size)
{
	void *buffer;
	/*
	 * Allocate pageable virtual address space. Physical memory is
	 * allocated on demand.
	 */
	buffer = (void *) uvm_km_alloc(kernel_map, round_page(size), 0,
	    UVM_KMF_PAGEABLE);
	if (buffer == NULL)
		return (ENOMEM);

	/* free old resources if we're resizing */
	pipe_free_kmem(pipe);
	pipe->pipe_buffer.buffer = buffer;
	pipe->pipe_buffer.size = size;
	pipe->pipe_buffer.in = 0;
	pipe->pipe_buffer.out = 0;
	pipe->pipe_buffer.cnt = 0;
	atomic_add_int(&amountpipekva, pipe->pipe_buffer.size);
	return (0);
}

/*
 * Initialize and allocate VM and memory for a pipe.
 */
static int
pipe_create(struct pipe **pipep, int allockva, struct pipe_mutex *mutex)
{
	struct pipe *pipe;
	int error;

	pipe = *pipep = pool_cache_get(pipe_cache, PR_WAITOK);
	mutex->pm_refcnt++;

	/* Initialize */
	memset(pipe, 0, sizeof(struct pipe));
	pipe->pipe_state = PIPE_SIGNALR;

	getmicrotime(&pipe->pipe_ctime);
	pipe->pipe_atime = pipe->pipe_ctime;
	pipe->pipe_mtime = pipe->pipe_ctime;
	pipe->pipe_lock = &mutex->pm_mutex;
	cv_init(&pipe->pipe_rcv, "piperd");
	cv_init(&pipe->pipe_wcv, "pipewr");
	cv_init(&pipe->pipe_draincv, "pipedrain");
	cv_init(&pipe->pipe_lkcv, "pipelk");
	selinit(&pipe->pipe_sel);

	if (allockva && (error = pipespace(pipe, PIPE_SIZE)))
		return (error);

	return (0);
}


/*
 * Lock a pipe for exclusive I/O, blocking other access.
 * Called with the pipe mutex held; returns with the mutex still
 * held, and with PIPE_LOCKFL set on success.
 */
static int
pipelock(struct pipe *pipe, int catch)
{
	int error;

	KASSERT(mutex_owned(pipe->pipe_lock));

	while (pipe->pipe_state & PIPE_LOCKFL) {
		pipe->pipe_state |= PIPE_LWANT;
		if (catch) {
			error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
			if (error != 0)
				return error;
		} else
			cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
	}

	pipe->pipe_state |= PIPE_LOCKFL;

	return 0;
}

/*
 * unlock a pipe I/O lock
 */
static inline void
pipeunlock(struct pipe *pipe)
{

	KASSERT(pipe->pipe_state & PIPE_LOCKFL);

	pipe->pipe_state &= ~PIPE_LOCKFL;
	if (pipe->pipe_state & PIPE_LWANT) {
		pipe->pipe_state &= ~PIPE_LWANT;
		cv_broadcast(&pipe->pipe_lkcv);
	}
}
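
/*
 * Illustrative sketch of the locking pattern used by the I/O paths in
 * this file: the shared mutex (pipe_lock) protects the state bits and
 * condition variables, while the long-term PIPE_LOCKFL lock taken by
 * pipelock()/pipeunlock() serializes whole I/O operations.  The helper
 * name below is hypothetical, for illustration only.
 *
 *	static int
 *	pipe_do_io(struct pipe *pipe)
 *	{
 *		kmutex_t *lock = pipe->pipe_lock;
 *		int error;
 *
 *		mutex_enter(lock);
 *		error = pipelock(pipe, 1);	// may sleep, catches signals
 *		if (error != 0) {
 *			mutex_exit(lock);
 *			return error;
 *		}
 *		// ... move data, dropping 'lock' around uiomove() ...
 *		pipeunlock(pipe);		// wakes PIPE_LWANT waiters
 *		mutex_exit(lock);
 *		return 0;
 *	}
 */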

/*
 * Select/poll wakeup.  This also sends SIGIO to the peer connected
 * to the 'sigp' side of the pipe.
 */
static void
pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
{
	int band;

	switch (code) {
	case POLL_IN:
		band = POLLIN|POLLRDNORM;
		break;
	case POLL_OUT:
		band = POLLOUT|POLLWRNORM;
		break;
	case POLL_HUP:
		band = POLLHUP;
		break;
	case POLL_ERR:
		band = POLLERR;
		break;
	default:
		band = 0;
#ifdef DIAGNOSTIC
		printf("bad siginfo code %d in pipe notification.\n", code);
#endif
		break;
	}

	selnotify(&selp->pipe_sel, band, NOTE_SUBMIT);

	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
		return;

	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
}

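/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * arranging to receive the SIGIO that pipeselwakeup() delivers.  The
 * owner is registered with FIOSETOWN and async mode is enabled with
 * FIOASYNC, which sets PIPE_ASYNC via pipe_ioctl() below.
 *
 *	#include <sys/ioctl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void
 *	on_sigio(int sig)
 *	{
 *		// pipe became readable or writable
 *	}
 *
 *	static int
 *	enable_async(int fd)
 *	{
 *		int on = 1, owner = getpid();
 *
 *		signal(SIGIO, on_sigio);
 *		if (ioctl(fd, FIOSETOWN, &owner) == -1)
 *			return -1;
 *		return ioctl(fd, FIOASYNC, &on);
 *	}
 */
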
/* ARGSUSED */
static int
pipe_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	struct pipebuf *bp = &rpipe->pipe_buffer;
	kmutex_t *lock = rpipe->pipe_lock;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;

	mutex_enter(lock);
	++rpipe->pipe_busy;
	ocnt = bp->cnt;

again:
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (bp->cnt > 0) {
			size = bp->size - bp->out;
			if (size > bp->cnt)
				size = bp->cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->out, size, uio);
			mutex_enter(lock);
			if (error)
				break;

			bp->out += size;
			if (bp->out >= bp->size)
				bp->out = 0;

			bp->cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (bp->cnt == 0) {
				bp->in = 0;
				bp->out = 0;
			}
			nread += size;
			continue;
		}

#ifndef PIPE_NODIRECT
		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
			/*
			 * Direct copy, bypassing a kernel buffer.
			 */
			void *	va;

			KASSERT(rpipe->pipe_state & PIPE_DIRECTW);

			size = rpipe->pipe_map.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (char *)rpipe->pipe_map.kva + rpipe->pipe_map.pos;
			mutex_exit(lock);
			error = uiomove(va, size, uio);
			mutex_enter(lock);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTR;
				cv_broadcast(&rpipe->pipe_wcv);
			}
			continue;
		}
#endif
		/*
		 * Break if some data was read.
		 */
		if (nread > 0)
			break;

		/*
		 * detect EOF condition
		 * read returns 0 on EOF, no need to set error
		 */
		if (rpipe->pipe_state & PIPE_EOF)
			break;

		/*
		 * don't block on non-blocking I/O
		 */
		if (fp->f_flag & FNONBLOCK) {
			error = EAGAIN;
			break;
		}

		/*
		 * Unlock the pipe buffer for our remaining processing.
		 * We will either break out with an error or we will
		 * sleep and relock to loop.
		 */
		pipeunlock(rpipe);

		/*
		 * Re-check to see if more direct writes are pending.
		 */
		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
			goto again;

		/*
		 * We want to read more, wake up select/poll.
		 */
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_IN);

		/*
		 * If the "write-side" is blocked, wake it up now.
		 */
		cv_broadcast(&rpipe->pipe_wcv);

		/* Now wait until the pipe is filled */
		error = cv_wait_sig(&rpipe->pipe_rcv, lock);
		if (error != 0)
			goto unlocked_error;
		goto again;
	}

	if (error == 0)
		getmicrotime(&rpipe->pipe_atime);
	pipeunlock(rpipe);

unlocked_error:
	--rpipe->pipe_busy;
	if (rpipe->pipe_busy == 0) {
		cv_broadcast(&rpipe->pipe_draincv);
	}
	if (bp->cnt < MINPIPESIZE) {
		cv_broadcast(&rpipe->pipe_wcv);
	}

	/*
	 * If anything was read off the buffer, signal to the writer that
	 * it's possible to write more data.  Also send the signal if we
	 * are here for the first time after the last write.
	 */
	if ((bp->size - bp->cnt) >= PIPE_BUF
	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	mutex_exit(lock);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static int
pipe_loan_alloc(struct pipe *wpipe, int npages)
{
	vsize_t len;

	len = (vsize_t)npages << PAGE_SHIFT;
	atomic_add_int(&amountpipekva, len);
	wpipe->pipe_map.kva = uvm_km_alloc(kernel_map, len, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (wpipe->pipe_map.kva == 0) {
		atomic_add_int(&amountpipekva, -len);
		return (ENOMEM);
	}

	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.pgs = kmem_alloc(npages * sizeof(struct vm_page *),
	    KM_SLEEP);
	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(struct pipe *wpipe)
{
	vsize_t len;

	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len, UVM_KMF_VAONLY);
	wpipe->pipe_map.kva = 0;
	atomic_add_int(&amountpipekva, -len);
	kmem_free(wpipe->pipe_map.pgs,
	    wpipe->pipe_map.npages * sizeof(struct vm_page *));
	wpipe->pipe_map.pgs = NULL;
}

/*
 * NetBSD direct write, using the uvm_loan() mechanism.
 * This implements the direct-mapped pipe write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 *
 * Called with the long-term pipe lock held.
 */
static int
pipe_direct_write(struct file *fp, struct pipe *wpipe, struct uio *uio)
{
	int error, npages, j;
	struct vm_page **pgs;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;
	kmutex_t *lock = wpipe->pipe_lock;

	KASSERT(mutex_owned(wpipe->pipe_lock));
	KASSERT(wpipe->pipe_map.cnt == 0);

	mutex_exit(lock);

	/*
	 * Handle the first PIPE_DIRECT_CHUNK bytes of the buffer.  Deal
	 * with buffers not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva != 0 && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error) {
			mutex_enter(lock);
			return (error);
		}
	}

	/* Loan the write buffer memory from the writer process */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_vmspace->vm_map, base, blen,
			 pgs, UVM_LOAN_TOPAGE);
	if (error) {
		pipe_loan_free(wpipe);
		mutex_enter(lock);
		return (ENOMEM); /* so that the caller falls back to ordinary write */
	}

	/* Enter the loaned pages into the kva */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
	}
	pmap_update(pmap_kernel());

	/* Now we can put the pipe in direct write mode */
	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;

	/*
	 * But before we can let someone do a direct read, we
	 * have to wait until the pipe is drained.  Release the
	 * pipe lock while we wait.
	 */
	mutex_enter(lock);
	wpipe->pipe_state |= PIPE_DIRECTW;
	pipeunlock(wpipe);

	while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Pipe is drained; next read will come off the direct buffer */
	wpipe->pipe_state |= PIPE_DIRECTR;

	/* Wait until the reader is done */
	while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
		cv_broadcast(&wpipe->pipe_rcv);
		pipeselwakeup(wpipe, wpipe, POLL_IN);
		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Take pipe out of direct write mode */
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);

	/* Acquire the pipe lock and cleanup */
	(void)pipelock(wpipe, 0);
	mutex_exit(lock);

	if (pgs != NULL) {
		pmap_kremove(wpipe->pipe_map.kva, blen);
		pmap_update(pmap_kernel());
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);

	mutex_enter(lock);
	if (error) {
		pipeselwakeup(wpipe, wpipe, POLL_ERR);

		/*
		 * If nothing was read from what we offered, return the
		 * error straight away.  Otherwise update uio resid first.
		 * The caller will deal with the error condition, returning
		 * a short write, an error, or restarting the write(2) as
		 * appropriate.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
			wpipe->pipe_map.cnt = 0;
			cv_broadcast(&wpipe->pipe_wcv);
			return (error);
		}

		bcnt -= wpipe->pipe_map.cnt;
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	wpipe->pipe_map.cnt = 0;
	return (error);
}
#endif /* !PIPE_NODIRECT */

static int
pipe_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *wpipe, *rpipe;
	struct pipebuf *bp;
	kmutex_t *lock;
	int error;

	/* We want to write to our peer */
	rpipe = (struct pipe *) fp->f_data;
	lock = rpipe->pipe_lock;
	error = 0;

	mutex_enter(lock);
	wpipe = rpipe->pipe_peer;

	/*
	 * Detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) != 0) {
		mutex_exit(lock);
		return EPIPE;
	}
	++wpipe->pipe_busy;

	/* Acquire the long-term pipe lock */
	if ((error = pipelock(wpipe, 1)) != 0) {
		--wpipe->pipe_busy;
		if (wpipe->pipe_busy == 0) {
			cv_broadcast(&wpipe->pipe_draincv);
		}
		mutex_exit(lock);
		return (error);
	}

	bp = &wpipe->pipe_buffer;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {

		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
			atomic_inc_uint(&nbigpipe);
	}

	while (uio->uio_resid) {
		size_t space;

#ifndef PIPE_NODIRECT
		/*
		 * Buffered pipe writes cannot proceed concurrently with
		 * direct writes.  Also, only one direct write can be in
		 * progress at any one time.  We wait until the currently
		 * executing direct write is completed before continuing.
		 *
		 * We break out if a signal occurs or the reader goes away.
		 */
		while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
			cv_broadcast(&wpipe->pipe_rcv);
			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, 0);
			if (wpipe->pipe_state & PIPE_EOF)
				error = EPIPE;
		}
		if (error)
			break;

		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(fp, wpipe, uio);

			/*
			 * Break out if an error occurred, unless it's
			 * ENOMEM.  ENOMEM means we failed to allocate
			 * some resources for the direct write, so we just
			 * fall back to ordinary write.  If the direct write
			 * was successful, process the rest of the data via
			 * ordinary write.
			 */
			if (error == 0)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* PIPE_NODIRECT */

		space = bp->size - bp->cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = bp->size - bp->in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */
			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->in, segsize,
			    uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
#ifdef DEBUG
				if (bp->in + segsize != bp->size)
					panic("Expected pipe buffer wraparound disappeared");
#endif

				error = uiomove(bp->buffer,
				    size - segsize, uio);
			}
			mutex_enter(lock);
			if (error)
				break;

			bp->in += size;
			if (bp->in >= bp->size) {
#ifdef DEBUG
				if (bp->in != size - segsize + bp->size)
					panic("Expected wraparound bad");
#endif
				bp->in = size - segsize;
			}

			bp->cnt += size;
#ifdef DEBUG
			if (bp->cnt > bp->size)
				panic("Pipe buffer overflow");
#endif
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			cv_broadcast(&wpipe->pipe_rcv);

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			if (bp->cnt)
				pipeselwakeup(wpipe, wpipe, POLL_OUT);

			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, 0);
			if (error != 0)
				break;
			/*
			 * If the read side wants to go away, return EPIPE;
			 * the caller will post SIGPIPE to us.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if (wpipe->pipe_busy == 0) {
		cv_broadcast(&wpipe->pipe_draincv);
	}
	if (bp->cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
		error = 0;

	if (error == 0)
		getmicrotime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (bp->cnt)
		pipeselwakeup(wpipe, wpipe, POLL_OUT);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	pipeunlock(wpipe);
	mutex_exit(lock);
	return (error);
}
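
/*
 * Illustrative userland consequence of the PIPE_BUF rule enforced in
 * pipe_write() above (a sketch, not part of this file): writes of at
 * most PIPE_BUF bytes are never interleaved with other writers, so
 * several processes can share one pipe as a record stream.
 *
 *	#include <limits.h>
 *	#include <unistd.h>
 *
 *	// Called concurrently from several processes sharing 'fd'.
 *	static ssize_t
 *	put_record(int fd, const char *rec, size_t len)
 *	{
 *		if (len > PIPE_BUF)
 *			return -1;	// could be split, not atomic
 *		return write(fd, rec, len);
 *	}
 */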

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct pipe *pipe = fp->f_data;
	kmutex_t *lock = pipe->pipe_lock;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		mutex_enter(lock);
		if (*(int *)data) {
			pipe->pipe_state |= PIPE_ASYNC;
		} else {
			pipe->pipe_state &= ~PIPE_ASYNC;
		}
		mutex_exit(lock);
		return (0);

	case FIONREAD:
		mutex_enter(lock);
#ifndef PIPE_NODIRECT
		if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = pipe->pipe_map.cnt;
		else
#endif
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONWRITE:
		/* Look at other side */
		mutex_enter(lock);
		pipe = pipe->pipe_peer;
		if (pipe == NULL)		/* peer already closed */
			*(int *)data = 0;
#ifndef PIPE_NODIRECT
		else if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = pipe->pipe_map.cnt;
#endif
		else
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONSPACE:
		/* Look at other side */
		mutex_enter(lock);
		pipe = pipe->pipe_peer;
		if (pipe == NULL)		/* peer already closed */
			*(int *)data = 0;
#ifndef PIPE_NODIRECT
		/*
		 * If we're in direct-mode, we don't really have a
		 * send queue, and any other write will block.  Thus
		 * zero seems like the best answer.
		 */
		else if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = 0;
#endif
		else
			*(int *)data = pipe->pipe_buffer.size -
			    pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&pipe->pipe_pgid, cmd, data);

	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(pipe->pipe_pgid, cmd, data);

	}
	return (EPASSTHROUGH);
}
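
/*
 * Illustrative userland sketch of the ioctls above (an assumption, not
 * part of this file): FIONREAD reports the bytes readable on the read
 * end, FIONSPACE the space left on the write end.
 *
 *	#include <sys/ioctl.h>
 *
 *	static int
 *	pipe_counts(int rfd, int wfd, int *pending, int *space)
 *	{
 *		if (ioctl(rfd, FIONREAD, pending) == -1)
 *			return -1;
 *		return ioctl(wfd, FIONSPACE, space);
 *	}
 */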

int
pipe_poll(struct file *fp, int events)
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int eof = 0;
	int revents = 0;

	mutex_enter(rpipe->pipe_lock);
	wpipe = rpipe->pipe_peer;

	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTR) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	eof |= (rpipe->pipe_state & PIPE_EOF);

	if (wpipe == NULL)
		revents |= events & (POLLOUT | POLLWRNORM);
	else {
		if (events & (POLLOUT | POLLWRNORM))
			if ((wpipe->pipe_state & PIPE_EOF) || (
#ifndef PIPE_NODIRECT
			     (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
			     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
				revents |= events & (POLLOUT | POLLWRNORM);

		eof |= (wpipe->pipe_state & PIPE_EOF);
	}

	if (wpipe == NULL || eof)
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(curlwp, &rpipe->pipe_sel);

		if (events & (POLLOUT | POLLWRNORM))
			selrecord(curlwp, &wpipe->pipe_sel);
	}
	mutex_exit(rpipe->pipe_lock);

	return (revents);
}
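
/*
 * Illustrative userland sketch matching pipe_poll() above (an
 * assumption, not part of this file): POLLIN fires once data or EOF
 * is available, POLLOUT requires at least PIPE_BUF free bytes, and
 * POLLHUP reports a vanished peer.
 *
 *	#include <poll.h>
 *
 *	static int
 *	wait_readable(int rfd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = rfd, .events = POLLIN };
 *		int n = poll(&pfd, 1, timeout_ms);
 *
 *		if (n <= 0)
 *			return n;		// error or timeout
 *		if (pfd.revents & POLLIN)
 *			return 1;		// data (or EOF) to read
 *		return 0;			// POLLHUP only
 *	}
 */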

static int
pipe_stat(struct file *fp, struct stat *ub)
{
	struct pipe *pipe = fp->f_data;

	memset((void *)ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	ub->st_blksize = pipe->pipe_buffer.size;
	if (ub->st_blksize == 0 && pipe->pipe_peer)
		ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
	ub->st_uid = kauth_cred_geteuid(fp->f_cred);
	ub->st_gid = kauth_cred_getegid(fp->f_cred);

	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(struct file *fp)
{
	struct pipe *pipe = fp->f_data;

	fp->f_data = NULL;
	pipeclose(fp, pipe);
	return (0);
}

static void
pipe_free_kmem(struct pipe *pipe)
{

	if (pipe->pipe_buffer.buffer != NULL) {
		if (pipe->pipe_buffer.size > PIPE_SIZE)
			atomic_dec_uint(&nbigpipe);
		uvm_km_free(kernel_map,
			(vaddr_t)pipe->pipe_buffer.buffer,
			pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
		atomic_add_int(&amountpipekva, -pipe->pipe_buffer.size);
		pipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (pipe->pipe_map.kva != 0) {
		pipe_loan_free(pipe);
		pipe->pipe_map.cnt = 0;
		pipe->pipe_map.kva = 0;
		pipe->pipe_map.pos = 0;
		pipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * shutdown the pipe
 */
static void
pipeclose(struct file *fp, struct pipe *pipe)
{
	struct pipe_mutex *mutex;
	kmutex_t *lock;
	struct pipe *ppipe;
	u_int refcnt;

	if (pipe == NULL)
		return;

	KASSERT(cv_is_valid(&pipe->pipe_rcv));
	KASSERT(cv_is_valid(&pipe->pipe_wcv));
	KASSERT(cv_is_valid(&pipe->pipe_draincv));
	KASSERT(cv_is_valid(&pipe->pipe_lkcv));

	lock = pipe->pipe_lock;
	mutex_enter(lock);
	pipeselwakeup(pipe, pipe, POLL_HUP);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	pipe->pipe_state |= PIPE_EOF;
	if (pipe->pipe_busy) {
		while (pipe->pipe_busy) {
			cv_broadcast(&pipe->pipe_wcv);
			cv_wait_sig(&pipe->pipe_draincv, lock);
		}
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = pipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe, ppipe, POLL_HUP);
		ppipe->pipe_state |= PIPE_EOF;
		cv_broadcast(&ppipe->pipe_rcv);
		ppipe->pipe_peer = NULL;
	}

	KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);

	mutex = (struct pipe_mutex *)lock;
	refcnt = --(mutex->pm_refcnt);
	KASSERT(refcnt == 0 || refcnt == 1);
	mutex_exit(lock);

	/*
	 * free resources
	 */
	pipe_free_kmem(pipe);
	cv_destroy(&pipe->pipe_rcv);
	cv_destroy(&pipe->pipe_wcv);
	cv_destroy(&pipe->pipe_draincv);
	cv_destroy(&pipe->pipe_lkcv);
	seldestroy(&pipe->pipe_sel);
	pool_cache_put(pipe_cache, pipe);
	if (refcnt == 0)
		pool_cache_put(pipe_mutex_cache, mutex);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_data;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_WRITE:
		/* need the peer structure, not our own */
		pipe = pipe->pipe_peer;

		/* if reader end already closed, just return */
		if (pipe == NULL) {
			mutex_exit(lock);
			return;
		}

		break;
	default:
		/* nothing to do */
		break;
	}

#ifdef DIAGNOSTIC
	if (kn->kn_hook != pipe)
		panic("filt_pipedetach: inconsistent knote");
#endif

	SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(lock);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
	struct pipe *wpipe;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;
	kn->kn_data = rpipe->pipe_buffer.cnt;

	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0) {
			mutex_exit(rpipe->pipe_lock);
		}
		return (1);
	}

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
	struct pipe *wpipe;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0) {
			mutex_exit(rpipe->pipe_lock);
		}
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return (kn->kn_data >= PIPE_BUF);
}

static const struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static const struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_data;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		pipe = pipe->pipe_peer;
		if (pipe == NULL) {
			/* other end of pipe has been closed */
			mutex_exit(lock);
			return (EBADF);
		}
		break;
	default:
		mutex_exit(lock);
		return (EINVAL);
	}

	kn->kn_hook = pipe;
	SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
	mutex_exit(lock);

	return (0);
}

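/*
 * Illustrative userland sketch of the kqueue interface above (an
 * assumption, not part of this file): register EVFILT_READ on the
 * read end; kevent() then reports the pending byte count in the
 * 'data' field (kn_data above) and EV_EOF when the writer goes away.
 *
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	static int
 *	wait_pipe_event(int rfd)
 *	{
 *		struct kevent kev;
 *		int kq;
 *
 *		if ((kq = kqueue()) == -1)
 *			return -1;
 *		EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *			return -1;
 *		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
 *			return -1;
 *		return (kev.flags & EV_EOF) ? 0 : (int)kev.data;
 *	}
 */
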
/*
 * Handle pipe sysctls.
 */
SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "pipe",
		       SYSCTL_DESCR("Pipe settings"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, KERN_PIPE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxkvasz",
		       SYSCTL_DESCR("Maximum amount of kernel memory to be "
				    "used for pipes"),
		       NULL, 0, &maxpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxloankvasz",
		       SYSCTL_DESCR("Limit for direct transfers via page loan"),
		       NULL, 0, &limitpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxbigpipes",
		       SYSCTL_DESCR("Maximum number of \"big\" pipes"),
		       NULL, 0, &maxbigpipes, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "nbigpipes",
		       SYSCTL_DESCR("Number of \"big\" pipes"),
		       NULL, 0, &nbigpipe, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "kvasize",
		       SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
				    "buffers"),
		       NULL, 0, &amountpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
}
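
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * the knobs above appear as kern.pipe.* and can be read with
 * sysctlbyname(3), or from the shell with e.g. "sysctl kern.pipe.kvasize".
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	pipe_kva_in_use(unsigned int *val)
 *	{
 *		size_t len = sizeof(*val);
 *
 *		return sysctlbyname("kern.pipe.kvasize", val, &len, NULL, 0);
 *	}
 */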