/*	$NetBSD: sys_pipe.c,v 1.121 2009/12/09 21:32:59 dsl Exp $	*/

/*-
 * Copyright (c) 2003, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used.  It does not support all features of
 * sockets, but does do everything that pipes normally do.
 *
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the write is smaller than PIPE_MINDIRECT, the
 * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the writer's pages are mapped read-only into the
 * kernel address space using the UVM page loan facility, and the receiving
 * process copies the data directly from the sending process's pages.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
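
/*
 * Illustrative summary of the mode selection, as made in pipe_write()
 * below (a sketch only, not authoritative):
 *
 *	if (iov_len >= PIPE_MINDIRECT && (f_flag & FNONBLOCK) == 0 &&
 *	    KVA is available)
 *		use the page-loan direct write;
 *	else
 *		copy through the kernel ring buffer.
 */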

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.121 2009/12/09 21:32:59 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/kmem.h>	/* kmem_alloc/kmem_free in the direct-write path */
#include <sys/pipe.h>

#include <uvm/uvm.h>

/*
 * Use this to disable direct I/O and decrease the code size:
 * #define PIPE_NODIRECT
 */

/* XXX Disabled for now; rare hangs when switching between direct/buffered */
#define PIPE_NODIRECT

static int	pipe_read(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int	pipe_write(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int	pipe_close(file_t *);
static int	pipe_poll(file_t *, int);
static int	pipe_kqfilter(file_t *, struct knote *);
static int	pipe_stat(file_t *, struct stat *);
static int	pipe_ioctl(file_t *, u_long, void *);

static const struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = pipe_poll,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_kqfilter = pipe_kqfilter,
	.fo_abort = fnullop_abort,
};

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define	MINPIPESIZE	(PIPE_SIZE / 3)
#define	MAXPIPESIZE	(2 * PIPE_SIZE / 3)

/*
 * Maximum amount of KVA for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define	MAXPIPEKVA	(8 * 1024 * 1024)
static u_int	maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of KVA for pipes in general, though.
 */
#define	LIMITPIPEKVA	(16 * 1024 * 1024)
static u_int	limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes.
 */
#define	LIMITBIGPIPES	32
static u_int	maxbigpipes = LIMITBIGPIPES;
static u_int	nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static u_int	amountpipekva = 0;

static void	pipeclose(file_t *, struct pipe *);
static void	pipe_free_kmem(struct pipe *);
static int	pipe_create(struct pipe **, pool_cache_t, kmutex_t *);
static int	pipelock(struct pipe *, int);
static inline void pipeunlock(struct pipe *);
static void	pipeselwakeup(struct pipe *, struct pipe *, int);
#ifndef PIPE_NODIRECT
static int	pipe_direct_write(file_t *, struct pipe *, struct uio *);
#endif
static int	pipespace(struct pipe *, int);
static int	pipe_ctor(void *, void *, int);
static void	pipe_dtor(void *, void *);

#ifndef PIPE_NODIRECT
static int	pipe_loan_alloc(struct pipe *, int);
static void	pipe_loan_free(struct pipe *);
#endif /* PIPE_NODIRECT */

static pool_cache_t	pipe_wr_cache;
static pool_cache_t	pipe_rd_cache;

void
pipe_init(void)
{

	/* Writer side is not automatically allocated KVA. */
	pipe_wr_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipewr",
	    NULL, IPL_NONE, pipe_ctor, pipe_dtor, NULL);
	KASSERT(pipe_wr_cache != NULL);

	/* Reader side gets preallocated KVA. */
	pipe_rd_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "piperd",
	    NULL, IPL_NONE, pipe_ctor, pipe_dtor, (void *)1);
	KASSERT(pipe_rd_cache != NULL);
}

static int
pipe_ctor(void *arg, void *obj, int flags)
{
	struct pipe *pipe;
	vaddr_t va;

	pipe = obj;

	memset(pipe, 0, sizeof(struct pipe));
	if (arg != NULL) {
		/* Preallocate space. */
		va = uvm_km_alloc(kernel_map, PIPE_SIZE, 0,
		    UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
		KASSERT(va != 0);
		pipe->pipe_kmem = va;
		atomic_add_int(&amountpipekva, PIPE_SIZE);
	}
	cv_init(&pipe->pipe_rcv, "piperd");
	cv_init(&pipe->pipe_wcv, "pipewr");
	cv_init(&pipe->pipe_draincv, "pipedrain");
	cv_init(&pipe->pipe_lkcv, "pipelk");
	selinit(&pipe->pipe_sel);
	pipe->pipe_state = PIPE_SIGNALR;

	return 0;
}

static void
pipe_dtor(void *arg, void *obj)
{
	struct pipe *pipe;

	pipe = obj;

	cv_destroy(&pipe->pipe_rcv);
	cv_destroy(&pipe->pipe_wcv);
	cv_destroy(&pipe->pipe_draincv);
	cv_destroy(&pipe->pipe_lkcv);
	seldestroy(&pipe->pipe_sel);
	if (pipe->pipe_kmem != 0) {
		uvm_km_free(kernel_map, pipe->pipe_kmem, PIPE_SIZE,
		    UVM_KMF_PAGEABLE);
		atomic_add_int(&amountpipekva, -PIPE_SIZE);
	}
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.
 */
int
sys_pipe(struct lwp *l, const void *v, register_t *retval)
{
	struct pipe *rpipe, *wpipe;
	file_t *rf, *wf;
	kmutex_t *mutex;
	int fd, error;
	proc_t *p;

	p = curproc;
	rpipe = wpipe = NULL;
	mutex = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	if (mutex == NULL)
		return (ENOMEM);
	mutex_obj_hold(mutex);
	if (pipe_create(&rpipe, pipe_rd_cache, mutex) ||
	    pipe_create(&wpipe, pipe_wr_cache, mutex)) {
		pipeclose(NULL, rpipe);
		pipeclose(NULL, wpipe);
		return (ENFILE);
	}

	error = fd_allocfile(&rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (void *)rpipe;
	rf->f_ops = &pipeops;

	error = fd_allocfile(&wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (void *)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	fd_affix(p, rf, (int)retval[0]);
	fd_affix(p, wf, (int)retval[1]);
	return (0);
free3:
	fd_abort(p, rf, (int)retval[0]);
free2:
	pipeclose(NULL, wpipe);
	pipeclose(NULL, rpipe);

	return (error);
}
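
/*
 * Illustrative userland view of the system call above; a minimal sketch,
 * not kernel code:
 *
 *	int fds[2];
 *	char buf[64];
 *
 *	if (pipe(fds) == -1)
 *		err(EXIT_FAILURE, "pipe");
 *	// fds[0] (retval[0]) is the read end, fds[1] (retval[1]) the write end
 *	(void)write(fds[1], "hello", 5);
 *	(void)read(fds[0], buf, sizeof(buf));
 */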

/*
 * Allocate KVA for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *pipe, int size)
{
	void *buffer;

	/*
	 * Allocate pageable virtual address space.  Physical memory is
	 * allocated on demand.
	 */
	if (size == PIPE_SIZE && pipe->pipe_kmem != 0) {
		buffer = (void *)pipe->pipe_kmem;
	} else {
		buffer = (void *)uvm_km_alloc(kernel_map, round_page(size),
		    0, UVM_KMF_PAGEABLE);
		if (buffer == NULL)
			return (ENOMEM);
		atomic_add_int(&amountpipekva, size);
	}

	/* Free old resources if we're resizing. */
	pipe_free_kmem(pipe);
	pipe->pipe_buffer.buffer = buffer;
	pipe->pipe_buffer.size = size;
	pipe->pipe_buffer.in = 0;
	pipe->pipe_buffer.out = 0;
	pipe->pipe_buffer.cnt = 0;
	return (0);
}
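
/*
 * The pipebuf fields initialized above form a simple ring buffer; a
 * sketch of the invariants maintained by pipe_read()/pipe_write():
 *
 *	0 <= in, out < size	next write / next read offsets
 *	0 <= cnt <= size	bytes currently buffered
 *
 * e.g. writing 5 bytes into an empty buffer leaves in == 5, out == 0,
 * cnt == 5; reading them all back resets in and out to 0.
 */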

/*
 * Initialize and allocate VM and memory for pipe.
 */
static int
pipe_create(struct pipe **pipep, pool_cache_t cache, kmutex_t *mutex)
{
	struct pipe *pipe;
	int error;

	pipe = pool_cache_get(cache, PR_WAITOK);
	KASSERT(pipe != NULL);
	*pipep = pipe;
	error = 0;
	getnanotime(&pipe->pipe_btime);
	pipe->pipe_atime = pipe->pipe_mtime = pipe->pipe_btime;
	pipe->pipe_lock = mutex;
	if (cache == pipe_rd_cache) {
		error = pipespace(pipe, PIPE_SIZE);
	} else {
		pipe->pipe_buffer.buffer = NULL;
		pipe->pipe_buffer.size = 0;
		pipe->pipe_buffer.in = 0;
		pipe->pipe_buffer.out = 0;
		pipe->pipe_buffer.cnt = 0;
	}
	return error;
}

/*
 * Lock a pipe for I/O, blocking other access.
 * Called with the pipe mutex held.
 */
static int
pipelock(struct pipe *pipe, int catch)
{
	int error;

	KASSERT(mutex_owned(pipe->pipe_lock));

	while (pipe->pipe_state & PIPE_LOCKFL) {
		pipe->pipe_state |= PIPE_LWANT;
		if (catch) {
			error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
			if (error != 0)
				return error;
		} else
			cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
	}

	pipe->pipe_state |= PIPE_LOCKFL;

	return 0;
}

/*
 * Unlock a pipe I/O lock.
 */
static inline void
pipeunlock(struct pipe *pipe)
{

	KASSERT(pipe->pipe_state & PIPE_LOCKFL);

	pipe->pipe_state &= ~PIPE_LOCKFL;
	if (pipe->pipe_state & PIPE_LWANT) {
		pipe->pipe_state &= ~PIPE_LWANT;
		cv_broadcast(&pipe->pipe_lkcv);
	}
}
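
/*
 * Typical use of the long-term lock above, as in pipe_read() and
 * pipe_write() below (sketch):
 *
 *	mutex_enter(pipe->pipe_lock);
 *	error = pipelock(pipe, 1);	-- may sleep; interruptible
 *	... move data, dropping pipe_lock around uiomove() ...
 *	pipeunlock(pipe);
 *	mutex_exit(pipe->pipe_lock);
 */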

/*
 * Select/poll wakeup.  This also sends SIGIO to the peer connected to
 * the 'sigp' side of the pipe.
 */
static void
pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
{
	int band;

	switch (code) {
	case POLL_IN:
		band = POLLIN|POLLRDNORM;
		break;
	case POLL_OUT:
		band = POLLOUT|POLLWRNORM;
		break;
	case POLL_HUP:
		band = POLLHUP;
		break;
	case POLL_ERR:
		band = POLLERR;
		break;
	default:
		band = 0;
#ifdef DIAGNOSTIC
		printf("bad siginfo code %d in pipe notification.\n", code);
#endif
		break;
	}

	selnotify(&selp->pipe_sel, band, NOTE_SUBMIT);

	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
		return;

	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
}

static int
pipe_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	struct pipebuf *bp = &rpipe->pipe_buffer;
	kmutex_t *lock = rpipe->pipe_lock;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;

	mutex_enter(lock);
	++rpipe->pipe_busy;
	ocnt = bp->cnt;

again:
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * Normal pipe buffer receive.
		 */
		if (bp->cnt > 0) {
			size = bp->size - bp->out;
			if (size > bp->cnt)
				size = bp->cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->out, size, uio);
			mutex_enter(lock);
			if (error)
				break;

			bp->out += size;
			if (bp->out >= bp->size)
				bp->out = 0;

			bp->cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (bp->cnt == 0) {
				bp->in = 0;
				bp->out = 0;
			}
			nread += size;
			continue;
		}

#ifndef PIPE_NODIRECT
		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
			/*
			 * Direct copy, bypassing a kernel buffer.
			 */
			void *va;
			u_int gen;

			KASSERT(rpipe->pipe_state & PIPE_DIRECTW);

			size = rpipe->pipe_map.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (char *)rpipe->pipe_map.kva + rpipe->pipe_map.pos;
			gen = rpipe->pipe_map.egen;
			mutex_exit(lock);

			/*
			 * Consume emap and read the data from loaned pages.
			 */
			uvm_emap_consume(gen);
			error = uiomove(va, size, uio);

			mutex_enter(lock);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTR;
				cv_broadcast(&rpipe->pipe_wcv);
			}
			continue;
		}
#endif
		/*
		 * Break if some data was read.
		 */
		if (nread > 0)
			break;

		/*
		 * Detect EOF condition.
		 * Read returns 0 on EOF, no need to set error.
		 */
		if (rpipe->pipe_state & PIPE_EOF)
			break;

		/*
		 * Don't block on non-blocking I/O.
		 */
		if (fp->f_flag & FNONBLOCK) {
			error = EAGAIN;
			break;
		}

		/*
		 * Unlock the pipe buffer for our remaining processing.
		 * We will either break out with an error or we will
		 * sleep and relock to loop.
		 */
		pipeunlock(rpipe);

		/*
		 * Re-check to see if more direct writes are pending.
		 */
		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
			goto again;

		/*
		 * We want to read more, wake up select/poll.
		 */
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);

		/*
		 * If the "write-side" is blocked, wake it up now.
		 */
		cv_broadcast(&rpipe->pipe_wcv);

		/* Now wait until the pipe is filled. */
		error = cv_wait_sig(&rpipe->pipe_rcv, lock);
		if (error != 0)
			goto unlocked_error;
		goto again;
	}

	if (error == 0)
		getnanotime(&rpipe->pipe_atime);
	pipeunlock(rpipe);

unlocked_error:
	--rpipe->pipe_busy;
	if (rpipe->pipe_busy == 0) {
		cv_broadcast(&rpipe->pipe_draincv);
	}
	if (bp->cnt < MINPIPESIZE) {
		cv_broadcast(&rpipe->pipe_wcv);
	}

	/*
	 * If anything was read off the buffer, signal to the writer that it's
	 * possible to write more data.  Also send a signal if we are here for
	 * the first time after the last write.
	 */
	if ((bp->size - bp->cnt) >= PIPE_BUF
	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	mutex_exit(lock);
	return (error);
}
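
/*
 * Reader-side semantics implemented above, seen from userland (a sketch,
 * assuming the usual read(2) wrapper):
 *
 *	read() > 0	data was buffered (or a direct write was pending)
 *	read() == 0	writer closed and the buffer drained (EOF)
 *	read() == -1	errno == EAGAIN with O_NONBLOCK and an empty pipe
 *	otherwise	the caller sleeps in cv_wait_sig() until data or EOF
 */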

#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static int
pipe_loan_alloc(struct pipe *wpipe, int npages)
{
	vsize_t len;

	len = (vsize_t)npages << PAGE_SHIFT;
	atomic_add_int(&amountpipekva, len);
	wpipe->pipe_map.kva = uvm_km_alloc(kernel_map, len, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (wpipe->pipe_map.kva == 0) {
		atomic_add_int(&amountpipekva, -len);
		return (ENOMEM);
	}

	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.pgs = kmem_alloc(npages * sizeof(struct vm_page *),
	    KM_SLEEP);
	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(struct pipe *wpipe)
{
	vsize_t len;

	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
	uvm_emap_remove(wpipe->pipe_map.kva, len);	/* XXX */
	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len, UVM_KMF_VAONLY);
	wpipe->pipe_map.kva = 0;
	atomic_add_int(&amountpipekva, -len);
	kmem_free(wpipe->pipe_map.pgs,
	    wpipe->pipe_map.npages * sizeof(struct vm_page *));
	wpipe->pipe_map.pgs = NULL;
}

/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 *
 * Called with the long-term pipe lock held.
 */
static int
pipe_direct_write(file_t *fp, struct pipe *wpipe, struct uio *uio)
{
	struct vm_page **pgs;
	vaddr_t bbase, base, bend;
	vsize_t blen, bcnt;
	int error, npages;
	voff_t bpos;
	kmutex_t *lock = wpipe->pipe_lock;

	KASSERT(mutex_owned(wpipe->pipe_lock));
	KASSERT(wpipe->pipe_map.cnt == 0);

	mutex_exit(lock);

	/*
	 * Handle the first PIPE_DIRECT_CHUNK bytes of the buffer.  Deal
	 * with buffers not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva != 0 && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error) {
			mutex_enter(lock);
			return (error);
		}
	}

	/* Loan the write buffer memory from the writer process. */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_vmspace->vm_map, base, blen,
	    pgs, UVM_LOAN_TOPAGE);
	if (error) {
		pipe_loan_free(wpipe);
		mutex_enter(lock);
		/* ENOMEM makes the caller fall back to ordinary write */
		return (ENOMEM);
	}

	/* Enter the loaned pages to KVA, produce new emap generation number. */
	uvm_emap_enter(wpipe->pipe_map.kva, pgs, npages);
	wpipe->pipe_map.egen = uvm_emap_produce();

	/* Now we can put the pipe in direct write mode. */
	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;

	/*
	 * But before we can let someone do a direct read, we
	 * have to wait until the pipe is drained.  Release the
	 * pipe lock while we wait.
	 */
	mutex_enter(lock);
	wpipe->pipe_state |= PIPE_DIRECTW;
	pipeunlock(wpipe);

	while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Pipe is drained; the next read will come off the direct buffer. */
	wpipe->pipe_state |= PIPE_DIRECTR;

	/* Wait until the reader is done. */
	while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
		cv_broadcast(&wpipe->pipe_rcv);
		pipeselwakeup(wpipe, wpipe, POLL_IN);
		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Take the pipe out of direct write mode. */
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);

	/* Acquire the pipe lock and clean up. */
	(void)pipelock(wpipe, 0);
	mutex_exit(lock);

	if (pgs != NULL) {
		/* XXX: uvm_emap_remove */
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);

	mutex_enter(lock);
	if (error) {
		pipeselwakeup(wpipe, wpipe, POLL_ERR);

		/*
		 * If nothing was read from what we offered, return the error
		 * straight on.  Otherwise update uio resid first.  The caller
		 * will deal with the error condition, returning a short
		 * write, an error, or restarting the write(2) as appropriate.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
			wpipe->pipe_map.cnt = 0;
			cv_broadcast(&wpipe->pipe_wcv);
			return (error);
		}

		bcnt -= wpipe->pipe_map.cnt;
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	wpipe->pipe_map.cnt = 0;
	return (error);
}
#endif /* !PIPE_NODIRECT */
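
/*
 * Summary of the direct-write handshake implemented above (sketch):
 *
 *	writer:	loans its pages, sets PIPE_DIRECTW, waits for the
 *		kernel buffer to drain
 *	writer:	sets PIPE_DIRECTR and wakes the reader
 *	reader:	copies straight from the loaned pages, clears
 *		PIPE_DIRECTR when pipe_map.cnt reaches 0, wakes the writer
 *	writer:	clears PIPE_DIRECTW|PIPE_DIRECTR, unloans the pages
 */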

static int
pipe_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *wpipe, *rpipe;
	struct pipebuf *bp;
	kmutex_t *lock;
	int error;

	/* We want to write to our peer. */
	rpipe = (struct pipe *) fp->f_data;
	lock = rpipe->pipe_lock;
	error = 0;

	mutex_enter(lock);
	wpipe = rpipe->pipe_peer;

	/*
	 * Detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) != 0) {
		mutex_exit(lock);
		return EPIPE;
	}
	++wpipe->pipe_busy;

	/* Acquire the long-term pipe lock. */
	if ((error = pipelock(wpipe, 1)) != 0) {
		--wpipe->pipe_busy;
		if (wpipe->pipe_busy == 0) {
			cv_broadcast(&wpipe->pipe_draincv);
		}
		mutex_exit(lock);
		return (error);
	}

	bp = &wpipe->pipe_buffer;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {

		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
			atomic_inc_uint(&nbigpipe);
	}

	while (uio->uio_resid) {
		size_t space;

#ifndef PIPE_NODIRECT
		/*
		 * Pipe buffered writes cannot coexist with
		 * direct writes.  Also, only one direct write can be
		 * in progress at any one time.  We wait until the currently
		 * executing direct write is completed before continuing.
		 *
		 * We break out if a signal occurs or the reader goes away.
		 */
		while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
			cv_broadcast(&wpipe->pipe_rcv);
			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, 0);
			if (wpipe->pipe_state & PIPE_EOF)
				error = EPIPE;
		}
		if (error)
			break;

		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(fp, wpipe, uio);

			/*
			 * Break out if an error occurred, unless it's ENOMEM.
			 * ENOMEM means we failed to allocate some resources
			 * for the direct write, so we just fall back to an
			 * ordinary write.  If the direct write was successful,
			 * process the rest of the data via ordinary write.
			 */
			if (error == 0)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* !PIPE_NODIRECT */

		space = bp->size - bp->cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;
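
		/*
		 * Example of the check above: with 200 bytes free and a
		 * pending 300-byte write (PIPE_BUF >= 300), we report no
		 * space and sleep for room rather than splitting the write.
		 * POSIX requires writes of at most PIPE_BUF bytes to be
		 * atomic, i.e. never interleaved with other writers.
		 */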

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = bp->size - bp->in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */
			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->in, segsize,
			    uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
				KASSERT(bp->in + segsize == bp->size);
				error = uiomove(bp->buffer,
				    size - segsize, uio);
			}
			mutex_enter(lock);
			if (error)
				break;

			bp->in += size;
			if (bp->in >= bp->size) {
				KASSERT(bp->in == size - segsize + bp->size);
				bp->in = size - segsize;
			}

			bp->cnt += size;
			KASSERT(bp->cnt <= bp->size);
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			cv_broadcast(&wpipe->pipe_rcv);

			/*
			 * Don't block on non-blocking I/O.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			if (bp->cnt)
				pipeselwakeup(wpipe, wpipe, POLL_IN);

			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, 0);
			if (error != 0)
				break;
			/*
			 * If the read side wants to go away, we just issue
			 * a signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if (wpipe->pipe_busy == 0) {
		cv_broadcast(&wpipe->pipe_draincv);
	}
	if (bp->cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
	}

	/*
	 * Don't return EPIPE if the I/O was successful.
	 */
	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
		error = 0;

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (bp->cnt)
		pipeselwakeup(wpipe, wpipe, POLL_IN);

	/*
	 * Arrange for the next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	pipeunlock(wpipe);
	mutex_exit(lock);
	return (error);
}
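
/*
 * Writer-side semantics, from the user's view (a sketch; SIGPIPE delivery
 * happens in the generic write path that calls fo_write, not here):
 *
 *	write() == -1, errno == EPIPE	read side closed (SIGPIPE is
 *					normally raised by the caller)
 *	write() == -1, errno == EAGAIN	O_NONBLOCK and no buffer space
 *	writes of <= PIPE_BUF bytes	never interleaved with other writers
 */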

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct pipe *pipe = fp->f_data;
	kmutex_t *lock = pipe->pipe_lock;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		mutex_enter(lock);
		if (*(int *)data) {
			pipe->pipe_state |= PIPE_ASYNC;
		} else {
			pipe->pipe_state &= ~PIPE_ASYNC;
		}
		mutex_exit(lock);
		return (0);

	case FIONREAD:
		mutex_enter(lock);
#ifndef PIPE_NODIRECT
		if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = pipe->pipe_map.cnt;
		else
#endif
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONWRITE:
		/* Look at other side */
		pipe = pipe->pipe_peer;
		mutex_enter(lock);
#ifndef PIPE_NODIRECT
		if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = pipe->pipe_map.cnt;
		else
#endif
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONSPACE:
		/* Look at other side */
		pipe = pipe->pipe_peer;
		mutex_enter(lock);
#ifndef PIPE_NODIRECT
		/*
		 * If we're in direct-mode, we don't really have a
		 * send queue, and any other write will block.  Thus
		 * zero seems like the best answer.
		 */
		if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = 0;
		else
#endif
			*(int *)data = pipe->pipe_buffer.size -
			    pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&pipe->pipe_pgid, cmd, data);

	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(pipe->pipe_pgid, cmd, data);

	}
	return (EPASSTHROUGH);
}
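
/*
 * Illustrative userland use of the ioctls above (sketch; fds[] as
 * returned by pipe(2)):
 *
 *	int nread;
 *
 *	if (ioctl(fds[0], FIONREAD, &nread) == 0)
 *		printf("%d bytes waiting to be read\n", nread);
 */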

int
pipe_poll(file_t *fp, int events)
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int eof = 0;
	int revents = 0;

	mutex_enter(rpipe->pipe_lock);
	wpipe = rpipe->pipe_peer;

	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTR) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	eof |= (rpipe->pipe_state & PIPE_EOF);

	if (wpipe == NULL)
		revents |= events & (POLLOUT | POLLWRNORM);
	else {
		if (events & (POLLOUT | POLLWRNORM))
			if ((wpipe->pipe_state & PIPE_EOF) || (
#ifndef PIPE_NODIRECT
			    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
			    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
				revents |= events & (POLLOUT | POLLWRNORM);

		eof |= (wpipe->pipe_state & PIPE_EOF);
	}

	if (wpipe == NULL || eof)
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(curlwp, &rpipe->pipe_sel);

		if (events & (POLLOUT | POLLWRNORM))
			selrecord(curlwp, &wpipe->pipe_sel);
	}
	mutex_exit(rpipe->pipe_lock);

	return (revents);
}
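
/*
 * Sketch of the userland side of pipe_poll() above:
 *
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLHUP))
 *		warnx("write side closed");
 */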

static int
pipe_stat(file_t *fp, struct stat *ub)
{
	struct pipe *pipe = fp->f_data;

	mutex_enter(pipe->pipe_lock);
	memset(ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	ub->st_blksize = pipe->pipe_buffer.size;
	if (ub->st_blksize == 0 && pipe->pipe_peer)
		ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = ub->st_birthtimespec = pipe->pipe_btime;
	ub->st_uid = kauth_cred_geteuid(fp->f_cred);
	ub->st_gid = kauth_cred_getegid(fp->f_cred);

	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	mutex_exit(pipe->pipe_lock);
	return 0;
}

static int
pipe_close(file_t *fp)
{
	struct pipe *pipe = fp->f_data;

	fp->f_data = NULL;
	pipeclose(fp, pipe);
	return (0);
}

static void
pipe_free_kmem(struct pipe *pipe)
{

	if (pipe->pipe_buffer.buffer != NULL) {
		if (pipe->pipe_buffer.size > PIPE_SIZE) {
			atomic_dec_uint(&nbigpipe);
		}
		if (pipe->pipe_buffer.buffer != (void *)pipe->pipe_kmem) {
			uvm_km_free(kernel_map,
			    (vaddr_t)pipe->pipe_buffer.buffer,
			    pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
			atomic_add_int(&amountpipekva,
			    -pipe->pipe_buffer.size);
		}
		pipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (pipe->pipe_map.kva != 0) {
		pipe_loan_free(pipe);
		pipe->pipe_map.cnt = 0;
		pipe->pipe_map.kva = 0;
		pipe->pipe_map.pos = 0;
		pipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * Shutdown the pipe.
 */
static void
pipeclose(file_t *fp, struct pipe *pipe)
{
	kmutex_t *lock;
	struct pipe *ppipe;

	if (pipe == NULL)
		return;

	KASSERT(cv_is_valid(&pipe->pipe_rcv));
	KASSERT(cv_is_valid(&pipe->pipe_wcv));
	KASSERT(cv_is_valid(&pipe->pipe_draincv));
	KASSERT(cv_is_valid(&pipe->pipe_lkcv));

	lock = pipe->pipe_lock;
	mutex_enter(lock);
	pipeselwakeup(pipe, pipe, POLL_HUP);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	pipe->pipe_state |= PIPE_EOF;
	if (pipe->pipe_busy) {
		while (pipe->pipe_busy) {
			cv_broadcast(&pipe->pipe_wcv);
			cv_wait_sig(&pipe->pipe_draincv, lock);
		}
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = pipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe, ppipe, POLL_HUP);
		ppipe->pipe_state |= PIPE_EOF;
		cv_broadcast(&ppipe->pipe_rcv);
		ppipe->pipe_peer = NULL;
	}

	/*
	 * Any knote objects still left in the list are
	 * the ones attached by the peer.  Since no one will
	 * traverse this list, we just clear it.
	 */
	SLIST_INIT(&pipe->pipe_sel.sel_klist);

	KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
	mutex_exit(lock);

	/*
	 * Free resources.
	 */
	pipe->pipe_pgid = 0;
	pipe->pipe_state = PIPE_SIGNALR;
	pipe_free_kmem(pipe);
	if (pipe->pipe_kmem != 0) {
		pool_cache_put(pipe_rd_cache, pipe);
	} else {
		pool_cache_put(pipe_wr_cache, pipe);
	}
	mutex_obj_free(lock);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_data;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_WRITE:
		/* Need the peer structure, not our own. */
		pipe = pipe->pipe_peer;

		/* If reader end already closed, just return. */
		if (pipe == NULL) {
			mutex_exit(lock);
			return;
		}

		break;
	default:
		/* Nothing to do. */
		break;
	}

	KASSERT(kn->kn_hook == pipe);
	SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(lock);
}

static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
	struct pipe *wpipe;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;
	kn->kn_data = rpipe->pipe_buffer.cnt;

	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0) {
			mutex_exit(rpipe->pipe_lock);
		}
		return (1);
	}

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return (kn->kn_data > 0);
}

static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
	struct pipe *wpipe;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0) {
			mutex_exit(rpipe->pipe_lock);
		}
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return (kn->kn_data >= PIPE_BUF);
}

static const struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static const struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

static int
pipe_kqfilter(file_t *fp, struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_data;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		pipe = pipe->pipe_peer;
		if (pipe == NULL) {
			/* Other end of pipe has been closed. */
			mutex_exit(lock);
			return (EBADF);
		}
		break;
	default:
		mutex_exit(lock);
		return (EINVAL);
	}

	kn->kn_hook = pipe;
	SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
	mutex_exit(lock);

	return (0);
}
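
/*
 * Userland sketch of registering for the filters above via kqueue(2):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(EXIT_FAILURE, "kevent");
 */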

/*
 * Handle pipe sysctls.
 */
SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "pipe",
		       SYSCTL_DESCR("Pipe settings"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, KERN_PIPE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxkvasz",
		       SYSCTL_DESCR("Maximum amount of kernel memory to be "
				    "used for pipes"),
		       NULL, 0, &maxpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxloankvasz",
		       SYSCTL_DESCR("Limit for direct transfers via page loan"),
		       NULL, 0, &limitpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxbigpipes",
		       SYSCTL_DESCR("Maximum number of \"big\" pipes"),
		       NULL, 0, &maxbigpipes, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "nbigpipes",
		       SYSCTL_DESCR("Number of \"big\" pipes"),
		       NULL, 0, &nbigpipe, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "kvasize",
		       SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
				    "buffers"),
		       NULL, 0, &amountpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
}
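
/*
 * The nodes above appear as kern.pipe.*; an illustrative userland read
 * (sketch):
 *
 *	int val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctlbyname("kern.pipe.maxbigpipes", &val, &len, NULL, 0) == 0)
 *		printf("kern.pipe.maxbigpipes = %d\n", val);
 */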