/*	$NetBSD: sys_pipe.c,v 1.162 2023/10/04 22:19:58 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used.  It does not support all features of
 * sockets, but does do everything that pipes normally do.
 *
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size it is mapped read-only into the kernel address space
 * using the UVM page loan facility from where the receiving process can copy
 * the data directly from the pages in the sending process.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.162 2023/10/04 22:19:58 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/pipe.h>

static int	pipe_read(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int	pipe_write(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int	pipe_close(file_t *);
static int	pipe_poll(file_t *, int);
static int	pipe_kqfilter(file_t *, struct knote *);
static int	pipe_stat(file_t *, struct stat *);
static int	pipe_ioctl(file_t *, u_long, void *);
static void	pipe_restart(file_t *);
static int	pipe_fpathconf(file_t *, int, register_t *);
static int	pipe_posix_fadvise(file_t *, off_t, off_t, int);

static const struct fileops pipeops = {
	.fo_name = "pipe",
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = pipe_poll,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_kqfilter = pipe_kqfilter,
	.fo_restart = pipe_restart,
	.fo_fpathconf = pipe_fpathconf,
	.fo_posix_fadvise = pipe_posix_fadvise,
};

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define	MINPIPESIZE	(PIPE_SIZE / 3)
#define	MAXPIPESIZE	(2 * PIPE_SIZE / 3)

/*
 * Limit the number of "big" pipes
 */
#define	LIMITBIGPIPES	32
static u_int	maxbigpipes = LIMITBIGPIPES;
static u_int	nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static u_int	amountpipekva = 0;

static void	pipeclose(struct pipe *);
static void	pipe_free_kmem(struct pipe *);
static int	pipe_create(struct pipe **, pool_cache_t);
static int	pipelock(struct pipe *, bool);
static inline void pipeunlock(struct pipe *);
static void	pipeselwakeup(struct pipe *, struct pipe *, int);
static int	pipespace(struct pipe *, int);
static int	pipe_ctor(void *, void *, int);
static void	pipe_dtor(void *, void *);

static pool_cache_t	pipe_wr_cache;
static pool_cache_t	pipe_rd_cache;

void
pipe_init(void)
{

	/* Writer side is not automatically allocated KVA. */
	pipe_wr_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipewr",
	    NULL, IPL_NONE, pipe_ctor, pipe_dtor, NULL);
	KASSERT(pipe_wr_cache != NULL);

	/* Reader side gets preallocated KVA. */
	pipe_rd_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "piperd",
	    NULL, IPL_NONE, pipe_ctor, pipe_dtor, (void *)1);
	KASSERT(pipe_rd_cache != NULL);
}

static int
pipe_ctor(void *arg, void *obj, int flags)
{
	struct pipe *pipe;
	vaddr_t va;

	pipe = obj;

	memset(pipe, 0, sizeof(struct pipe));
	if (arg != NULL) {
		/* Preallocate space. */
		va = uvm_km_alloc(kernel_map, PIPE_SIZE, 0,
		    UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
		KASSERT(va != 0);
		pipe->pipe_kmem = va;
		atomic_add_int(&amountpipekva, PIPE_SIZE);
	}
	cv_init(&pipe->pipe_rcv, "pipe_rd");
	cv_init(&pipe->pipe_wcv, "pipe_wr");
	cv_init(&pipe->pipe_draincv, "pipe_drn");
	cv_init(&pipe->pipe_lkcv, "pipe_lk");
	selinit(&pipe->pipe_sel);
	pipe->pipe_state = PIPE_SIGNALR;

	return 0;
}

static void
pipe_dtor(void *arg, void *obj)
{
	struct pipe *pipe;

	pipe = obj;

	cv_destroy(&pipe->pipe_rcv);
	cv_destroy(&pipe->pipe_wcv);
	cv_destroy(&pipe->pipe_draincv);
	cv_destroy(&pipe->pipe_lkcv);
	seldestroy(&pipe->pipe_sel);
	if (pipe->pipe_kmem != 0) {
		uvm_km_free(kernel_map, pipe->pipe_kmem, PIPE_SIZE,
		    UVM_KMF_PAGEABLE);
		atomic_add_int(&amountpipekva, -PIPE_SIZE);
	}
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */
int
pipe1(struct lwp *l, int *fildes, int flags)
{
	struct pipe *rpipe, *wpipe;
	file_t *rf, *wf;
	int fd, error;
	proc_t *p;

	if (flags & ~(O_CLOEXEC|O_NONBLOCK|O_NOSIGPIPE))
		return EINVAL;
	p = curproc;
	rpipe = wpipe = NULL;
	if ((error = pipe_create(&rpipe, pipe_rd_cache)) ||
	    (error = pipe_create(&wpipe, pipe_wr_cache))) {
		goto free2;
	}
	rpipe->pipe_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	wpipe->pipe_lock = rpipe->pipe_lock;
	mutex_obj_hold(wpipe->pipe_lock);

	error = fd_allocfile(&rf, &fd);
	if (error)
		goto free2;
	fildes[0] = fd;

	error = fd_allocfile(&wf, &fd);
	if (error)
		goto free3;
	fildes[1] = fd;

	rf->f_flag = FREAD | flags;
	rf->f_type = DTYPE_PIPE;
	rf->f_pipe = rpipe;
	rf->f_ops = &pipeops;
	fd_set_exclose(l, fildes[0], (flags & O_CLOEXEC) != 0);

	wf->f_flag = FWRITE | flags;
	wf->f_type = DTYPE_PIPE;
	wf->f_pipe = wpipe;
	wf->f_ops = &pipeops;
	fd_set_exclose(l, fildes[1], (flags & O_CLOEXEC) != 0);

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	fd_affix(p, rf, fildes[0]);
	fd_affix(p, wf, fildes[1]);
	return (0);
free3:
	fd_abort(p, rf, fildes[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);

	return (error);
}

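/*
 * Editorial sketch (not part of the kernel build): illustrative userland
 * usage of the pipe(2) syscall implemented by pipe1() above, assuming only
 * the standard POSIX API.  fildes[0] is the read end, fildes[1] the write
 * end.
 *
 *	#include <unistd.h>
 *
 *	int
 *	example(void)
 *	{
 *		int fds[2];
 *		char buf[6];
 *
 *		if (pipe(fds) == -1)
 *			return -1;
 *		(void)write(fds[1], "hello", 5);	-- write end
 *		(void)read(fds[0], buf, 5);		-- read end
 *		buf[5] = '\0';
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */
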
/*
 * Allocate KVA for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if the
 * allocation fails, the old buffer is retained and ENOMEM is returned.
 */
static int
pipespace(struct pipe *pipe, int size)
{
	void *buffer;

	/*
	 * Allocate pageable virtual address space.  Physical memory is
	 * allocated on demand.
	 */
	if (size == PIPE_SIZE && pipe->pipe_kmem != 0) {
		buffer = (void *)pipe->pipe_kmem;
	} else {
		buffer = (void *)uvm_km_alloc(kernel_map, round_page(size),
		    0, UVM_KMF_PAGEABLE);
		if (buffer == NULL)
			return (ENOMEM);
		atomic_add_int(&amountpipekva, size);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(pipe);
	pipe->pipe_buffer.buffer = buffer;
	pipe->pipe_buffer.size = size;
	pipe->pipe_buffer.in = 0;
	pipe->pipe_buffer.out = 0;
	pipe->pipe_buffer.cnt = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe.
 */
static int
pipe_create(struct pipe **pipep, pool_cache_t cache)
{
	struct pipe *pipe;
	int error;

	pipe = pool_cache_get(cache, PR_WAITOK);
	KASSERT(pipe != NULL);
	*pipep = pipe;
	error = 0;
	getnanotime(&pipe->pipe_btime);
	pipe->pipe_atime = pipe->pipe_mtime = pipe->pipe_btime;
	pipe->pipe_lock = NULL;
	if (cache == pipe_rd_cache) {
		error = pipespace(pipe, PIPE_SIZE);
	} else {
		pipe->pipe_buffer.buffer = NULL;
		pipe->pipe_buffer.size = 0;
		pipe->pipe_buffer.in = 0;
		pipe->pipe_buffer.out = 0;
		pipe->pipe_buffer.cnt = 0;
	}
	return error;
}

/*
 * Lock a pipe for I/O, blocking other access
 * Called with pipe spin lock held.
 */
static int
pipelock(struct pipe *pipe, bool catch_p)
{
	int error;

	KASSERT(mutex_owned(pipe->pipe_lock));

	while (pipe->pipe_state & PIPE_LOCKFL) {
		if (catch_p) {
			error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
			if (error != 0) {
				return error;
			}
		} else
			cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
	}

	pipe->pipe_state |= PIPE_LOCKFL;

	return 0;
}

/*
 * unlock a pipe I/O lock
 */
static inline void
pipeunlock(struct pipe *pipe)
{

	KASSERT(pipe->pipe_state & PIPE_LOCKFL);

	pipe->pipe_state &= ~PIPE_LOCKFL;
	cv_signal(&pipe->pipe_lkcv);
}

/*
 * Select/poll wakeup.  This also sends SIGIO to the peer connected to
 * the 'sigpipe' side of the pipe.
 */
static void
pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
{
	int band;

	switch (code) {
	case POLL_IN:
		band = POLLIN|POLLRDNORM;
		break;
	case POLL_OUT:
		band = POLLOUT|POLLWRNORM;
		break;
	case POLL_HUP:
		band = POLLHUP;
		break;
	case POLL_ERR:
		band = POLLERR;
		break;
	default:
		band = 0;
#ifdef DIAGNOSTIC
		printf("bad siginfo code %d in pipe notification.\n", code);
#endif
		break;
	}

	selnotify(&selp->pipe_sel, band, NOTE_SUBMIT);

	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
		return;

	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
}

static int
pipe_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *rpipe = fp->f_pipe;
	struct pipebuf *bp = &rpipe->pipe_buffer;
	kmutex_t *lock = rpipe->pipe_lock;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;
	unsigned int wakeup_state = 0;

	/*
	 * Try to avoid locking the pipe if we have nothing to do.
	 *
	 * There are programs which share one pipe amongst multiple processes
	 * and perform non-blocking reads in parallel, even if the pipe is
	 * empty.  This in particular is the case with BSD make, which when
	 * spawned with a high -j number can find itself with over half of the
	 * calls failing to find anything.
	 */
	if ((fp->f_flag & FNONBLOCK) != 0) {
		if (__predict_false(uio->uio_resid == 0))
			return (0);
		if (atomic_load_relaxed(&bp->cnt) == 0 &&
		    (atomic_load_relaxed(&rpipe->pipe_state) & PIPE_EOF) == 0)
			return (EAGAIN);
	}

	mutex_enter(lock);
	++rpipe->pipe_busy;
	ocnt = bp->cnt;

again:
	error = pipelock(rpipe, true);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * Normal pipe buffer receive.
		 */
		if (bp->cnt > 0) {
			size = bp->size - bp->out;
			if (size > bp->cnt)
				size = bp->cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->out, size, uio);
			mutex_enter(lock);
			if (error)
				break;

			bp->out += size;
			if (bp->out >= bp->size)
				bp->out = 0;

			bp->cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (bp->cnt == 0) {
				bp->in = 0;
				bp->out = 0;
			}
			nread += size;
			continue;
		}

		/*
		 * Break if some data was read.
		 */
		if (nread > 0)
			break;

		/*
		 * Detect EOF condition.
		 * Read returns 0 on EOF, no need to set error.
		 */
		if (rpipe->pipe_state & PIPE_EOF)
			break;

		/*
		 * Don't block on non-blocking I/O.
		 */
		if (fp->f_flag & FNONBLOCK) {
			error = EAGAIN;
			break;
		}

		/*
		 * Unlock the pipe buffer for our remaining processing.
		 * We will either break out with an error or we will
		 * sleep and relock to loop.
		 */
		pipeunlock(rpipe);

#if 1	/* XXX (dsl) I'm sure these aren't needed here ... */
		/*
		 * We want to read more, wake up select/poll.
		 */
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);

		/*
		 * If the "write-side" is blocked, wake it up now.
		 */
		cv_broadcast(&rpipe->pipe_wcv);
#endif

		if (wakeup_state & PIPE_RESTART) {
			error = ERESTART;
			goto unlocked_error;
		}

		/* Now wait until the pipe is filled */
		error = cv_wait_sig(&rpipe->pipe_rcv, lock);
		if (error != 0)
			goto unlocked_error;
		wakeup_state = rpipe->pipe_state;
		goto again;
	}

	if (error == 0)
		getnanotime(&rpipe->pipe_atime);
	pipeunlock(rpipe);

unlocked_error:
	--rpipe->pipe_busy;
	if (rpipe->pipe_busy == 0) {
		rpipe->pipe_state &= ~PIPE_RESTART;
		cv_broadcast(&rpipe->pipe_draincv);
	}
	if (bp->cnt < MINPIPESIZE) {
		cv_broadcast(&rpipe->pipe_wcv);
	}

	/*
	 * If anything was read off the buffer, signal to the writer it's
	 * possible to write more data. Also send signal if we are here for the
	 * first time after last write.
	 */
	if ((bp->size - bp->cnt) >= PIPE_BUF
	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	mutex_exit(lock);
	return (error);
}

static int
pipe_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *wpipe, *rpipe;
	struct pipebuf *bp;
	kmutex_t *lock;
	int error;
	unsigned int wakeup_state = 0;

	/* We want to write to our peer */
	rpipe = fp->f_pipe;
	lock = rpipe->pipe_lock;
	error = 0;

	mutex_enter(lock);
	wpipe = rpipe->pipe_peer;

	/*
	 * Detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) != 0) {
		mutex_exit(lock);
		return EPIPE;
	}
	++wpipe->pipe_busy;

	/* Acquire the long-term pipe lock */
	if ((error = pipelock(wpipe, true)) != 0) {
		--wpipe->pipe_busy;
		if (wpipe->pipe_busy == 0) {
			wpipe->pipe_state &= ~PIPE_RESTART;
			cv_broadcast(&wpipe->pipe_draincv);
		}
		mutex_exit(lock);
		return (error);
	}

	bp = &wpipe->pipe_buffer;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {

		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
			atomic_inc_uint(&nbigpipe);
	}

	while (uio->uio_resid) {
		size_t space;

		space = bp->size - bp->cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = bp->size - bp->in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */
			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->in, segsize,
			    uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
				KASSERT(bp->in + segsize == bp->size);
				error = uiomove(bp->buffer,
				    size - segsize, uio);
			}
			mutex_enter(lock);
			if (error)
				break;

			bp->in += size;
			if (bp->in >= bp->size) {
				KASSERT(bp->in == size - segsize + bp->size);
				bp->in = size - segsize;
			}

			bp->cnt += size;
			KASSERT(bp->cnt <= bp->size);
			wakeup_state = 0;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			cv_broadcast(&wpipe->pipe_rcv);

			/*
			 * Don't block on non-blocking I/O.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			if (bp->cnt)
				pipeselwakeup(wpipe, wpipe, POLL_IN);

			if (wakeup_state & PIPE_RESTART) {
				error = ERESTART;
				break;
			}

			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}

			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, false);
			if (error != 0)
				break;
			wakeup_state = wpipe->pipe_state;
		}
	}

	--wpipe->pipe_busy;
	if (wpipe->pipe_busy == 0) {
		wpipe->pipe_state &= ~PIPE_RESTART;
		cv_broadcast(&wpipe->pipe_draincv);
	}
	if (bp->cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
		error = 0;

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wmap->cnt is always 0 at this point (direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (bp->cnt)
		pipeselwakeup(wpipe, wpipe, POLL_IN);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	pipeunlock(wpipe);
	mutex_exit(lock);
	return (error);
}

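/*
 * Editorial sketch of the circular-buffer arithmetic used by pipe_read()
 * and pipe_write() above.  The values are made up for illustration: with
 * size = 16, in = 14 and a 5-byte write, the first segment fills offsets
 * 14..15 (segsize = size - in = 2) and the remaining 3 bytes wrap to
 * offsets 0..2, after which in = 3 and cnt grows by 5.  A minimal
 * standalone model of the same index update:
 *
 *	struct ring { unsigned in, out, cnt, size; };
 *
 *	static void
 *	ring_produce(struct ring *r, unsigned n)	-- n <= r->size - r->cnt
 *	{
 *		unsigned seg = r->size - r->in;
 *
 *		if (seg > n)
 *			seg = n;
 *		-- copy first 'seg' bytes at r->in, then 'n - seg' bytes at 0
 *		r->in += n;
 *		if (r->in >= r->size)
 *			r->in = n - seg;
 *		r->cnt += n;
 *	}
 */
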
/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct pipe *pipe = fp->f_pipe;
	kmutex_t *lock = pipe->pipe_lock;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		mutex_enter(lock);
		if (*(int *)data) {
			pipe->pipe_state |= PIPE_ASYNC;
		} else {
			pipe->pipe_state &= ~PIPE_ASYNC;
		}
		mutex_exit(lock);
		return (0);

	case FIONREAD:
		mutex_enter(lock);
		*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONWRITE:
		/* Look at other side */
		mutex_enter(lock);
		pipe = pipe->pipe_peer;
		if (pipe == NULL)
			*(int *)data = 0;
		else
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONSPACE:
		/* Look at other side */
		mutex_enter(lock);
		pipe = pipe->pipe_peer;
		if (pipe == NULL)
			*(int *)data = 0;
		else
			*(int *)data = pipe->pipe_buffer.size -
			    pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&pipe->pipe_pgid, cmd, data);

	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(pipe->pipe_pgid, cmd, data);

	}
	return (EPASSTHROUGH);
}

int
pipe_poll(file_t *fp, int events)
{
	struct pipe *rpipe = fp->f_pipe;
	struct pipe *wpipe;
	int eof = 0;
	int revents = 0;

	mutex_enter(rpipe->pipe_lock);
	wpipe = rpipe->pipe_peer;

	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	eof |= (rpipe->pipe_state & PIPE_EOF);

	if (wpipe == NULL)
		revents |= events & (POLLOUT | POLLWRNORM);
	else {
		if (events & (POLLOUT | POLLWRNORM))
			if ((wpipe->pipe_state & PIPE_EOF) || (
			    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
				revents |= events & (POLLOUT | POLLWRNORM);

		eof |= (wpipe->pipe_state & PIPE_EOF);
	}

	if (wpipe == NULL || eof)
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(curlwp, &rpipe->pipe_sel);

		if (events & (POLLOUT | POLLWRNORM))
			selrecord(curlwp, &wpipe->pipe_sel);
	}
	mutex_exit(rpipe->pipe_lock);

	return (revents);
}

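/*
 * Editorial sketch (not compiled here) of how the poll(2) semantics
 * implemented above look from userland, using only the standard POSIX
 * interface: POLLIN is reported once data is buffered or the write side
 * is gone, POLLOUT once at least PIPE_BUF bytes are free in the peer.
 *
 *	#include <poll.h>
 *
 *	int
 *	wait_readable(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		int n = poll(&pfd, 1, timeout_ms);
 *
 *		if (n <= 0)
 *			return n;	-- 0 = timeout, -1 = error
 *		return (pfd.revents & (POLLIN | POLLHUP)) != 0;
 *	}
 */
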
static int
pipe_stat(file_t *fp, struct stat *ub)
{
	struct pipe *pipe = fp->f_pipe;

	mutex_enter(pipe->pipe_lock);
	memset(ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	ub->st_blksize = pipe->pipe_buffer.size;
	if (ub->st_blksize == 0 && pipe->pipe_peer)
		ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = ub->st_birthtimespec = pipe->pipe_btime;
	ub->st_uid = kauth_cred_geteuid(fp->f_cred);
	ub->st_gid = kauth_cred_getegid(fp->f_cred);

	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	mutex_exit(pipe->pipe_lock);
	return 0;
}

static int
pipe_close(file_t *fp)
{
	struct pipe *pipe = fp->f_pipe;

	fp->f_pipe = NULL;
	pipeclose(pipe);
	return (0);
}

static void
pipe_restart(file_t *fp)
{
	struct pipe *pipe = fp->f_pipe;

	/*
	 * Unblock blocked reads/writes in order to allow close() to complete.
	 * System calls return ERESTART so that the fd is revalidated.
	 * (Partial writes return the transfer length.)
	 */
	mutex_enter(pipe->pipe_lock);
	pipe->pipe_state |= PIPE_RESTART;
	/* Wakeup both cvs, maybe we only need one, but maybe there are some
	 * other paths where wakeup is needed, and it saves deciding which! */
	cv_broadcast(&pipe->pipe_rcv);
	cv_broadcast(&pipe->pipe_wcv);
	mutex_exit(pipe->pipe_lock);
}

static int
pipe_fpathconf(struct file *fp, int name, register_t *retval)
{

	switch (name) {
	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		return 0;
	default:
		return EINVAL;
	}
}

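/*
 * Editorial sketch: the value returned above is what userland sees via the
 * standard fpathconf(2) interface, e.g.
 *
 *	long pipe_buf = fpathconf(fd, _PC_PIPE_BUF);	-- == PIPE_BUF
 *
 * Writes of at most that many bytes are atomic with respect to other
 * writers on the same pipe.
 */
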
static int
pipe_posix_fadvise(struct file *fp, off_t offset, off_t len, int advice)
{

	return ESPIPE;
}

static void
pipe_free_kmem(struct pipe *pipe)
{

	if (pipe->pipe_buffer.buffer != NULL) {
		if (pipe->pipe_buffer.size > PIPE_SIZE) {
			atomic_dec_uint(&nbigpipe);
		}
		if (pipe->pipe_buffer.buffer != (void *)pipe->pipe_kmem) {
			uvm_km_free(kernel_map,
			    (vaddr_t)pipe->pipe_buffer.buffer,
			    pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
			atomic_add_int(&amountpipekva,
			    -pipe->pipe_buffer.size);
		}
		pipe->pipe_buffer.buffer = NULL;
	}
}

/*
 * Shutdown the pipe.
 */
static void
pipeclose(struct pipe *pipe)
{
	kmutex_t *lock;
	struct pipe *ppipe;

	if (pipe == NULL)
		return;

	KASSERT(cv_is_valid(&pipe->pipe_rcv));
	KASSERT(cv_is_valid(&pipe->pipe_wcv));
	KASSERT(cv_is_valid(&pipe->pipe_draincv));
	KASSERT(cv_is_valid(&pipe->pipe_lkcv));

	lock = pipe->pipe_lock;
	if (lock == NULL)
		/* Must have failed during create */
		goto free_resources;

	mutex_enter(lock);
	pipeselwakeup(pipe, pipe, POLL_HUP);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	pipe->pipe_state |= PIPE_EOF;
	if (pipe->pipe_busy) {
		while (pipe->pipe_busy) {
			cv_broadcast(&pipe->pipe_wcv);
			cv_wait_sig(&pipe->pipe_draincv, lock);
		}
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = pipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe, ppipe, POLL_HUP);
		ppipe->pipe_state |= PIPE_EOF;
		cv_broadcast(&ppipe->pipe_rcv);
		ppipe->pipe_peer = NULL;
	}

	/*
	 * Any knote objects still left in the list are
	 * the ones attached by the peer.  Since no one will
	 * traverse this list, we just clear it.
	 *
	 * XXX Exposes select/kqueue internals.
	 */
	SLIST_INIT(&pipe->pipe_sel.sel_klist);

	KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
	mutex_exit(lock);
	mutex_obj_free(lock);

	/*
	 * Free resources.
	 */
free_resources:
	pipe->pipe_pgid = 0;
	pipe->pipe_state = PIPE_SIGNALR;
	pipe->pipe_peer = NULL;
	pipe->pipe_lock = NULL;
	pipe_free_kmem(pipe);
	if (pipe->pipe_kmem != 0) {
		pool_cache_put(pipe_rd_cache, pipe);
	} else {
		pool_cache_put(pipe_wr_cache, pipe);
	}
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_pipe;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch(kn->kn_filter) {
	case EVFILT_WRITE:
		/* Need the peer structure, not our own. */
		pipe = pipe->pipe_peer;

		/* If reader end already closed, just return. */
		if (pipe == NULL) {
			mutex_exit(lock);
			return;
		}

		break;
	default:
		/* Nothing to do. */
		break;
	}

	KASSERT(kn->kn_hook == pipe);
	selremove_knote(&pipe->pipe_sel, kn);
	mutex_exit(lock);
}

static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_pipe;
	struct pipe *wpipe;
	int rv;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;
	kn->kn_data = rpipe->pipe_buffer.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		knote_set_eof(kn, 0);
		rv = 1;
	} else {
		rv = kn->kn_data > 0;
	}

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return rv;
}

static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_pipe;
	struct pipe *wpipe;
	int rv;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		knote_set_eof(kn, 0);
		rv = 1;
	} else {
		kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
		rv = kn->kn_data >= PIPE_BUF;
	}

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return rv;
}

static const struct filterops pipe_rfiltops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach = NULL,
	.f_detach = filt_pipedetach,
	.f_event = filt_piperead,
};

static const struct filterops pipe_wfiltops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach = NULL,
	.f_detach = filt_pipedetach,
	.f_event = filt_pipewrite,
};

static int
pipe_kqfilter(file_t *fp, struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_pipe;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		pipe = pipe->pipe_peer;
		if (pipe == NULL) {
			/* Other end of pipe has been closed. */
			mutex_exit(lock);
			return (EBADF);
		}
		break;
	default:
		mutex_exit(lock);
		return (EINVAL);
	}

	kn->kn_hook = pipe;
	selrecord_knote(&pipe->pipe_sel, kn);
	mutex_exit(lock);

	return (0);
}

/*
 * Handle pipe sysctls.
 */
SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "pipe",
	    SYSCTL_DESCR("Pipe settings"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_PIPE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxbigpipes",
	    SYSCTL_DESCR("Maximum number of \"big\" pipes"),
	    NULL, 0, &maxbigpipes, 0,
	    CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "nbigpipes",
	    SYSCTL_DESCR("Number of \"big\" pipes"),
	    NULL, 0, &nbigpipe, 0,
	    CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "kvasize",
	    SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
	    "buffers"),
	    NULL, 0, &amountpipekva, 0,
	    CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
}
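
/*
 * Editorial note: the nodes created above live under kern.pipe and can be
 * inspected or tuned from userland, e.g. (sketch, standard sysctl(8) usage
 * assumed; the defaults shown match LIMITBIGPIPES and nbigpipe above):
 *
 *	$ sysctl kern.pipe
 *	kern.pipe.maxbigpipes = 32
 *	kern.pipe.nbigpipes = 0
 *	kern.pipe.kvasize = ...
 *	$ sysctl -w kern.pipe.maxbigpipes=64
 */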