
Lines Matching refs:pipe

80 #include <sys/pipe.h>
94 .fo_name = "pipe",
109 * Default pipe buffer size(s); this can be kind of large now because pipe
110 * space is pageable. The pipe code will try to maintain locality of
125 * Amount of KVA consumed by pipe buffers.
129 static void pipeclose(struct pipe *);
130 static void pipe_free_kmem(struct pipe *);
131 static int pipe_create(struct pipe **, pool_cache_t, struct timespec *);
132 static int pipelock(struct pipe *, bool);
133 static inline void pipeunlock(struct pipe *);
134 static void pipeselwakeup(struct pipe *, struct pipe *, int);
135 static int pipespace(struct pipe *, int);
147 pipe_wr_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipewr",
152 pipe_rd_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "piperd",
160 struct pipe *pipe;
163 pipe = obj;
165 memset(pipe, 0, sizeof(struct pipe));
171 pipe->pipe_kmem = va;
174 cv_init(&pipe->pipe_rcv, "pipe_rd");
175 cv_init(&pipe->pipe_wcv, "pipe_wr");
176 cv_init(&pipe->pipe_draincv, "pipe_drn");
177 cv_init(&pipe->pipe_lkcv, "pipe_lk");
178 selinit(&pipe->pipe_sel);
179 pipe->pipe_state = PIPE_SIGNALR;
187 struct pipe *pipe;
189 pipe = obj;
191 cv_destroy(&pipe->pipe_rcv);
192 cv_destroy(&pipe->pipe_wcv);
193 cv_destroy(&pipe->pipe_draincv);
194 cv_destroy(&pipe->pipe_lkcv);
195 seldestroy(&pipe->pipe_sel);
196 if (pipe->pipe_kmem != 0) {
197 uvm_km_free(kernel_map, pipe->pipe_kmem, PIPE_SIZE,
204 * The pipe system call for the DTYPE_PIPE type of pipes
209 struct pipe *rpipe, *wpipe;
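For context, the userspace contract behind this syscall is the familiar pipe(2) descriptor pair. A minimal example of that usage (standard POSIX calls only; not part of this listing):

    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fds[2];                     /* fds[0]: read end, fds[1]: write end */
        char buf[16];
        ssize_t n;

        if (pipe(fds) == -1) {
            perror("pipe");
            return 1;
        }
        write(fds[1], "hello", 5);      /* bytes land in the kernel pipe buffer */
        n = read(fds[0], buf, sizeof(buf));
        printf("read %zd bytes: %.*s\n", n, (int)n, buf);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }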
268 * Allocate kva for pipe circular buffer; the space is pageable.
269 * This routine will 'realloc' the size of a pipe safely, if it fails
274 pipespace(struct pipe *pipe, int size)
282 if (size == PIPE_SIZE && pipe->pipe_kmem != 0) {
283 buffer = (void *)pipe->pipe_kmem;
293 pipe_free_kmem(pipe);
294 pipe->pipe_buffer.buffer = buffer;
295 pipe->pipe_buffer.size = size;
296 pipe->pipe_buffer.in = 0;
297 pipe->pipe_buffer.out = 0;
298 pipe->pipe_buffer.cnt = 0;
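The fields reset here (in, out, cnt, together with size) describe a circular buffer. A rough userspace model of that indexing, assuming in is the producer offset, out the consumer offset and cnt the number of buffered bytes (an illustrative sketch, not the kernel structure):

    #include <stddef.h>
    #include <string.h>

    #define MIN(a, b)   ((a) < (b) ? (a) : (b))

    /* Userspace stand-in for the pipe_buffer fields shown above. */
    struct ringbuf {
        char   *buffer;                 /* backing storage */
        size_t  size;                   /* capacity */
        size_t  in;                     /* producer offset */
        size_t  out;                    /* consumer offset */
        size_t  cnt;                    /* bytes currently buffered */
    };

    /* Append up to len bytes, wrapping at the end of the buffer. */
    static size_t
    ring_put(struct ringbuf *rb, const char *src, size_t len)
    {
        size_t done = 0;

        len = MIN(len, rb->size - rb->cnt);
        while (done < len) {
            size_t chunk = MIN(len - done, rb->size - rb->in);

            memcpy(rb->buffer + rb->in, src + done, chunk);
            rb->in = (rb->in + chunk) % rb->size;
            rb->cnt += chunk;
            done += chunk;
        }
        return done;
    }

The read side mirrors this: copy from buffer at out, advance out with the same wraparound, and decrement cnt.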
303 * Initialize and allocate VM and memory for pipe.
306 pipe_create(struct pipe **pipep, pool_cache_t cache, struct timespec *nt)
308 struct pipe *pipe;
311 pipe = pool_cache_get(cache, PR_WAITOK);
312 KASSERT(pipe != NULL);
313 *pipep = pipe;
315 pipe->pipe_atime = pipe->pipe_mtime = pipe->pipe_btime = *nt;
316 pipe->pipe_lock = NULL;
318 error = pipespace(pipe, PIPE_SIZE);
320 pipe->pipe_buffer.buffer = NULL;
321 pipe->pipe_buffer.size = 0;
322 pipe->pipe_buffer.in = 0;
323 pipe->pipe_buffer.out = 0;
324 pipe->pipe_buffer.cnt = 0;
330 * Lock a pipe for I/O, blocking other access
331 * Called with pipe spin lock held.
334 pipelock(struct pipe *pipe, bool catch_p)
338 KASSERT(mutex_owned(pipe->pipe_lock));
340 while (pipe->pipe_state & PIPE_LOCKFL) {
342 error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
347 cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
350 pipe->pipe_state |= PIPE_LOCKFL;
356 * unlock a pipe I/O lock
359 pipeunlock(struct pipe *pipe)
362 KASSERT(pipe->pipe_state & PIPE_LOCKFL);
364 pipe->pipe_state &= ~PIPE_LOCKFL;
365 cv_signal(&pipe->pipe_lkcv);
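pipelock()/pipeunlock() implement a long-term I/O lock layered on the pipe mutex: sleep on pipe_lkcv while PIPE_LOCKFL is set, claim the flag, and on unlock clear it and signal one waiter. A hedged pthreads analogue of the same pattern (the iolock names are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdbool.h>

    struct iolock {
        pthread_mutex_t mtx;            /* stands in for pipe_lock */
        pthread_cond_t  cv;             /* stands in for pipe_lkcv */
        bool            locked;         /* stands in for PIPE_LOCKFL */
    };

    /* Block until the long-term lock is free, then take it. */
    static void
    iolock_acquire(struct iolock *l)
    {
        pthread_mutex_lock(&l->mtx);
        while (l->locked)
            pthread_cond_wait(&l->cv, &l->mtx);
        l->locked = true;
        pthread_mutex_unlock(&l->mtx);
    }

    /* Release the lock and wake one waiter, as pipeunlock() does with cv_signal(). */
    static void
    iolock_release(struct iolock *l)
    {
        pthread_mutex_lock(&l->mtx);
        l->locked = false;
        pthread_cond_signal(&l->cv);
        pthread_mutex_unlock(&l->mtx);
    }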
370 * 'sigpipe' side of pipe.
373 pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
393 printf("bad siginfo code %d in pipe notification.\n", code);
410 struct pipe *rpipe = fp->f_pipe;
420 * Try to avoid locking the pipe if we have nothing to do.
422 * There are programs which share one pipe amongst multiple processes
423 * and perform non-blocking reads in parallel, even if the pipe is
447 * Normal pipe buffer receive.
469 * If there is no more to read in the pipe, reset
503 * Unlock the pipe buffer for our remaining processing.
526 /* Now wait until the pipe is filled */
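The comment above mentions programs that share one pipe among several processes and perform non-blocking reads in parallel. A userspace reader of that kind looks roughly like this (illustrative only; O_NONBLOCK and EAGAIN are standard POSIX behaviour):

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Switch the read end to non-blocking mode and attempt one read. */
    static ssize_t
    try_read(int rfd, char *buf, size_t len)
    {
        int flags = fcntl(rfd, F_GETFL, 0);
        ssize_t n;

        fcntl(rfd, F_SETFL, flags | O_NONBLOCK);
        n = read(rfd, buf, len);
        if (n == -1 && errno == EAGAIN)
            return 0;                   /* pipe currently empty, try again later */
        return n;
    }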
567 struct pipe *wpipe, *rpipe;
582 * Detect loss of pipe read side, issue SIGPIPE if lost.
590 /* Acquire the long-term pipe lock */
604 * If it is advantageous to resize the pipe buffer, do so.
629 * and free space in pipe buffer.
638 * pipe buffer. If first segment to transfer
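The write path detects loss of the pipe's read side and raises SIGPIPE. From userspace this is usually handled by ignoring the signal and checking write(2) for EPIPE instead (illustrative only):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fds[2];

        signal(SIGPIPE, SIG_IGN);       /* turn the signal into an EPIPE error */
        pipe(fds);
        close(fds[0]);                  /* drop the read side */
        if (write(fds[1], "x", 1) == -1 && errno == EPIPE)
            fprintf(stderr, "write failed: read side closed\n");
        close(fds[1]);
        return 0;
    }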
758 struct pipe *pipe = fp->f_pipe;
759 kmutex_t *lock = pipe->pipe_lock;
769 pipe->pipe_state |= PIPE_ASYNC;
771 pipe->pipe_state &= ~PIPE_ASYNC;
778 *(int *)data = pipe->pipe_buffer.cnt;
785 pipe = pipe->pipe_peer;
786 if (pipe == NULL)
789 *(int *)data = pipe->pipe_buffer.cnt;
796 pipe = pipe->pipe_peer;
797 if (pipe == NULL)
800 *(int *)data = pipe->pipe_buffer.size -
801 pipe->pipe_buffer.cnt;
807 return fsetown(&pipe->pipe_pgid, cmd, data);
811 return fgetown(pipe->pipe_pgid, cmd, data);
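The FIONREAD case reports the byte count currently buffered, and the peer-directed cases report the peer's count and remaining space for the write side. A minimal userspace use of the standard FIONREAD request (FIONWRITE and FIONSPACE are BSD extensions and are omitted here):

    #include <sys/ioctl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fds[2], nread = 0;

        pipe(fds);
        write(fds[1], "abc", 3);
        if (ioctl(fds[0], FIONREAD, &nread) == 0)
            printf("%d bytes buffered in the pipe\n", nread);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }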
820 struct pipe *rpipe = fp->f_pipe;
821 struct pipe *wpipe;
864 struct pipe *pipe = fp->f_pipe;
866 mutex_enter(pipe->pipe_lock);
869 ub->st_blksize = pipe->pipe_buffer.size;
870 if (ub->st_blksize == 0 && pipe->pipe_peer)
871 ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
872 ub->st_size = pipe->pipe_buffer.cnt;
874 ub->st_atimespec = pipe->pipe_atime;
875 ub->st_mtimespec = pipe->pipe_mtime;
876 ub->st_ctimespec = ub->st_birthtimespec = pipe->pipe_btime;
884 mutex_exit(pipe->pipe_lock);
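pipe_stat() exposes pipe state through struct stat: st_blksize from the buffer size, st_size from the buffered byte count, plus the three pipe timestamps. A small userspace check of those fields, assuming the mapping shown in the listing:

    #include <sys/stat.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fds[2];
        struct stat st;

        pipe(fds);
        write(fds[1], "abcd", 4);
        if (fstat(fds[0], &st) == 0)
            printf("blksize=%ld buffered=%lld bytes\n",
                (long)st.st_blksize, (long long)st.st_size);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }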
891 struct pipe *pipe = fp->f_pipe;
894 pipeclose(pipe);
901 struct pipe *pipe = fp->f_pipe;
908 mutex_enter(pipe->pipe_lock);
909 pipe->pipe_state |= PIPE_RESTART;
912 cv_broadcast(&pipe->pipe_rcv);
913 cv_broadcast(&pipe->pipe_wcv);
914 mutex_exit(pipe->pipe_lock);
938 pipe_free_kmem(struct pipe *pipe)
941 if (pipe->pipe_buffer.buffer != NULL) {
942 if (pipe->pipe_buffer.size > PIPE_SIZE) {
945 if (pipe->pipe_buffer.buffer != (void *)pipe->pipe_kmem) {
947 (vaddr_t)pipe->pipe_buffer.buffer,
948 pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
950 -pipe->pipe_buffer.size);
952 pipe->pipe_buffer.buffer = NULL;
957 * Shut down the pipe.
960 pipeclose(struct pipe *pipe)
963 struct pipe *ppipe;
965 if (pipe == NULL)
968 KASSERT(cv_is_valid(&pipe->pipe_rcv));
969 KASSERT(cv_is_valid(&pipe->pipe_wcv));
970 KASSERT(cv_is_valid(&pipe->pipe_draincv));
971 KASSERT(cv_is_valid(&pipe->pipe_lkcv));
973 lock = pipe->pipe_lock;
979 pipeselwakeup(pipe, pipe, POLL_HUP);
985 pipe->pipe_state |= PIPE_EOF;
986 if (pipe->pipe_busy) {
987 while (pipe->pipe_busy) {
988 cv_broadcast(&pipe->pipe_wcv);
989 cv_wait_sig(&pipe->pipe_draincv, lock);
996 if ((ppipe = pipe->pipe_peer) != NULL) {
1010 SLIST_INIT(&pipe->pipe_sel.sel_klist);
1012 KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
1020 pipe->pipe_pgid = 0;
1021 pipe->pipe_state = PIPE_SIGNALR;
1022 pipe->pipe_peer = NULL;
1023 pipe->pipe_lock = NULL;
1024 pipe_free_kmem(pipe);
1025 if (pipe->pipe_kmem != 0) {
1026 pool_cache_put(pipe_rd_cache, pipe);
1028 pool_cache_put(pipe_wr_cache, pipe);
1035 struct pipe *pipe;
1038 pipe = ((file_t *)kn->kn_obj)->f_pipe;
1039 lock = pipe->pipe_lock;
1046 pipe = pipe->pipe_peer;
1049 if (pipe == NULL) {
1060 KASSERT(kn->kn_hook == pipe);
1061 selremove_knote(&pipe->pipe_sel, kn);
1068 struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_pipe;
1069 struct pipe *wpipe;
1095 struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_pipe;
1096 struct pipe *wpipe;
1136 struct pipe *pipe;
1139 pipe = ((file_t *)kn->kn_obj)->f_pipe;
1140 lock = pipe->pipe_lock;
1150 pipe = pipe->pipe_peer;
1151 if (pipe == NULL) {
1152 /* Other end of pipe has been closed. */
1162 kn->kn_hook = pipe;
1163 selrecord_knote(&pipe->pipe_sel, kn);
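The attach/detach code above backs kqueue(2) monitoring of a pipe: an EVFILT_READ filter on the read end fires when data arrives, with the pending byte count in the event's data field. A short userspace consumer (illustrative only):

    #include <sys/event.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fds[2], kq;
        struct kevent change, result;

        pipe(fds);
        kq = kqueue();
        EV_SET(&change, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
        kevent(kq, &change, 1, NULL, 0, NULL);      /* register interest */

        write(fds[1], "ping", 4);
        if (kevent(kq, NULL, 0, &result, 1, NULL) == 1)
            printf("readable, %lld bytes pending\n", (long long)result.data);
        close(kq);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }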
1170 * Handle pipe sysctls.
1172 SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
1177 CTLTYPE_NODE, "pipe",
1178 SYSCTL_DESCR("Pipe settings"),
1197 SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
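The setup routine creates the kern.pipe sysctl subtree, including the counter described as the amount of kernel memory consumed by pipe buffers. Those values can be read with sysctlbyname(3); the node name kern.pipe.kvasize and its integer type below are assumptions for this sketch and may differ from the actual tree:

    #include <sys/param.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned int kva = 0;
        size_t len = sizeof(kva);

        /* Node name and value type are assumed here for illustration. */
        if (sysctlbyname("kern.pipe.kvasize", &kva, &len, NULL, 0) == 0)
            printf("pipe KVA in use: %u bytes\n", kva);
        else
            perror("sysctlbyname");
        return 0;
    }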