1 1.4.2.14 nathanw /* $NetBSD: sys_pipe.c,v 1.4.2.14 2002/11/11 22:14:00 nathanw Exp $ */
2 1.4.2.2 nathanw
3 1.4.2.2 nathanw /*
4 1.4.2.2 nathanw * Copyright (c) 1996 John S. Dyson
5 1.4.2.2 nathanw * All rights reserved.
6 1.4.2.2 nathanw *
7 1.4.2.2 nathanw * Redistribution and use in source and binary forms, with or without
8 1.4.2.2 nathanw * modification, are permitted provided that the following conditions
9 1.4.2.2 nathanw * are met:
10 1.4.2.2 nathanw * 1. Redistributions of source code must retain the above copyright
11 1.4.2.2 nathanw * notice immediately at the beginning of the file, without modification,
12 1.4.2.2 nathanw * this list of conditions, and the following disclaimer.
13 1.4.2.2 nathanw * 2. Redistributions in binary form must reproduce the above copyright
14 1.4.2.2 nathanw * notice, this list of conditions and the following disclaimer in the
15 1.4.2.2 nathanw * documentation and/or other materials provided with the distribution.
16 1.4.2.2 nathanw * 3. Absolutely no warranty of function or purpose is made by the author
17 1.4.2.2 nathanw * John S. Dyson.
18 1.4.2.2 nathanw * 4. Modifications may be freely made to this file if the above conditions
19 1.4.2.2 nathanw * are met.
20 1.4.2.2 nathanw *
21 1.4.2.11 nathanw * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
22 1.4.2.2 nathanw */
23 1.4.2.2 nathanw
24 1.4.2.2 nathanw /*
25 1.4.2.2 nathanw * This file contains a high-performance replacement for the socket-based
26 1.4.2.2 nathanw * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
27 1.4.2.2 nathanw * all features of sockets, but does do everything that pipes normally
28 1.4.2.2 nathanw * do.
29 1.4.2.2 nathanw *
30 1.4.2.2 nathanw * Adaptation for NetBSD UVM, including uvm_loan() based direct write, was
31 1.4.2.2 nathanw * written by Jaromir Dolecek.
32 1.4.2.2 nathanw */
33 1.4.2.2 nathanw
34 1.4.2.2 nathanw /*
35 1.4.2.2 nathanw * This code has two modes of operation, a small write mode and a large
36 1.4.2.2 nathanw * write mode. The small write mode acts like conventional pipes with
37 1.4.2.2 nathanw * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
38 1.4.2.2 nathanw * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
39 1.4.2.2 nathanw * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
40 1.4.2.2 nathanw * those pages are also wired), and the receiving process can copy it directly
41 1.4.2.2 nathanw * from the pages in the sending process.
42 1.4.2.2 nathanw *
43 1.4.2.2 nathanw * If the sending process receives a signal, it is possible that it will
44 1.4.2.2 nathanw * go away, and certainly its address space can change, because control
45 1.4.2.2 nathanw * is returned back to the user-mode side. In that case, the pipe code
46 1.4.2.2 nathanw * arranges to copy the buffer supplied by the user process on FreeBSD, to
47 1.4.2.2 nathanw * a pageable kernel buffer, and the receiving process will grab the data
48 1.4.2.2 nathanw * from the pageable kernel buffer. Since signals don't happen all that often,
49 1.4.2.2 nathanw * the copy operation is normally eliminated.
50 1.4.2.2 nathanw * For NetBSD, the pages are loaned to the kernel read-only and COW by
51 1.4.2.2 nathanw * uvm_loan(), so no explicit handling needs to be done; everything is
52 1.4.2.2 nathanw * handled by the standard VM facilities.
53 1.4.2.2 nathanw *
54 1.4.2.2 nathanw * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
55 1.4.2.2 nathanw * happen for small transfers so that the system will not spend all of
56 1.4.2.2 nathanw * its time context switching. PIPE_SIZE is constrained by the
57 1.4.2.2 nathanw * amount of kernel virtual memory.
58 1.4.2.2 nathanw */
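/*
 * Editor's illustrative sketch (not part of the original source): a minimal
 * user-level program that exercises both write paths described above.  The
 * small write stays below PIPE_MINDIRECT and goes through the kernel buffer;
 * the large write is a candidate for the direct (on NetBSD, uvm_loan() based)
 * path, assuming the usual PIPE_MINDIRECT/PIPE_SIZE values from <sys/pipe.h>.
 * User code cannot tell which path was taken; the choice is purely a kernel
 * optimization.
 */
#if 0	/* userland example, not kernel code */
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char small[128];		/* well below PIPE_MINDIRECT */
	static char big[64 * 1024];	/* large enough for the direct path */
	char sink[64 * 1024];
	ssize_t n;

	if (pipe(fds) == -1)
		err(1, "pipe");
	memset(small, 'a', sizeof(small));
	memset(big, 'b', sizeof(big));

	switch (fork()) {
	case -1:
		err(1, "fork");
	case 0:					/* child: reader */
		close(fds[1]);
		while ((n = read(fds[0], sink, sizeof(sink))) > 0)
			printf("read %zd bytes\n", n);
		_exit(0);
	default:				/* parent: writer */
		close(fds[0]);
		(void)write(fds[1], small, sizeof(small));	/* buffered */
		(void)write(fds[1], big, sizeof(big));		/* direct candidate */
		close(fds[1]);
		(void)wait(NULL);
	}
	return 0;
}
#endif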
59 1.4.2.2 nathanw
60 1.4.2.8 nathanw #include <sys/cdefs.h>
61 1.4.2.14 nathanw __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.4.2.14 2002/11/11 22:14:00 nathanw Exp $");
62 1.4.2.8 nathanw
63 1.4.2.2 nathanw #include <sys/param.h>
64 1.4.2.2 nathanw #include <sys/systm.h>
65 1.4.2.2 nathanw #include <sys/proc.h>
66 1.4.2.2 nathanw #include <sys/fcntl.h>
67 1.4.2.2 nathanw #include <sys/file.h>
68 1.4.2.2 nathanw #include <sys/filedesc.h>
69 1.4.2.2 nathanw #include <sys/filio.h>
70 1.4.2.11 nathanw #include <sys/kernel.h>
71 1.4.2.11 nathanw #include <sys/lock.h>
72 1.4.2.2 nathanw #include <sys/ttycom.h>
73 1.4.2.2 nathanw #include <sys/stat.h>
74 1.4.2.11 nathanw #include <sys/malloc.h>
75 1.4.2.2 nathanw #include <sys/poll.h>
76 1.4.2.2 nathanw #include <sys/signalvar.h>
77 1.4.2.2 nathanw #include <sys/vnode.h>
78 1.4.2.2 nathanw #include <sys/uio.h>
79 1.4.2.2 nathanw #include <sys/lock.h>
80 1.4.2.2 nathanw #ifdef __FreeBSD__
81 1.4.2.2 nathanw #include <sys/mutex.h>
82 1.4.2.11 nathanw #endif
83 1.4.2.11 nathanw #ifdef __NetBSD__
84 1.4.2.2 nathanw #include <sys/select.h>
85 1.4.2.2 nathanw #include <sys/mount.h>
86 1.4.2.12 nathanw #include <sys/sa.h>
87 1.4.2.2 nathanw #include <sys/syscallargs.h>
88 1.4.2.2 nathanw #include <uvm/uvm.h>
89 1.4.2.2 nathanw #include <sys/sysctl.h>
90 1.4.2.8 nathanw #include <sys/kernel.h>
91 1.4.2.2 nathanw #endif /* NetBSD, FreeBSD */
92 1.4.2.2 nathanw
93 1.4.2.2 nathanw #include <sys/pipe.h>
94 1.4.2.2 nathanw
95 1.4.2.2 nathanw #ifdef __NetBSD__
96 1.4.2.8 nathanw /*
97 1.4.2.8 nathanw * Avoid microtime(9), it's slow. We don't guard the read from time(9)
98 1.4.2.8 nathanw * with splclock(9) since we don't actually need to be THAT sure the access
99 1.4.2.8 nathanw * is atomic.
100 1.4.2.8 nathanw */
101 1.4.2.8 nathanw #define vfs_timestamp(tv) (*(tv) = time)
102 1.4.2.14 nathanw
103 1.4.2.14 nathanw /* we call it si_klist */
104 1.4.2.14 nathanw #define si_note si_klist
105 1.4.2.14 nathanw
106 1.4.2.2 nathanw #endif
107 1.4.2.2 nathanw
108 1.4.2.2 nathanw /*
109 1.4.2.2 nathanw * Use this define if you want to disable *fancy* VM things. Expect an
110 1.4.2.2 nathanw * approx 30% decrease in transfer rate. This could be useful for
111 1.4.2.2 nathanw * OpenBSD.
112 1.4.2.2 nathanw */
113 1.4.2.2 nathanw /* #define PIPE_NODIRECT */
114 1.4.2.2 nathanw
115 1.4.2.2 nathanw /*
116 1.4.2.2 nathanw * interfaces to the outside world
117 1.4.2.2 nathanw */
118 1.4.2.2 nathanw #ifdef __FreeBSD__
119 1.4.2.11 nathanw static int pipe_read(struct file *fp, struct uio *uio,
120 1.4.2.11 nathanw struct ucred *cred, int flags, struct thread *td);
121 1.4.2.11 nathanw static int pipe_write(struct file *fp, struct uio *uio,
122 1.4.2.11 nathanw struct ucred *cred, int flags, struct thread *td);
123 1.4.2.11 nathanw static int pipe_close(struct file *fp, struct thread *td);
124 1.4.2.11 nathanw static int pipe_poll(struct file *fp, int events, struct ucred *cred,
125 1.4.2.11 nathanw struct thread *td);
126 1.4.2.11 nathanw static int pipe_kqfilter(struct file *fp, struct knote *kn);
127 1.4.2.11 nathanw static int pipe_stat(struct file *fp, struct stat *sb, struct thread *td);
128 1.4.2.11 nathanw static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct thread *td);
129 1.4.2.2 nathanw
130 1.4.2.2 nathanw static struct fileops pipeops = {
131 1.4.2.2 nathanw pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
132 1.4.2.2 nathanw pipe_stat, pipe_close
133 1.4.2.2 nathanw };
134 1.4.2.2 nathanw
135 1.4.2.11 nathanw #define PIPE_GET_GIANT(pipe) \
136 1.4.2.11 nathanw do { \
137 1.4.2.11 nathanw PIPE_UNLOCK(pipe); \
138 1.4.2.11 nathanw mtx_lock(&Giant); \
139 1.4.2.14 nathanw } while (/*CONSTCOND*/ 0)
140 1.4.2.11 nathanw
141 1.4.2.11 nathanw #define PIPE_DROP_GIANT(pipe) \
142 1.4.2.11 nathanw do { \
143 1.4.2.11 nathanw mtx_unlock(&Giant); \
144 1.4.2.11 nathanw PIPE_LOCK(pipe); \
145 1.4.2.14 nathanw } while (/*CONSTCOND*/ 0)
146 1.4.2.11 nathanw
147 1.4.2.2 nathanw #endif /* FreeBSD */
148 1.4.2.2 nathanw
149 1.4.2.2 nathanw #ifdef __NetBSD__
150 1.4.2.11 nathanw static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
151 1.4.2.11 nathanw struct ucred *cred, int flags);
152 1.4.2.11 nathanw static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
153 1.4.2.11 nathanw struct ucred *cred, int flags);
154 1.4.2.11 nathanw static int pipe_close(struct file *fp, struct proc *p);
155 1.4.2.11 nathanw static int pipe_poll(struct file *fp, int events, struct proc *p);
156 1.4.2.11 nathanw static int pipe_fcntl(struct file *fp, u_int com, caddr_t data,
157 1.4.2.11 nathanw struct proc *p);
158 1.4.2.14 nathanw static int pipe_kqfilter(struct file *fp, struct knote *kn);
159 1.4.2.11 nathanw static int pipe_stat(struct file *fp, struct stat *sb, struct proc *p);
160 1.4.2.11 nathanw static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct proc *p);
161 1.4.2.2 nathanw
162 1.4.2.2 nathanw static struct fileops pipeops =
163 1.4.2.2 nathanw { pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
164 1.4.2.14 nathanw pipe_stat, pipe_close, pipe_kqfilter };
165 1.4.2.11 nathanw
166 1.4.2.11 nathanw /* XXXSMP perhaps use spinlocks & KERNEL_PROC_(UN)LOCK()? These are just empty for now. */
167 1.4.2.11 nathanw #define PIPE_GET_GIANT(pipe)
168 1.4.2.11 nathanw #define PIPE_DROP_GIANT(pipe)
169 1.4.2.11 nathanw #define GIANT_REQUIRED
170 1.4.2.11 nathanw
171 1.4.2.2 nathanw #endif /* NetBSD */
172 1.4.2.2 nathanw
173 1.4.2.2 nathanw /*
174 1.4.2.2 nathanw * Default pipe buffer size(s); this can be kind-of large now because pipe
175 1.4.2.2 nathanw * space is pageable. The pipe code will try to maintain locality of
176 1.4.2.2 nathanw * reference for performance reasons, so small amounts of outstanding I/O
177 1.4.2.2 nathanw * will not wipe the cache.
178 1.4.2.2 nathanw */
179 1.4.2.2 nathanw #define MINPIPESIZE (PIPE_SIZE/3)
180 1.4.2.2 nathanw #define MAXPIPESIZE (2*PIPE_SIZE/3)
181 1.4.2.2 nathanw
182 1.4.2.2 nathanw /*
183 1.4.2.2 nathanw * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
184 1.4.2.2 nathanw * is there so that on large systems, we don't exhaust it.
185 1.4.2.2 nathanw */
186 1.4.2.2 nathanw #define MAXPIPEKVA (8*1024*1024)
187 1.4.2.2 nathanw static int maxpipekva = MAXPIPEKVA;
188 1.4.2.2 nathanw
189 1.4.2.2 nathanw /*
190 1.4.2.2 nathanw * Limit for direct transfers; we cannot, of course, limit
191 1.4.2.2 nathanw * the amount of kva for pipes in general, though.
192 1.4.2.2 nathanw */
193 1.4.2.2 nathanw #define LIMITPIPEKVA (16*1024*1024)
194 1.4.2.2 nathanw static int limitpipekva = LIMITPIPEKVA;
195 1.4.2.2 nathanw
196 1.4.2.2 nathanw /*
197 1.4.2.2 nathanw * Limit the number of "big" pipes
198 1.4.2.2 nathanw */
199 1.4.2.2 nathanw #define LIMITBIGPIPES 32
200 1.4.2.2 nathanw static int maxbigpipes = LIMITBIGPIPES;
201 1.4.2.2 nathanw static int nbigpipe = 0;
202 1.4.2.2 nathanw
203 1.4.2.2 nathanw /*
204 1.4.2.2 nathanw * Amount of KVA consumed by pipe buffers.
205 1.4.2.2 nathanw */
206 1.4.2.2 nathanw static int amountpipekva = 0;
207 1.4.2.2 nathanw
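/*
 * Editor's note (illustrative, not part of the original source): assuming the
 * usual PIPE_SIZE of 16 KB from <sys/pipe.h>, the 8 MB MAXPIPEKVA soft limit
 * corresponds to roughly 8 MB / 16 KB = 512 default-sized pipe buffers.
 * "Big" pipes (BIG_PIPE_SIZE, typically 64 KB) and direct-write mappings draw
 * from the same amountpipekva accounting, which is why the number of big
 * pipes is capped separately by maxbigpipes.
 */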
208 1.4.2.11 nathanw static void pipeclose(struct pipe *cpipe);
209 1.4.2.11 nathanw static void pipe_free_kmem(struct pipe *cpipe);
210 1.4.2.11 nathanw static int pipe_create(struct pipe **cpipep, int allockva);
211 1.4.2.11 nathanw static __inline int pipelock(struct pipe *cpipe, int catch);
212 1.4.2.11 nathanw static __inline void pipeunlock(struct pipe *cpipe);
213 1.4.2.11 nathanw static __inline void pipeselwakeup(struct pipe *cpipe, struct pipe *sigp);
214 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
215 1.4.2.11 nathanw static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
216 1.4.2.2 nathanw #endif
217 1.4.2.11 nathanw static int pipespace(struct pipe *cpipe, int size);
218 1.4.2.2 nathanw
219 1.4.2.2 nathanw #ifdef __NetBSD__
220 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
221 1.4.2.11 nathanw static int pipe_loan_alloc(struct pipe *, int);
222 1.4.2.11 nathanw static void pipe_loan_free(struct pipe *);
223 1.4.2.2 nathanw #endif /* PIPE_NODIRECT */
224 1.4.2.2 nathanw
225 1.4.2.2 nathanw static struct pool pipe_pool;
226 1.4.2.2 nathanw #endif /* NetBSD */
227 1.4.2.2 nathanw
228 1.4.2.11 nathanw #ifdef __FreeBSD__
229 1.4.2.11 nathanw static vm_zone_t pipe_zone;
230 1.4.2.11 nathanw
231 1.4.2.11 nathanw static void pipeinit(void *dummy __unused);
232 1.4.2.11 nathanw #ifndef PIPE_NODIRECT
233 1.4.2.11 nathanw static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
234 1.4.2.11 nathanw static void pipe_destroy_write_buffer(struct pipe *wpipe);
235 1.4.2.11 nathanw static void pipe_clone_write_buffer(struct pipe *wpipe);
236 1.4.2.11 nathanw #endif
237 1.4.2.11 nathanw
238 1.4.2.11 nathanw SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
239 1.4.2.11 nathanw
240 1.4.2.11 nathanw static void
241 1.4.2.11 nathanw pipeinit(void *dummy __unused)
242 1.4.2.11 nathanw {
243 1.4.2.11 nathanw
244 1.4.2.11 nathanw pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
245 1.4.2.11 nathanw }
246 1.4.2.11 nathanw #endif /* FreeBSD */
247 1.4.2.11 nathanw
248 1.4.2.2 nathanw /*
249 1.4.2.2 nathanw * The pipe system call for the DTYPE_PIPE type of pipes
250 1.4.2.2 nathanw */
251 1.4.2.2 nathanw
252 1.4.2.2 nathanw /* ARGSUSED */
253 1.4.2.2 nathanw #ifdef __FreeBSD__
254 1.4.2.2 nathanw int
255 1.4.2.11 nathanw pipe(td, uap)
256 1.4.2.11 nathanw struct thread *td;
257 1.4.2.2 nathanw struct pipe_args /* {
258 1.4.2.2 nathanw int dummy;
259 1.4.2.2 nathanw } */ *uap;
260 1.4.2.2 nathanw #elif defined(__NetBSD__)
261 1.4.2.2 nathanw int
262 1.4.2.10 nathanw sys_pipe(l, v, retval)
263 1.4.2.10 nathanw struct lwp *l;
264 1.4.2.2 nathanw void *v;
265 1.4.2.2 nathanw register_t *retval;
266 1.4.2.2 nathanw #endif
267 1.4.2.2 nathanw {
268 1.4.2.2 nathanw struct file *rf, *wf;
269 1.4.2.2 nathanw struct pipe *rpipe, *wpipe;
270 1.4.2.2 nathanw int fd, error;
271 1.4.2.10 nathanw struct proc *p;
272 1.4.2.2 nathanw #ifdef __FreeBSD__
273 1.4.2.11 nathanw struct mtx *pmtx;
274 1.4.2.11 nathanw
275 1.4.2.11 nathanw KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
276 1.4.2.2 nathanw
277 1.4.2.11 nathanw pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);
278 1.4.2.11 nathanw
279 1.4.2.2 nathanw rpipe = wpipe = NULL;
280 1.4.2.3 nathanw if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
281 1.4.2.11 nathanw pipeclose(rpipe);
282 1.4.2.11 nathanw pipeclose(wpipe);
283 1.4.2.11 nathanw free(pmtx, M_TEMP);
284 1.4.2.2 nathanw return (ENFILE);
285 1.4.2.2 nathanw }
286 1.4.2.2 nathanw
287 1.4.2.11 nathanw error = falloc(td, &rf, &fd);
288 1.4.2.2 nathanw if (error) {
289 1.4.2.2 nathanw pipeclose(rpipe);
290 1.4.2.2 nathanw pipeclose(wpipe);
291 1.4.2.11 nathanw free(pmtx, M_TEMP);
292 1.4.2.2 nathanw return (error);
293 1.4.2.2 nathanw }
294 1.4.2.2 nathanw fhold(rf);
295 1.4.2.11 nathanw td->td_retval[0] = fd;
296 1.4.2.2 nathanw
297 1.4.2.2 nathanw /*
298 1.4.2.2 nathanw * Warning: once we've gotten past allocation of the fd for the
299 1.4.2.2 nathanw * read-side, we can only drop the read side via fdrop() in order
300 1.4.2.2 nathanw * to avoid races against processes which manage to dup() the read
301 1.4.2.2 nathanw * side while we are blocked trying to allocate the write side.
302 1.4.2.2 nathanw */
303 1.4.2.11 nathanw FILE_LOCK(rf);
304 1.4.2.2 nathanw rf->f_flag = FREAD | FWRITE;
305 1.4.2.2 nathanw rf->f_type = DTYPE_PIPE;
306 1.4.2.2 nathanw rf->f_data = (caddr_t)rpipe;
307 1.4.2.2 nathanw rf->f_ops = &pipeops;
308 1.4.2.11 nathanw FILE_UNLOCK(rf);
309 1.4.2.11 nathanw error = falloc(td, &wf, &fd);
310 1.4.2.2 nathanw if (error) {
311 1.4.2.11 nathanw struct filedesc *fdp = td->td_proc->p_fd;
312 1.4.2.11 nathanw FILEDESC_LOCK(fdp);
313 1.4.2.11 nathanw if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
314 1.4.2.11 nathanw fdp->fd_ofiles[td->td_retval[0]] = NULL;
315 1.4.2.11 nathanw FILEDESC_UNLOCK(fdp);
316 1.4.2.11 nathanw fdrop(rf, td);
317 1.4.2.11 nathanw } else
318 1.4.2.11 nathanw FILEDESC_UNLOCK(fdp);
319 1.4.2.11 nathanw fdrop(rf, td);
320 1.4.2.2 nathanw /* rpipe has been closed by fdrop(). */
321 1.4.2.2 nathanw pipeclose(wpipe);
322 1.4.2.11 nathanw free(pmtx, M_TEMP);
323 1.4.2.2 nathanw return (error);
324 1.4.2.2 nathanw }
325 1.4.2.11 nathanw FILE_LOCK(wf);
326 1.4.2.2 nathanw wf->f_flag = FREAD | FWRITE;
327 1.4.2.2 nathanw wf->f_type = DTYPE_PIPE;
328 1.4.2.2 nathanw wf->f_data = (caddr_t)wpipe;
329 1.4.2.2 nathanw wf->f_ops = &pipeops;
330 1.4.2.2 nathanw td->td_retval[1] = fd;
331 1.4.2.2 nathanw rpipe->pipe_peer = wpipe;
332 1.4.2.2 nathanw wpipe->pipe_peer = rpipe;
333 1.4.2.11 nathanw mtx_init(pmtx, "pipe mutex", MTX_DEF);
334 1.4.2.11 nathanw rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
335 1.4.2.11 nathanw fdrop(rf, td);
336 1.4.2.2 nathanw #endif /* FreeBSD */
337 1.4.2.2 nathanw
338 1.4.2.2 nathanw #ifdef __NetBSD__
339 1.4.2.10 nathanw p = l->l_proc;
340 1.4.2.3 nathanw rpipe = wpipe = NULL;
341 1.4.2.3 nathanw if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
342 1.4.2.3 nathanw pipeclose(rpipe);
343 1.4.2.3 nathanw pipeclose(wpipe);
344 1.4.2.3 nathanw return (ENFILE);
345 1.4.2.3 nathanw }
346 1.4.2.3 nathanw
347 1.4.2.2 nathanw /*
348 1.4.2.2 nathanw * Note: the file structure returned from falloc() is marked
349 1.4.2.2 nathanw * as 'larval' initially. Unless we mark it as 'mature' by
350 1.4.2.2 nathanw * FILE_SET_MATURE(), any attempt to do anything with it would
351 1.4.2.2 nathanw * return EBADF, including e.g. dup(2) or close(2). This avoids
352 1.4.2.2 nathanw * file descriptor races if we block in the second falloc().
353 1.4.2.2 nathanw */
354 1.4.2.2 nathanw
355 1.4.2.2 nathanw error = falloc(p, &rf, &fd);
356 1.4.2.2 nathanw if (error)
357 1.4.2.2 nathanw goto free2;
358 1.4.2.2 nathanw retval[0] = fd;
359 1.4.2.2 nathanw rf->f_flag = FREAD;
360 1.4.2.2 nathanw rf->f_type = DTYPE_PIPE;
361 1.4.2.2 nathanw rf->f_data = (caddr_t)rpipe;
362 1.4.2.2 nathanw rf->f_ops = &pipeops;
363 1.4.2.2 nathanw
364 1.4.2.2 nathanw error = falloc(p, &wf, &fd);
365 1.4.2.2 nathanw if (error)
366 1.4.2.2 nathanw goto free3;
367 1.4.2.2 nathanw retval[1] = fd;
368 1.4.2.2 nathanw wf->f_flag = FWRITE;
369 1.4.2.2 nathanw wf->f_type = DTYPE_PIPE;
370 1.4.2.2 nathanw wf->f_data = (caddr_t)wpipe;
371 1.4.2.2 nathanw wf->f_ops = &pipeops;
372 1.4.2.2 nathanw
373 1.4.2.2 nathanw rpipe->pipe_peer = wpipe;
374 1.4.2.2 nathanw wpipe->pipe_peer = rpipe;
375 1.4.2.2 nathanw
376 1.4.2.2 nathanw FILE_SET_MATURE(rf);
377 1.4.2.2 nathanw FILE_SET_MATURE(wf);
378 1.4.2.2 nathanw FILE_UNUSE(rf, p);
379 1.4.2.2 nathanw FILE_UNUSE(wf, p);
380 1.4.2.2 nathanw return (0);
381 1.4.2.2 nathanw free3:
382 1.4.2.2 nathanw FILE_UNUSE(rf, p);
383 1.4.2.2 nathanw ffree(rf);
384 1.4.2.3 nathanw fdremove(p->p_fd, retval[0]);
385 1.4.2.2 nathanw free2:
386 1.4.2.2 nathanw pipeclose(wpipe);
387 1.4.2.2 nathanw pipeclose(rpipe);
388 1.4.2.2 nathanw #endif /* NetBSD */
389 1.4.2.2 nathanw
390 1.4.2.2 nathanw return (error);
391 1.4.2.2 nathanw }
392 1.4.2.2 nathanw
393 1.4.2.2 nathanw /*
394 1.4.2.2 nathanw * Allocate kva for the pipe circular buffer; the space is pageable.
395 1.4.2.2 nathanw * This routine will 'realloc' the size of a pipe safely: if the
396 1.4.2.2 nathanw * allocation fails, it will retain the old buffer and return
397 1.4.2.2 nathanw * ENOMEM.
398 1.4.2.2 nathanw */
399 1.4.2.2 nathanw static int
400 1.4.2.2 nathanw pipespace(cpipe, size)
401 1.4.2.2 nathanw struct pipe *cpipe;
402 1.4.2.2 nathanw int size;
403 1.4.2.2 nathanw {
404 1.4.2.2 nathanw caddr_t buffer;
405 1.4.2.2 nathanw #ifdef __FreeBSD__
406 1.4.2.2 nathanw struct vm_object *object;
407 1.4.2.2 nathanw int npages, error;
408 1.4.2.2 nathanw
409 1.4.2.11 nathanw GIANT_REQUIRED;
410 1.4.2.11 nathanw KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
411 1.4.2.11 nathanw ("pipespace: pipe mutex locked"));
412 1.4.2.11 nathanw
413 1.4.2.2 nathanw npages = round_page(size)/PAGE_SIZE;
414 1.4.2.2 nathanw /*
415 1.4.2.2 nathanw * Create an object, I don't like the idea of paging to/from
416 1.4.2.2 nathanw * kernel_object.
417 1.4.2.2 nathanw */
418 1.4.2.2 nathanw object = vm_object_allocate(OBJT_DEFAULT, npages);
419 1.4.2.2 nathanw buffer = (caddr_t) vm_map_min(kernel_map);
420 1.4.2.2 nathanw
421 1.4.2.2 nathanw /*
422 1.4.2.2 nathanw * Insert the object into the kernel map, and allocate kva for it.
423 1.4.2.2 nathanw * The map entry is, by default, pageable.
424 1.4.2.2 nathanw */
425 1.4.2.2 nathanw error = vm_map_find(kernel_map, object, 0,
426 1.4.2.2 nathanw (vm_offset_t *) &buffer, size, 1,
427 1.4.2.2 nathanw VM_PROT_ALL, VM_PROT_ALL, 0);
428 1.4.2.2 nathanw
429 1.4.2.2 nathanw if (error != KERN_SUCCESS) {
430 1.4.2.2 nathanw vm_object_deallocate(object);
431 1.4.2.2 nathanw return (ENOMEM);
432 1.4.2.2 nathanw }
433 1.4.2.2 nathanw #endif /* FreeBSD */
434 1.4.2.2 nathanw
435 1.4.2.2 nathanw #ifdef __NetBSD__
436 1.4.2.2 nathanw /*
437 1.4.2.2 nathanw * Allocate pageable virtual address space. Physical memory is allocated
438 1.4.2.2 nathanw * on demand.
439 1.4.2.2 nathanw */
440 1.4.2.2 nathanw buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
441 1.4.2.2 nathanw if (buffer == NULL)
442 1.4.2.2 nathanw return (ENOMEM);
443 1.4.2.2 nathanw #endif /* NetBSD */
444 1.4.2.2 nathanw
445 1.4.2.2 nathanw /* free old resources if we're resizing */
446 1.4.2.2 nathanw pipe_free_kmem(cpipe);
447 1.4.2.2 nathanw #ifdef __FreeBSD__
448 1.4.2.2 nathanw cpipe->pipe_buffer.object = object;
449 1.4.2.2 nathanw #endif
450 1.4.2.2 nathanw cpipe->pipe_buffer.buffer = buffer;
451 1.4.2.2 nathanw cpipe->pipe_buffer.size = size;
452 1.4.2.2 nathanw cpipe->pipe_buffer.in = 0;
453 1.4.2.2 nathanw cpipe->pipe_buffer.out = 0;
454 1.4.2.2 nathanw cpipe->pipe_buffer.cnt = 0;
455 1.4.2.2 nathanw amountpipekva += cpipe->pipe_buffer.size;
456 1.4.2.2 nathanw return (0);
457 1.4.2.2 nathanw }
458 1.4.2.2 nathanw
459 1.4.2.2 nathanw /*
460 1.4.2.2 nathanw * initialize and allocate VM and memory for pipe
461 1.4.2.2 nathanw */
462 1.4.2.2 nathanw static int
463 1.4.2.3 nathanw pipe_create(cpipep, allockva)
464 1.4.2.2 nathanw struct pipe **cpipep;
465 1.4.2.3 nathanw int allockva;
466 1.4.2.2 nathanw {
467 1.4.2.2 nathanw struct pipe *cpipe;
468 1.4.2.2 nathanw int error;
469 1.4.2.2 nathanw
470 1.4.2.2 nathanw #ifdef __FreeBSD__
471 1.4.2.2 nathanw *cpipep = zalloc(pipe_zone);
472 1.4.2.2 nathanw #endif
473 1.4.2.2 nathanw #ifdef __NetBSD__
474 1.4.2.2 nathanw *cpipep = pool_get(&pipe_pool, M_WAITOK);
475 1.4.2.2 nathanw #endif
476 1.4.2.2 nathanw if (*cpipep == NULL)
477 1.4.2.2 nathanw return (ENOMEM);
478 1.4.2.2 nathanw
479 1.4.2.2 nathanw cpipe = *cpipep;
480 1.4.2.2 nathanw
481 1.4.2.3 nathanw /* Initialize */
482 1.4.2.3 nathanw memset(cpipe, 0, sizeof(*cpipe));
483 1.4.2.2 nathanw cpipe->pipe_state = PIPE_SIGNALR;
484 1.4.2.2 nathanw
485 1.4.2.11 nathanw #ifdef __FreeBSD__
486 1.4.2.11 nathanw cpipe->pipe_mtxp = NULL; /* avoid pipespace assertion */
487 1.4.2.11 nathanw #endif
488 1.4.2.3 nathanw if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
489 1.4.2.2 nathanw return (error);
490 1.4.2.2 nathanw
491 1.4.2.2 nathanw vfs_timestamp(&cpipe->pipe_ctime);
492 1.4.2.2 nathanw cpipe->pipe_atime = cpipe->pipe_ctime;
493 1.4.2.2 nathanw cpipe->pipe_mtime = cpipe->pipe_ctime;
494 1.4.2.2 nathanw #ifdef __NetBSD__
495 1.4.2.2 nathanw cpipe->pipe_pgid = NO_PID;
496 1.4.2.2 nathanw lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
497 1.4.2.2 nathanw #endif
498 1.4.2.2 nathanw
499 1.4.2.2 nathanw return (0);
500 1.4.2.2 nathanw }
501 1.4.2.2 nathanw
502 1.4.2.2 nathanw
503 1.4.2.2 nathanw /*
504 1.4.2.2 nathanw * lock a pipe for I/O, blocking other access
505 1.4.2.2 nathanw */
506 1.4.2.2 nathanw static __inline int
507 1.4.2.2 nathanw pipelock(cpipe, catch)
508 1.4.2.2 nathanw struct pipe *cpipe;
509 1.4.2.2 nathanw int catch;
510 1.4.2.2 nathanw {
511 1.4.2.2 nathanw int error;
512 1.4.2.2 nathanw
513 1.4.2.2 nathanw #ifdef __FreeBSD__
514 1.4.2.11 nathanw PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
515 1.4.2.11 nathanw while (cpipe->pipe_state & PIPE_LOCKFL) {
516 1.4.2.2 nathanw cpipe->pipe_state |= PIPE_LWANT;
517 1.4.2.11 nathanw error = msleep(cpipe, PIPE_MTX(cpipe),
518 1.4.2.11 nathanw catch ? (PRIBIO | PCATCH) : PRIBIO,
519 1.4.2.2 nathanw "pipelk", 0);
520 1.4.2.2 nathanw if (error != 0)
521 1.4.2.2 nathanw return (error);
522 1.4.2.2 nathanw }
523 1.4.2.11 nathanw cpipe->pipe_state |= PIPE_LOCKFL;
524 1.4.2.2 nathanw return (0);
525 1.4.2.2 nathanw #endif
526 1.4.2.2 nathanw
527 1.4.2.2 nathanw #ifdef __NetBSD__
528 1.4.2.2 nathanw do {
529 1.4.2.2 nathanw error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
530 1.4.2.2 nathanw } while (!catch && (error == EINTR || error == ERESTART));
531 1.4.2.2 nathanw return (error);
532 1.4.2.2 nathanw #endif
533 1.4.2.2 nathanw }
534 1.4.2.2 nathanw
535 1.4.2.2 nathanw /*
536 1.4.2.2 nathanw * unlock a pipe I/O lock
537 1.4.2.2 nathanw */
538 1.4.2.2 nathanw static __inline void
539 1.4.2.2 nathanw pipeunlock(cpipe)
540 1.4.2.2 nathanw struct pipe *cpipe;
541 1.4.2.2 nathanw {
542 1.4.2.11 nathanw
543 1.4.2.2 nathanw #ifdef __FreeBSD__
544 1.4.2.11 nathanw PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
545 1.4.2.11 nathanw cpipe->pipe_state &= ~PIPE_LOCKFL;
546 1.4.2.2 nathanw if (cpipe->pipe_state & PIPE_LWANT) {
547 1.4.2.2 nathanw cpipe->pipe_state &= ~PIPE_LWANT;
548 1.4.2.2 nathanw wakeup(cpipe);
549 1.4.2.2 nathanw }
550 1.4.2.2 nathanw #endif
551 1.4.2.2 nathanw
552 1.4.2.2 nathanw #ifdef __NetBSD__
553 1.4.2.2 nathanw lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
554 1.4.2.2 nathanw #endif
555 1.4.2.2 nathanw }
556 1.4.2.2 nathanw
557 1.4.2.2 nathanw /*
558 1.4.2.2 nathanw * Select/poll wakeup. This also sends SIGIO to the peer connected
559 1.4.2.2 nathanw * to the 'sigpipe' side of the pipe.
560 1.4.2.2 nathanw */
561 1.4.2.2 nathanw static __inline void
562 1.4.2.3 nathanw pipeselwakeup(selp, sigp)
563 1.4.2.3 nathanw struct pipe *selp, *sigp;
564 1.4.2.2 nathanw {
565 1.4.2.14 nathanw
566 1.4.2.14 nathanw #ifdef __FreeBSD__
567 1.4.2.3 nathanw if (selp->pipe_state & PIPE_SEL) {
568 1.4.2.3 nathanw selp->pipe_state &= ~PIPE_SEL;
569 1.4.2.3 nathanw selwakeup(&selp->pipe_sel);
570 1.4.2.2 nathanw }
571 1.4.2.3 nathanw if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
572 1.4.2.3 nathanw pgsigio(sigp->pipe_sigio, SIGIO, 0);
573 1.4.2.3 nathanw KNOTE(&selp->pipe_sel.si_note, 0);
574 1.4.2.2 nathanw #endif
575 1.4.2.2 nathanw
576 1.4.2.2 nathanw #ifdef __NetBSD__
577 1.4.2.14 nathanw selnotify(&selp->pipe_sel, 0);
578 1.4.2.14 nathanw if (sigp && (sigp->pipe_state & PIPE_ASYNC) &&
579 1.4.2.14 nathanw sigp->pipe_pgid != NO_PID) {
580 1.4.2.2 nathanw struct proc *p;
581 1.4.2.2 nathanw
582 1.4.2.3 nathanw if (sigp->pipe_pgid < 0)
583 1.4.2.3 nathanw gsignal(-sigp->pipe_pgid, SIGIO);
584 1.4.2.3 nathanw else if (sigp->pipe_pgid > 0 && (p = pfind(sigp->pipe_pgid)) != 0)
585 1.4.2.2 nathanw psignal(p, SIGIO);
586 1.4.2.2 nathanw }
587 1.4.2.2 nathanw #endif /* NetBSD */
588 1.4.2.2 nathanw }
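/*
 * Editor's illustrative sketch (not part of the original source): how the
 * SIGIO delivery above is typically armed from user level.  fcntl(F_SETOWN)
 * records the recipient (pipe_pgid / pipe_sigio) and O_ASYNC corresponds to
 * the PIPE_ASYNC flag tested above; the fcntl(2)-to-ioctl plumbing that sets
 * that state lives outside this excerpt, so treat those details as assumptions.
 */
#if 0	/* userland example, not kernel code */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void
on_sigio(int sig)
{

	got_sigio = 1;
}

int
main(void)
{
	int fds[2], fl;
	char c;

	if (pipe(fds) == -1)
		return 1;
	(void)signal(SIGIO, on_sigio);

	/* Ask for SIGIO on the read side and direct it at ourselves. */
	if (fcntl(fds[0], F_SETOWN, getpid()) == -1)
		return 1;
	fl = fcntl(fds[0], F_GETFL);
	if (fcntl(fds[0], F_SETFL, fl | O_ASYNC) == -1)
		return 1;

	/* A write on the peer end reaches pipeselwakeup() -> SIGIO. */
	(void)write(fds[1], "x", 1);
	while (!got_sigio)
		(void)pause();	/* simplistic; real code would use sigsuspend */
	(void)read(fds[0], &c, 1);
	printf("got SIGIO after peer write\n");
	return 0;
}
#endif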
589 1.4.2.2 nathanw
590 1.4.2.2 nathanw /* ARGSUSED */
591 1.4.2.2 nathanw #ifdef __FreeBSD__
592 1.4.2.2 nathanw static int
593 1.4.2.11 nathanw pipe_read(fp, uio, cred, flags, td)
594 1.4.2.2 nathanw struct file *fp;
595 1.4.2.2 nathanw struct uio *uio;
596 1.4.2.2 nathanw struct ucred *cred;
597 1.4.2.11 nathanw struct thread *td;
598 1.4.2.2 nathanw int flags;
600 1.4.2.2 nathanw #elif defined(__NetBSD__)
601 1.4.2.2 nathanw static int
602 1.4.2.2 nathanw pipe_read(fp, offset, uio, cred, flags)
603 1.4.2.2 nathanw struct file *fp;
604 1.4.2.2 nathanw off_t *offset;
605 1.4.2.2 nathanw struct uio *uio;
606 1.4.2.2 nathanw struct ucred *cred;
607 1.4.2.2 nathanw int flags;
608 1.4.2.2 nathanw #endif
609 1.4.2.2 nathanw {
610 1.4.2.2 nathanw struct pipe *rpipe = (struct pipe *) fp->f_data;
611 1.4.2.2 nathanw int error;
612 1.4.2.2 nathanw size_t nread = 0;
613 1.4.2.2 nathanw size_t size;
614 1.4.2.2 nathanw size_t ocnt;
615 1.4.2.2 nathanw
616 1.4.2.11 nathanw PIPE_LOCK(rpipe);
617 1.4.2.2 nathanw ++rpipe->pipe_busy;
618 1.4.2.14 nathanw ocnt = rpipe->pipe_buffer.cnt;
619 1.4.2.14 nathanw
620 1.4.2.2 nathanw error = pipelock(rpipe, 1);
621 1.4.2.2 nathanw if (error)
622 1.4.2.2 nathanw goto unlocked_error;
623 1.4.2.2 nathanw
624 1.4.2.2 nathanw while (uio->uio_resid) {
625 1.4.2.2 nathanw /*
626 1.4.2.2 nathanw * normal pipe buffer receive
627 1.4.2.2 nathanw */
628 1.4.2.2 nathanw if (rpipe->pipe_buffer.cnt > 0) {
629 1.4.2.2 nathanw size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
630 1.4.2.2 nathanw if (size > rpipe->pipe_buffer.cnt)
631 1.4.2.2 nathanw size = rpipe->pipe_buffer.cnt;
632 1.4.2.2 nathanw if (size > uio->uio_resid)
633 1.4.2.2 nathanw size = uio->uio_resid;
634 1.4.2.2 nathanw
635 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
636 1.4.2.2 nathanw error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
637 1.4.2.2 nathanw size, uio);
638 1.4.2.11 nathanw PIPE_LOCK(rpipe);
639 1.4.2.2 nathanw if (error)
640 1.4.2.2 nathanw break;
641 1.4.2.2 nathanw
642 1.4.2.2 nathanw rpipe->pipe_buffer.out += size;
643 1.4.2.2 nathanw if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
644 1.4.2.2 nathanw rpipe->pipe_buffer.out = 0;
645 1.4.2.2 nathanw
646 1.4.2.2 nathanw rpipe->pipe_buffer.cnt -= size;
647 1.4.2.2 nathanw
648 1.4.2.2 nathanw /*
649 1.4.2.2 nathanw * If there is no more to read in the pipe, reset
650 1.4.2.2 nathanw * its pointers to the beginning. This improves
651 1.4.2.2 nathanw * cache hit stats.
652 1.4.2.2 nathanw */
653 1.4.2.2 nathanw if (rpipe->pipe_buffer.cnt == 0) {
654 1.4.2.2 nathanw rpipe->pipe_buffer.in = 0;
655 1.4.2.2 nathanw rpipe->pipe_buffer.out = 0;
656 1.4.2.2 nathanw }
657 1.4.2.2 nathanw nread += size;
658 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
659 1.4.2.2 nathanw /*
660 1.4.2.2 nathanw * Direct copy, bypassing a kernel buffer.
661 1.4.2.2 nathanw */
662 1.4.2.2 nathanw } else if ((size = rpipe->pipe_map.cnt) &&
663 1.4.2.2 nathanw (rpipe->pipe_state & PIPE_DIRECTW)) {
664 1.4.2.2 nathanw caddr_t va;
665 1.4.2.2 nathanw if (size > uio->uio_resid)
666 1.4.2.2 nathanw size = uio->uio_resid;
667 1.4.2.2 nathanw
668 1.4.2.2 nathanw va = (caddr_t) rpipe->pipe_map.kva +
669 1.4.2.2 nathanw rpipe->pipe_map.pos;
670 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
671 1.4.2.2 nathanw error = uiomove(va, size, uio);
672 1.4.2.11 nathanw PIPE_LOCK(rpipe);
673 1.4.2.2 nathanw if (error)
674 1.4.2.2 nathanw break;
675 1.4.2.2 nathanw nread += size;
676 1.4.2.2 nathanw rpipe->pipe_map.pos += size;
677 1.4.2.2 nathanw rpipe->pipe_map.cnt -= size;
678 1.4.2.2 nathanw if (rpipe->pipe_map.cnt == 0) {
679 1.4.2.2 nathanw rpipe->pipe_state &= ~PIPE_DIRECTW;
680 1.4.2.2 nathanw wakeup(rpipe);
681 1.4.2.2 nathanw }
682 1.4.2.2 nathanw #endif
683 1.4.2.2 nathanw } else {
684 1.4.2.2 nathanw /*
685 1.4.2.2 nathanw * detect EOF condition
686 1.4.2.2 nathanw * read returns 0 on EOF, no need to set error
687 1.4.2.2 nathanw */
688 1.4.2.2 nathanw if (rpipe->pipe_state & PIPE_EOF)
689 1.4.2.2 nathanw break;
690 1.4.2.2 nathanw
691 1.4.2.2 nathanw /*
692 1.4.2.2 nathanw * If the "write-side" has been blocked, wake it up now.
693 1.4.2.2 nathanw */
694 1.4.2.2 nathanw if (rpipe->pipe_state & PIPE_WANTW) {
695 1.4.2.2 nathanw rpipe->pipe_state &= ~PIPE_WANTW;
696 1.4.2.2 nathanw wakeup(rpipe);
697 1.4.2.2 nathanw }
698 1.4.2.2 nathanw
699 1.4.2.2 nathanw /*
700 1.4.2.2 nathanw * Break if some data was read.
701 1.4.2.2 nathanw */
702 1.4.2.2 nathanw if (nread > 0)
703 1.4.2.2 nathanw break;
704 1.4.2.2 nathanw
705 1.4.2.2 nathanw /*
706 1.4.2.2 nathanw * don't block on non-blocking I/O
707 1.4.2.2 nathanw */
708 1.4.2.2 nathanw if (fp->f_flag & FNONBLOCK) {
709 1.4.2.2 nathanw error = EAGAIN;
710 1.4.2.2 nathanw break;
711 1.4.2.2 nathanw }
712 1.4.2.2 nathanw
713 1.4.2.2 nathanw /*
714 1.4.2.2 nathanw * Unlock the pipe buffer for our remaining processing.
715 1.4.2.2 nathanw * We will either break out with an error or we will
716 1.4.2.2 nathanw * sleep and relock to loop.
717 1.4.2.2 nathanw */
718 1.4.2.2 nathanw pipeunlock(rpipe);
719 1.4.2.2 nathanw
720 1.4.2.2 nathanw /*
721 1.4.2.2 nathanw * We want to read more, wake up select/poll.
722 1.4.2.2 nathanw */
723 1.4.2.3 nathanw pipeselwakeup(rpipe, rpipe->pipe_peer);
724 1.4.2.2 nathanw
725 1.4.2.2 nathanw rpipe->pipe_state |= PIPE_WANTR;
726 1.4.2.11 nathanw #ifdef __FreeBSD__
727 1.4.2.11 nathanw error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
728 1.4.2.11 nathanw "piperd", 0);
729 1.4.2.11 nathanw #else
730 1.4.2.2 nathanw error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
731 1.4.2.11 nathanw #endif
732 1.4.2.2 nathanw if (error != 0 || (error = pipelock(rpipe, 1)))
733 1.4.2.2 nathanw goto unlocked_error;
734 1.4.2.2 nathanw }
735 1.4.2.2 nathanw }
736 1.4.2.2 nathanw pipeunlock(rpipe);
737 1.4.2.2 nathanw
738 1.4.2.11 nathanw /* XXX: should probably do this before getting any locks. */
739 1.4.2.2 nathanw if (error == 0)
740 1.4.2.2 nathanw vfs_timestamp(&rpipe->pipe_atime);
741 1.4.2.2 nathanw unlocked_error:
742 1.4.2.2 nathanw --rpipe->pipe_busy;
743 1.4.2.2 nathanw
744 1.4.2.2 nathanw /*
745 1.4.2.2 nathanw * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
746 1.4.2.2 nathanw */
747 1.4.2.2 nathanw if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
748 1.4.2.2 nathanw rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
749 1.4.2.2 nathanw wakeup(rpipe);
750 1.4.2.2 nathanw } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
751 1.4.2.2 nathanw /*
752 1.4.2.2 nathanw * Handle write blocking hysteresis.
753 1.4.2.2 nathanw */
754 1.4.2.2 nathanw if (rpipe->pipe_state & PIPE_WANTW) {
755 1.4.2.2 nathanw rpipe->pipe_state &= ~PIPE_WANTW;
756 1.4.2.2 nathanw wakeup(rpipe);
757 1.4.2.2 nathanw }
758 1.4.2.2 nathanw }
759 1.4.2.2 nathanw
760 1.4.2.2 nathanw /*
761 1.4.2.2 nathanw * If anything was read off the buffer, signal to the writer it's
762 1.4.2.2 nathanw * possible to write more data. Also send signal if we are here for the
763 1.4.2.2 nathanw * first time after last write.
764 1.4.2.2 nathanw */
765 1.4.2.2 nathanw if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
766 1.4.2.2 nathanw && (ocnt != rpipe->pipe_buffer.cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
767 1.4.2.3 nathanw pipeselwakeup(rpipe, rpipe->pipe_peer);
768 1.4.2.2 nathanw rpipe->pipe_state &= ~PIPE_SIGNALR;
769 1.4.2.2 nathanw }
770 1.4.2.2 nathanw
771 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
772 1.4.2.2 nathanw return (error);
773 1.4.2.2 nathanw }
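/*
 * Editor's illustrative sketch (not part of the original source): the
 * user-visible behaviour implemented by pipe_read() above -- EAGAIN on an
 * empty pipe when FNONBLOCK is set, a (possibly short) read once data is
 * buffered, and a 0 return rather than an error once the write side is gone
 * (PIPE_EOF).
 */
#if 0	/* userland example, not kernel code */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char buf[16];
	ssize_t n;

	if (pipe(fds) == -1)
		return 1;

	/* Empty pipe + non-blocking descriptor => EAGAIN. */
	(void)fcntl(fds[0], F_SETFL, O_NONBLOCK);
	n = read(fds[0], buf, sizeof(buf));
	printf("empty, non-blocking: n=%zd, errno=%d (EAGAIN=%d)\n",
	    n, errno, EAGAIN);

	/* Whatever is in the kernel buffer is returned, possibly short. */
	(void)write(fds[1], "hello", 5);
	n = read(fds[0], buf, sizeof(buf));
	printf("after write: n=%zd\n", n);

	/* Once the writer is gone, read returns 0 (EOF), not an error. */
	close(fds[1]);
	n = read(fds[0], buf, sizeof(buf));
	printf("after closing the write side: n=%zd\n", n);
	return 0;
}
#endif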
774 1.4.2.2 nathanw
775 1.4.2.2 nathanw #ifdef __FreeBSD__
776 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
777 1.4.2.2 nathanw /*
778 1.4.2.2 nathanw * Map the sending process's buffer into kernel space and wire it.
779 1.4.2.2 nathanw * This is similar to a physical write operation.
780 1.4.2.2 nathanw */
781 1.4.2.2 nathanw static int
782 1.4.2.2 nathanw pipe_build_write_buffer(wpipe, uio)
783 1.4.2.2 nathanw struct pipe *wpipe;
784 1.4.2.2 nathanw struct uio *uio;
785 1.4.2.2 nathanw {
786 1.4.2.2 nathanw size_t size;
787 1.4.2.2 nathanw int i;
788 1.4.2.2 nathanw vm_offset_t addr, endaddr, paddr;
789 1.4.2.2 nathanw
790 1.4.2.11 nathanw GIANT_REQUIRED;
791 1.4.2.11 nathanw PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
792 1.4.2.11 nathanw
793 1.4.2.2 nathanw size = uio->uio_iov->iov_len;
794 1.4.2.2 nathanw if (size > wpipe->pipe_buffer.size)
795 1.4.2.2 nathanw size = wpipe->pipe_buffer.size;
796 1.4.2.2 nathanw
797 1.4.2.2 nathanw endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
798 1.4.2.2 nathanw addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
799 1.4.2.2 nathanw for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
800 1.4.2.2 nathanw vm_page_t m;
801 1.4.2.2 nathanw
802 1.4.2.2 nathanw if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
803 1.4.2.2 nathanw (paddr = pmap_kextract(addr)) == 0) {
804 1.4.2.2 nathanw int j;
805 1.4.2.2 nathanw
806 1.4.2.2 nathanw for (j = 0; j < i; j++)
807 1.4.2.2 nathanw vm_page_unwire(wpipe->pipe_map.ms[j], 1);
808 1.4.2.2 nathanw return (EFAULT);
809 1.4.2.2 nathanw }
810 1.4.2.2 nathanw
811 1.4.2.2 nathanw m = PHYS_TO_VM_PAGE(paddr);
812 1.4.2.2 nathanw vm_page_wire(m);
813 1.4.2.2 nathanw wpipe->pipe_map.ms[i] = m;
814 1.4.2.2 nathanw }
815 1.4.2.2 nathanw
816 1.4.2.2 nathanw /*
817 1.4.2.2 nathanw * set up the control block
818 1.4.2.2 nathanw */
819 1.4.2.2 nathanw wpipe->pipe_map.npages = i;
820 1.4.2.2 nathanw wpipe->pipe_map.pos =
821 1.4.2.2 nathanw ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
822 1.4.2.2 nathanw wpipe->pipe_map.cnt = size;
823 1.4.2.2 nathanw
824 1.4.2.2 nathanw /*
825 1.4.2.2 nathanw * and map the buffer
826 1.4.2.2 nathanw */
827 1.4.2.2 nathanw if (wpipe->pipe_map.kva == 0) {
828 1.4.2.2 nathanw /*
829 1.4.2.2 nathanw * We need to allocate space for an extra page because the
830 1.4.2.2 nathanw * address range might (will) span pages at times.
831 1.4.2.2 nathanw */
832 1.4.2.2 nathanw wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
833 1.4.2.2 nathanw wpipe->pipe_buffer.size + PAGE_SIZE);
834 1.4.2.2 nathanw amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
835 1.4.2.2 nathanw }
836 1.4.2.2 nathanw pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
837 1.4.2.2 nathanw wpipe->pipe_map.npages);
838 1.4.2.2 nathanw
839 1.4.2.2 nathanw /*
840 1.4.2.2 nathanw * and update the uio data
841 1.4.2.2 nathanw */
842 1.4.2.2 nathanw
843 1.4.2.2 nathanw uio->uio_iov->iov_len -= size;
844 1.4.2.2 nathanw uio->uio_iov->iov_base += size;
845 1.4.2.2 nathanw if (uio->uio_iov->iov_len == 0)
846 1.4.2.2 nathanw uio->uio_iov++;
847 1.4.2.2 nathanw uio->uio_resid -= size;
848 1.4.2.2 nathanw uio->uio_offset += size;
849 1.4.2.2 nathanw return (0);
850 1.4.2.2 nathanw }
851 1.4.2.2 nathanw
852 1.4.2.2 nathanw /*
853 1.4.2.2 nathanw * unmap and unwire the process buffer
854 1.4.2.2 nathanw */
855 1.4.2.2 nathanw static void
856 1.4.2.2 nathanw pipe_destroy_write_buffer(wpipe)
857 1.4.2.2 nathanw struct pipe *wpipe;
858 1.4.2.2 nathanw {
859 1.4.2.2 nathanw int i;
860 1.4.2.2 nathanw
861 1.4.2.11 nathanw GIANT_REQUIRED;
862 1.4.2.11 nathanw PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
863 1.4.2.11 nathanw
864 1.4.2.2 nathanw if (wpipe->pipe_map.kva) {
865 1.4.2.2 nathanw pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
866 1.4.2.2 nathanw
867 1.4.2.2 nathanw if (amountpipekva > maxpipekva) {
868 1.4.2.2 nathanw vm_offset_t kva = wpipe->pipe_map.kva;
869 1.4.2.2 nathanw wpipe->pipe_map.kva = 0;
870 1.4.2.2 nathanw kmem_free(kernel_map, kva,
871 1.4.2.2 nathanw wpipe->pipe_buffer.size + PAGE_SIZE);
872 1.4.2.2 nathanw amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
873 1.4.2.2 nathanw }
874 1.4.2.2 nathanw }
875 1.4.2.2 nathanw for (i = 0; i < wpipe->pipe_map.npages; i++)
876 1.4.2.2 nathanw vm_page_unwire(wpipe->pipe_map.ms[i], 1);
877 1.4.2.11 nathanw wpipe->pipe_map.npages = 0;
878 1.4.2.2 nathanw }
879 1.4.2.2 nathanw
880 1.4.2.2 nathanw /*
881 1.4.2.2 nathanw * In the case of a signal, the writing process might go away. This
882 1.4.2.2 nathanw * code copies the data into the circular buffer so that the source
883 1.4.2.2 nathanw * pages can be freed without loss of data.
884 1.4.2.2 nathanw */
885 1.4.2.2 nathanw static void
886 1.4.2.2 nathanw pipe_clone_write_buffer(wpipe)
887 1.4.2.2 nathanw struct pipe *wpipe;
888 1.4.2.2 nathanw {
889 1.4.2.2 nathanw int size;
890 1.4.2.2 nathanw int pos;
891 1.4.2.2 nathanw
892 1.4.2.11 nathanw PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
893 1.4.2.2 nathanw size = wpipe->pipe_map.cnt;
894 1.4.2.2 nathanw pos = wpipe->pipe_map.pos;
895 1.4.2.3 nathanw memcpy((caddr_t) wpipe->pipe_buffer.buffer,
896 1.4.2.3 nathanw (caddr_t) wpipe->pipe_map.kva + pos, size);
897 1.4.2.2 nathanw
898 1.4.2.2 nathanw wpipe->pipe_buffer.in = size;
899 1.4.2.2 nathanw wpipe->pipe_buffer.out = 0;
900 1.4.2.2 nathanw wpipe->pipe_buffer.cnt = size;
901 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_DIRECTW;
902 1.4.2.2 nathanw
903 1.4.2.11 nathanw PIPE_GET_GIANT(wpipe);
904 1.4.2.2 nathanw pipe_destroy_write_buffer(wpipe);
905 1.4.2.11 nathanw PIPE_DROP_GIANT(wpipe);
906 1.4.2.2 nathanw }
907 1.4.2.2 nathanw
908 1.4.2.2 nathanw /*
909 1.4.2.2 nathanw * This implements the pipe buffer write mechanism. Note that only
910 1.4.2.2 nathanw * a direct write OR a normal pipe write can be pending at any given time.
911 1.4.2.2 nathanw * If there are any characters in the pipe buffer, the direct write will
912 1.4.2.2 nathanw * be deferred until the receiving process grabs all of the bytes from
913 1.4.2.2 nathanw * the pipe buffer. Then the direct mapping write is set-up.
914 1.4.2.2 nathanw */
915 1.4.2.2 nathanw static int
916 1.4.2.2 nathanw pipe_direct_write(wpipe, uio)
917 1.4.2.2 nathanw struct pipe *wpipe;
918 1.4.2.2 nathanw struct uio *uio;
919 1.4.2.2 nathanw {
920 1.4.2.2 nathanw int error;
921 1.4.2.2 nathanw
922 1.4.2.2 nathanw retry:
923 1.4.2.11 nathanw PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
924 1.4.2.2 nathanw while (wpipe->pipe_state & PIPE_DIRECTW) {
925 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
926 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
927 1.4.2.2 nathanw wakeup(wpipe);
928 1.4.2.2 nathanw }
929 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_WANTW;
930 1.4.2.11 nathanw error = msleep(wpipe, PIPE_MTX(wpipe),
931 1.4.2.11 nathanw PRIBIO | PCATCH, "pipdww", 0);
932 1.4.2.2 nathanw if (error)
933 1.4.2.2 nathanw goto error1;
934 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
935 1.4.2.2 nathanw error = EPIPE;
936 1.4.2.2 nathanw goto error1;
937 1.4.2.2 nathanw }
938 1.4.2.2 nathanw }
939 1.4.2.2 nathanw wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
940 1.4.2.2 nathanw if (wpipe->pipe_buffer.cnt > 0) {
941 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
942 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
943 1.4.2.2 nathanw wakeup(wpipe);
944 1.4.2.2 nathanw }
945 1.4.2.8 nathanw
946 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_WANTW;
947 1.4.2.11 nathanw error = msleep(wpipe, PIPE_MTX(wpipe),
948 1.4.2.11 nathanw PRIBIO | PCATCH, "pipdwc", 0);
949 1.4.2.2 nathanw if (error)
950 1.4.2.2 nathanw goto error1;
951 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
952 1.4.2.2 nathanw error = EPIPE;
953 1.4.2.2 nathanw goto error1;
954 1.4.2.2 nathanw }
955 1.4.2.2 nathanw goto retry;
956 1.4.2.2 nathanw }
957 1.4.2.2 nathanw
958 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_DIRECTW;
959 1.4.2.2 nathanw
960 1.4.2.11 nathanw PIPE_GET_GIANT(wpipe);
961 1.4.2.2 nathanw error = pipe_build_write_buffer(wpipe, uio);
962 1.4.2.11 nathanw PIPE_DROP_GIANT(wpipe);
963 1.4.2.2 nathanw if (error) {
964 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_DIRECTW;
965 1.4.2.2 nathanw goto error1;
966 1.4.2.2 nathanw }
967 1.4.2.2 nathanw
968 1.4.2.2 nathanw error = 0;
969 1.4.2.2 nathanw while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
970 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
971 1.4.2.2 nathanw pipelock(wpipe, 0);
972 1.4.2.11 nathanw PIPE_GET_GIANT(wpipe);
973 1.4.2.2 nathanw pipe_destroy_write_buffer(wpipe);
974 1.4.2.11 nathanw PIPE_DROP_GIANT(wpipe);
975 1.4.2.2 nathanw pipeunlock(wpipe);
976 1.4.2.3 nathanw pipeselwakeup(wpipe, wpipe);
977 1.4.2.2 nathanw error = EPIPE;
978 1.4.2.2 nathanw goto error1;
979 1.4.2.2 nathanw }
980 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
981 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
982 1.4.2.2 nathanw wakeup(wpipe);
983 1.4.2.2 nathanw }
984 1.4.2.3 nathanw pipeselwakeup(wpipe, wpipe);
985 1.4.2.11 nathanw error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
986 1.4.2.11 nathanw "pipdwt", 0);
987 1.4.2.2 nathanw }
988 1.4.2.2 nathanw
989 1.4.2.2 nathanw pipelock(wpipe,0);
990 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_DIRECTW) {
991 1.4.2.2 nathanw /*
992 1.4.2.2 nathanw * this bit of trickery substitutes a kernel buffer for
993 1.4.2.2 nathanw * the process that might be going away.
994 1.4.2.2 nathanw */
995 1.4.2.2 nathanw pipe_clone_write_buffer(wpipe);
996 1.4.2.2 nathanw } else {
997 1.4.2.11 nathanw PIPE_GET_GIANT(wpipe);
998 1.4.2.2 nathanw pipe_destroy_write_buffer(wpipe);
999 1.4.2.11 nathanw PIPE_DROP_GIANT(wpipe);
1000 1.4.2.2 nathanw }
1001 1.4.2.2 nathanw pipeunlock(wpipe);
1002 1.4.2.2 nathanw return (error);
1003 1.4.2.2 nathanw
1004 1.4.2.2 nathanw error1:
1005 1.4.2.2 nathanw wakeup(wpipe);
1006 1.4.2.2 nathanw return (error);
1007 1.4.2.2 nathanw }
1008 1.4.2.2 nathanw #endif /* !PIPE_NODIRECT */
1009 1.4.2.2 nathanw #endif /* FreeBSD */
1010 1.4.2.2 nathanw
1011 1.4.2.2 nathanw #ifdef __NetBSD__
1012 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1013 1.4.2.2 nathanw /*
1014 1.4.2.2 nathanw * Allocate structure for loan transfer.
1015 1.4.2.2 nathanw */
1016 1.4.2.8 nathanw static int
1017 1.4.2.8 nathanw pipe_loan_alloc(wpipe, npages)
1018 1.4.2.2 nathanw struct pipe *wpipe;
1019 1.4.2.2 nathanw int npages;
1020 1.4.2.2 nathanw {
1021 1.4.2.8 nathanw vsize_t len;
1022 1.4.2.8 nathanw
1023 1.4.2.8 nathanw len = (vsize_t)npages << PAGE_SHIFT;
1024 1.4.2.8 nathanw wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, len);
1025 1.4.2.11 nathanw if (wpipe->pipe_map.kva == 0)
1026 1.4.2.2 nathanw return (ENOMEM);
1027 1.4.2.2 nathanw
1028 1.4.2.8 nathanw amountpipekva += len;
1029 1.4.2.2 nathanw wpipe->pipe_map.npages = npages;
1030 1.4.2.8 nathanw wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
1031 1.4.2.8 nathanw M_WAITOK);
1032 1.4.2.2 nathanw return (0);
1033 1.4.2.2 nathanw }
1034 1.4.2.2 nathanw
1035 1.4.2.2 nathanw /*
1036 1.4.2.2 nathanw * Free resources allocated for loan transfer.
1037 1.4.2.2 nathanw */
1038 1.4.2.2 nathanw static void
1039 1.4.2.2 nathanw pipe_loan_free(wpipe)
1040 1.4.2.2 nathanw struct pipe *wpipe;
1041 1.4.2.2 nathanw {
1042 1.4.2.8 nathanw vsize_t len;
1043 1.4.2.8 nathanw
1044 1.4.2.8 nathanw len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
1045 1.4.2.8 nathanw uvm_km_free(kernel_map, wpipe->pipe_map.kva, len);
1046 1.4.2.11 nathanw wpipe->pipe_map.kva = 0;
1047 1.4.2.8 nathanw amountpipekva -= len;
1048 1.4.2.8 nathanw free(wpipe->pipe_map.pgs, M_PIPE);
1049 1.4.2.8 nathanw wpipe->pipe_map.pgs = NULL;
1050 1.4.2.2 nathanw }
1051 1.4.2.2 nathanw
1052 1.4.2.2 nathanw /*
1053 1.4.2.2 nathanw * NetBSD direct write, using uvm_loan() mechanism.
1054 1.4.2.2 nathanw * This implements the pipe buffer write mechanism. Note that only
1055 1.4.2.2 nathanw * a direct write OR a normal pipe write can be pending at any given time.
1056 1.4.2.2 nathanw * If there are any characters in the pipe buffer, the direct write will
1057 1.4.2.2 nathanw * be deferred until the receiving process grabs all of the bytes from
1058 1.4.2.2 nathanw * the pipe buffer. Then the direct mapping write is set-up.
1059 1.4.2.2 nathanw */
1060 1.4.2.8 nathanw static int
1061 1.4.2.2 nathanw pipe_direct_write(wpipe, uio)
1062 1.4.2.2 nathanw struct pipe *wpipe;
1063 1.4.2.2 nathanw struct uio *uio;
1064 1.4.2.2 nathanw {
1065 1.4.2.3 nathanw int error, npages, j;
1066 1.4.2.8 nathanw struct vm_page **pgs;
1067 1.4.2.2 nathanw vaddr_t bbase, kva, base, bend;
1068 1.4.2.2 nathanw vsize_t blen, bcnt;
1069 1.4.2.3 nathanw voff_t bpos;
1070 1.4.2.3 nathanw
1071 1.4.2.2 nathanw retry:
1072 1.4.2.2 nathanw while (wpipe->pipe_state & PIPE_DIRECTW) {
1073 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
1074 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
1075 1.4.2.2 nathanw wakeup(wpipe);
1076 1.4.2.2 nathanw }
1077 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_WANTW;
1078 1.4.2.2 nathanw error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
1079 1.4.2.2 nathanw if (error)
1080 1.4.2.3 nathanw goto error;
1081 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
1082 1.4.2.2 nathanw error = EPIPE;
1083 1.4.2.3 nathanw goto error;
1084 1.4.2.2 nathanw }
1085 1.4.2.2 nathanw }
1086 1.4.2.2 nathanw wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
1087 1.4.2.2 nathanw if (wpipe->pipe_buffer.cnt > 0) {
1088 1.4.2.3 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
1089 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
1090 1.4.2.2 nathanw wakeup(wpipe);
1091 1.4.2.2 nathanw }
1092 1.4.2.8 nathanw
1093 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_WANTW;
1094 1.4.2.2 nathanw error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
1095 1.4.2.2 nathanw if (error)
1096 1.4.2.3 nathanw goto error;
1097 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
1098 1.4.2.2 nathanw error = EPIPE;
1099 1.4.2.3 nathanw goto error;
1100 1.4.2.2 nathanw }
1101 1.4.2.2 nathanw goto retry;
1102 1.4.2.2 nathanw }
1103 1.4.2.2 nathanw
1104 1.4.2.2 nathanw /*
1105 1.4.2.5 nathanw * Handle the first PIPE_DIRECT_CHUNK bytes of the buffer. Deal with
1106 1.4.2.5 nathanw * buffers not aligned to PAGE_SIZE.
1107 1.4.2.2 nathanw */
1108 1.4.2.5 nathanw bbase = (vaddr_t)uio->uio_iov->iov_base;
1109 1.4.2.3 nathanw base = trunc_page(bbase);
1110 1.4.2.5 nathanw bend = round_page(bbase + uio->uio_iov->iov_len);
1111 1.4.2.3 nathanw blen = bend - base;
1112 1.4.2.3 nathanw bpos = bbase - base;
1113 1.4.2.3 nathanw
1114 1.4.2.3 nathanw if (blen > PIPE_DIRECT_CHUNK) {
1115 1.4.2.3 nathanw blen = PIPE_DIRECT_CHUNK;
1116 1.4.2.3 nathanw bend = base + blen;
1117 1.4.2.3 nathanw bcnt = PIPE_DIRECT_CHUNK - bpos;
1118 1.4.2.8 nathanw } else {
1119 1.4.2.5 nathanw bcnt = uio->uio_iov->iov_len;
1120 1.4.2.8 nathanw }
1121 1.4.2.8 nathanw npages = blen >> PAGE_SHIFT;
1122 1.4.2.2 nathanw
1123 1.4.2.3 nathanw wpipe->pipe_map.pos = bpos;
1124 1.4.2.3 nathanw wpipe->pipe_map.cnt = bcnt;
1125 1.4.2.2 nathanw
1126 1.4.2.3 nathanw /*
1127 1.4.2.3 nathanw * Free the old kva if we need more pages than we have
1128 1.4.2.3 nathanw * allocated.
1129 1.4.2.3 nathanw */
1130 1.4.2.3 nathanw if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
1131 1.4.2.3 nathanw pipe_loan_free(wpipe);
1132 1.4.2.2 nathanw
1133 1.4.2.3 nathanw /* Allocate new kva. */
1134 1.4.2.11 nathanw if (wpipe->pipe_map.kva == 0) {
1135 1.4.2.8 nathanw error = pipe_loan_alloc(wpipe, npages);
1136 1.4.2.8 nathanw if (error) {
1137 1.4.2.8 nathanw goto error;
1138 1.4.2.8 nathanw }
1139 1.4.2.8 nathanw }
1140 1.4.2.8 nathanw
1141 1.4.2.3 nathanw /* Loan the write buffer memory from writer process */
1142 1.4.2.8 nathanw pgs = wpipe->pipe_map.pgs;
1143 1.4.2.3 nathanw error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
1144 1.4.2.8 nathanw pgs, UVM_LOAN_TOPAGE);
1145 1.4.2.8 nathanw if (error) {
1146 1.4.2.8 nathanw pgs = NULL;
1147 1.4.2.3 nathanw goto cleanup;
1148 1.4.2.8 nathanw }
1149 1.4.2.8 nathanw
1150 1.4.2.3 nathanw /* Enter the loaned pages to kva */
1151 1.4.2.3 nathanw kva = wpipe->pipe_map.kva;
1152 1.4.2.8 nathanw for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
1153 1.4.2.8 nathanw pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
1154 1.4.2.8 nathanw }
1155 1.4.2.4 nathanw pmap_update(pmap_kernel());
1156 1.4.2.2 nathanw
1157 1.4.2.3 nathanw wpipe->pipe_state |= PIPE_DIRECTW;
1158 1.4.2.3 nathanw while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
1159 1.4.2.3 nathanw if (wpipe->pipe_state & PIPE_EOF) {
1160 1.4.2.3 nathanw error = EPIPE;
1161 1.4.2.3 nathanw break;
1162 1.4.2.2 nathanw }
1163 1.4.2.3 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
1164 1.4.2.3 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
1165 1.4.2.3 nathanw wakeup(wpipe);
1166 1.4.2.3 nathanw }
1167 1.4.2.3 nathanw pipeselwakeup(wpipe, wpipe);
1168 1.4.2.3 nathanw error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
1169 1.4.2.3 nathanw }
1170 1.4.2.3 nathanw
1171 1.4.2.3 nathanw if (error)
1172 1.4.2.3 nathanw wpipe->pipe_state &= ~PIPE_DIRECTW;
1173 1.4.2.2 nathanw
1174 1.4.2.8 nathanw cleanup:
1175 1.4.2.3 nathanw pipelock(wpipe, 0);
1176 1.4.2.9 nathanw if (pgs != NULL) {
1177 1.4.2.9 nathanw pmap_kremove(wpipe->pipe_map.kva, blen);
1178 1.4.2.8 nathanw uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
1179 1.4.2.9 nathanw }
1180 1.4.2.3 nathanw if (error || amountpipekva > maxpipekva)
1181 1.4.2.3 nathanw pipe_loan_free(wpipe);
1182 1.4.2.3 nathanw pipeunlock(wpipe);
1183 1.4.2.2 nathanw
1184 1.4.2.6 nathanw if (error) {
1185 1.4.2.3 nathanw pipeselwakeup(wpipe, wpipe);
1186 1.4.2.3 nathanw
1187 1.4.2.3 nathanw /*
1188 1.4.2.6 nathanw * If nothing was read from what we offered, return the error
1189 1.4.2.8 nathanw * right away. Otherwise update the uio residual first; the
1190 1.4.2.6 nathanw * caller will deal with the error condition, returning a short
1191 1.4.2.6 nathanw * write, an error, or restarting the write(2) as appropriate.
1192 1.4.2.3 nathanw */
1193 1.4.2.6 nathanw if (wpipe->pipe_map.cnt == bcnt) {
1194 1.4.2.8 nathanw error:
1195 1.4.2.6 nathanw wakeup(wpipe);
1196 1.4.2.6 nathanw return (error);
1197 1.4.2.2 nathanw }
1198 1.4.2.2 nathanw
1199 1.4.2.6 nathanw bcnt -= wpipe->pipe_map.cnt;
1200 1.4.2.3 nathanw }
1201 1.4.2.2 nathanw
1202 1.4.2.8 nathanw uio->uio_resid -= bcnt;
1203 1.4.2.3 nathanw /* uio_offset not updated, not set/used for write(2) */
1204 1.4.2.8 nathanw uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
1205 1.4.2.5 nathanw uio->uio_iov->iov_len -= bcnt;
1206 1.4.2.5 nathanw if (uio->uio_iov->iov_len == 0) {
1207 1.4.2.5 nathanw uio->uio_iov++;
1208 1.4.2.5 nathanw uio->uio_iovcnt--;
1209 1.4.2.5 nathanw }
1210 1.4.2.2 nathanw
1211 1.4.2.6 nathanw return (error);
1212 1.4.2.2 nathanw }
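/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of the page arithmetic above, assuming a 4 KB PAGE_SIZE and a
 * hypothetical iovec with iov_base = 0x20001234 and iov_len = 10000:
 *
 *	bbase = 0x20001234
 *	base  = trunc_page(bbase)            = 0x20001000
 *	bend  = round_page(bbase + 10000)    = 0x20004000
 *	blen  = bend - base                  = 0x3000  (npages = 3)
 *	bpos  = bbase - base                 = 0x234
 *	bcnt  = iov_len                      = 10000   (blen <= PIPE_DIRECT_CHUNK)
 *
 * Those three pages are loaned read-only out of the writer's address space
 * and entered into the pipe's kva window for the reader to copy from.
 */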
1213 1.4.2.2 nathanw #endif /* !PIPE_NODIRECT */
1214 1.4.2.2 nathanw #endif /* NetBSD */
1215 1.4.2.2 nathanw
1216 1.4.2.2 nathanw #ifdef __FreeBSD__
1217 1.4.2.2 nathanw static int
1218 1.4.2.11 nathanw pipe_write(fp, uio, cred, flags, td)
1219 1.4.2.2 nathanw struct file *fp;
1221 1.4.2.2 nathanw struct uio *uio;
1222 1.4.2.2 nathanw struct ucred *cred;
1223 1.4.2.2 nathanw int flags;
1224 1.4.2.11 nathanw struct thread *td;
1225 1.4.2.2 nathanw #elif defined(__NetBSD__)
1226 1.4.2.2 nathanw static int
1227 1.4.2.2 nathanw pipe_write(fp, offset, uio, cred, flags)
1228 1.4.2.2 nathanw struct file *fp;
1229 1.4.2.2 nathanw off_t *offset;
1230 1.4.2.2 nathanw struct uio *uio;
1231 1.4.2.2 nathanw struct ucred *cred;
1232 1.4.2.2 nathanw int flags;
1233 1.4.2.2 nathanw #endif
1234 1.4.2.2 nathanw {
1235 1.4.2.2 nathanw int error = 0;
1236 1.4.2.2 nathanw struct pipe *wpipe, *rpipe;
1237 1.4.2.2 nathanw
1238 1.4.2.2 nathanw rpipe = (struct pipe *) fp->f_data;
1239 1.4.2.2 nathanw wpipe = rpipe->pipe_peer;
1240 1.4.2.2 nathanw
1241 1.4.2.11 nathanw PIPE_LOCK(rpipe);
1242 1.4.2.2 nathanw /*
1243 1.4.2.2 nathanw * detect loss of pipe read side, issue SIGPIPE if lost.
1244 1.4.2.2 nathanw */
1245 1.4.2.11 nathanw if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1246 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1247 1.4.2.2 nathanw return (EPIPE);
1248 1.4.2.11 nathanw }
1249 1.4.2.2 nathanw
1250 1.4.2.2 nathanw ++wpipe->pipe_busy;
1251 1.4.2.2 nathanw
1252 1.4.2.2 nathanw /*
1253 1.4.2.2 nathanw * If it is advantageous to resize the pipe buffer, do
1254 1.4.2.2 nathanw * so.
1255 1.4.2.2 nathanw */
1256 1.4.2.2 nathanw if ((uio->uio_resid > PIPE_SIZE) &&
1257 1.4.2.2 nathanw (nbigpipe < maxbigpipes) &&
1258 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1259 1.4.2.2 nathanw (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1260 1.4.2.2 nathanw #endif
1261 1.4.2.2 nathanw (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
1262 1.4.2.2 nathanw (wpipe->pipe_buffer.cnt == 0)) {
1263 1.4.2.2 nathanw
1264 1.4.2.2 nathanw if ((error = pipelock(wpipe,1)) == 0) {
1265 1.4.2.11 nathanw PIPE_GET_GIANT(rpipe);
1266 1.4.2.2 nathanw if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
1267 1.4.2.2 nathanw nbigpipe++;
1268 1.4.2.11 nathanw PIPE_DROP_GIANT(rpipe);
1269 1.4.2.2 nathanw pipeunlock(wpipe);
1270 1.4.2.2 nathanw } else {
1271 1.4.2.2 nathanw /*
1272 1.4.2.3 nathanw * If an error occurred, unbusy and return, waking up
1273 1.4.2.3 nathanw * any waiting readers.
1274 1.4.2.2 nathanw */
1275 1.4.2.2 nathanw --wpipe->pipe_busy;
1276 1.4.2.2 nathanw if (wpipe->pipe_busy == 0
1277 1.4.2.2 nathanw && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1278 1.4.2.2 nathanw wpipe->pipe_state &=
1279 1.4.2.2 nathanw ~(PIPE_WANTCLOSE | PIPE_WANTR);
1280 1.4.2.3 nathanw wakeup(wpipe);
1281 1.4.2.2 nathanw }
1282 1.4.2.2 nathanw
1283 1.4.2.2 nathanw return (error);
1284 1.4.2.2 nathanw }
1285 1.4.2.2 nathanw }
1286 1.4.2.8 nathanw
1287 1.4.2.2 nathanw #ifdef __FreeBSD__
1288 1.4.2.11 nathanw /*
1289 1.4.2.11 nathanw * If an early error occurred, unbusy and return, waking up any pending
1290 1.4.2.11 nathanw * readers.
1291 1.4.2.11 nathanw */
1292 1.4.2.11 nathanw if (error) {
1293 1.4.2.11 nathanw --wpipe->pipe_busy;
1294 1.4.2.11 nathanw if ((wpipe->pipe_busy == 0) &&
1295 1.4.2.11 nathanw (wpipe->pipe_state & PIPE_WANT)) {
1296 1.4.2.11 nathanw wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1297 1.4.2.11 nathanw wakeup(wpipe);
1298 1.4.2.11 nathanw }
1299 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1300 1.4.2.11 nathanw return(error);
1301 1.4.2.11 nathanw }
1302 1.4.2.11 nathanw
1303 1.4.2.2 nathanw KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
1304 1.4.2.2 nathanw #endif
1305 1.4.2.2 nathanw
1306 1.4.2.2 nathanw while (uio->uio_resid) {
1307 1.4.2.13 nathanw size_t space;
1308 1.4.2.2 nathanw
1309 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1310 1.4.2.2 nathanw /*
1311 1.4.2.2 nathanw * If the transfer is large, we can gain performance if
1312 1.4.2.2 nathanw * we do process-to-process copies directly.
1313 1.4.2.2 nathanw * If the write is non-blocking, we don't use the
1314 1.4.2.2 nathanw * direct write mechanism.
1315 1.4.2.2 nathanw *
1316 1.4.2.2 nathanw * The direct write mechanism will detect the reader going
1317 1.4.2.2 nathanw * away on us.
1318 1.4.2.2 nathanw */
1319 1.4.2.5 nathanw if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
1320 1.4.2.2 nathanw (fp->f_flag & FNONBLOCK) == 0 &&
1321 1.4.2.2 nathanw (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
1322 1.4.2.2 nathanw error = pipe_direct_write(wpipe, uio);
1323 1.4.2.3 nathanw
1324 1.4.2.3 nathanw /*
1325 1.4.2.5 nathanw 			 * Break out if an error occurred, unless it was ENOMEM.
1326 1.4.2.5 nathanw 			 * ENOMEM means we failed to allocate resources for the
1327 1.4.2.5 nathanw 			 * direct write, so we just fall back to an ordinary write.
1328 1.4.2.5 nathanw 			 * If the direct write was successful, process the rest
1329 1.4.2.5 nathanw 			 * of the data via the ordinary write path.
1330 1.4.2.3 nathanw */
1331 1.4.2.5 nathanw if (!error)
1332 1.4.2.5 nathanw continue;
1333 1.4.2.5 nathanw
1334 1.4.2.3 nathanw if (error != ENOMEM)
1335 1.4.2.2 nathanw break;
1336 1.4.2.2 nathanw }
1337 1.4.2.2 nathanw #endif /* PIPE_NODIRECT */
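		/*
		 * In other words, only a large write (first iovec at least
		 * PIPE_MINDIRECT bytes) on a blocking descriptor, and only
		 * while enough pipe KVA is available, is handed to
		 * pipe_direct_write(); everything else, including the same
		 * data written with O_NONBLOCK set, goes through the
		 * ordinary buffered path below.
		 */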
1338 1.4.2.2 nathanw
1339 1.4.2.2 nathanw /*
1340 1.4.2.2 nathanw 		 * Pipe buffered writes cannot be coincident with
1341 1.4.2.2 nathanw * direct writes. We wait until the currently executing
1342 1.4.2.2 nathanw * direct write is completed before we start filling the
1343 1.4.2.2 nathanw * pipe buffer. We break out if a signal occurs or the
1344 1.4.2.2 nathanw * reader goes away.
1345 1.4.2.2 nathanw */
1346 1.4.2.2 nathanw retrywrite:
1347 1.4.2.2 nathanw while (wpipe->pipe_state & PIPE_DIRECTW) {
1348 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
1349 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
1350 1.4.2.2 nathanw wakeup(wpipe);
1351 1.4.2.2 nathanw }
1352 1.4.2.11 nathanw #ifdef __FreeBSD__
1353 1.4.2.11 nathanw error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1354 1.4.2.11 nathanw "pipbww", 0);
1355 1.4.2.11 nathanw #else
1356 1.4.2.2 nathanw error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
1357 1.4.2.11 nathanw #endif
1358 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF)
1359 1.4.2.2 nathanw break;
1360 1.4.2.2 nathanw if (error)
1361 1.4.2.2 nathanw break;
1362 1.4.2.2 nathanw }
1363 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
1364 1.4.2.2 nathanw error = EPIPE;
1365 1.4.2.2 nathanw break;
1366 1.4.2.2 nathanw }
1367 1.4.2.2 nathanw
1368 1.4.2.2 nathanw space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1369 1.4.2.2 nathanw
1370 1.4.2.2 nathanw /* Writes of size <= PIPE_BUF must be atomic. */
1371 1.4.2.5 nathanw if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
1372 1.4.2.2 nathanw space = 0;
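		/*
		 * That is, a request of at most PIPE_BUF bytes is
		 * all-or-nothing: if it does not fit into the space left we
		 * pretend the buffer is full and take the blocking (or
		 * EAGAIN) path below, rather than copying a partial chunk
		 * that could interleave with data from another writer.
		 * Larger requests may legitimately be split across several
		 * iterations of this loop.
		 */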
1373 1.4.2.2 nathanw
1374 1.4.2.7 nathanw if (space > 0) {
1375 1.4.2.2 nathanw int size; /* Transfer size */
1376 1.4.2.2 nathanw int segsize; /* first segment to transfer */
1377 1.4.2.2 nathanw
1378 1.4.2.2 nathanw 			if ((error = pipelock(wpipe, 1)) != 0)
1379 1.4.2.2 nathanw break;
1380 1.4.2.2 nathanw
1381 1.4.2.2 nathanw /*
1382 1.4.2.2 nathanw * It is possible for a direct write to
1383 1.4.2.2 nathanw * slip in on us... handle it here...
1384 1.4.2.2 nathanw */
1385 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_DIRECTW) {
1386 1.4.2.2 nathanw pipeunlock(wpipe);
1387 1.4.2.2 nathanw goto retrywrite;
1388 1.4.2.2 nathanw }
1389 1.4.2.2 nathanw /*
1390 1.4.2.2 nathanw * If a process blocked in uiomove, our
1391 1.4.2.2 nathanw * value for space might be bad.
1392 1.4.2.2 nathanw *
1393 1.4.2.2 nathanw * XXX will we be ok if the reader has gone
1394 1.4.2.2 nathanw * away here?
1395 1.4.2.2 nathanw */
1396 1.4.2.2 nathanw if (space > wpipe->pipe_buffer.size -
1397 1.4.2.2 nathanw wpipe->pipe_buffer.cnt) {
1398 1.4.2.2 nathanw pipeunlock(wpipe);
1399 1.4.2.2 nathanw goto retrywrite;
1400 1.4.2.2 nathanw }
1401 1.4.2.2 nathanw
1402 1.4.2.2 nathanw /*
1403 1.4.2.2 nathanw * Transfer size is minimum of uio transfer
1404 1.4.2.2 nathanw * and free space in pipe buffer.
1405 1.4.2.2 nathanw */
1406 1.4.2.2 nathanw if (space > uio->uio_resid)
1407 1.4.2.2 nathanw size = uio->uio_resid;
1408 1.4.2.2 nathanw else
1409 1.4.2.2 nathanw size = space;
1410 1.4.2.2 nathanw /*
1411 1.4.2.2 nathanw * First segment to transfer is minimum of
1412 1.4.2.2 nathanw * transfer size and contiguous space in
1413 1.4.2.2 nathanw * pipe buffer. If first segment to transfer
1414 1.4.2.2 nathanw * is less than the transfer size, we've got
1415 1.4.2.2 nathanw * a wraparound in the buffer.
1416 1.4.2.2 nathanw */
1417 1.4.2.2 nathanw segsize = wpipe->pipe_buffer.size -
1418 1.4.2.2 nathanw wpipe->pipe_buffer.in;
1419 1.4.2.2 nathanw if (segsize > size)
1420 1.4.2.2 nathanw segsize = size;
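			/*
			 * Worked example (illustrative numbers only): with a
			 * 16384-byte buffer, pipe_buffer.in at 15360 and a
			 * 4096-byte transfer, segsize is limited to 1024
			 * here.  The first uiomove() below fills the tail of
			 * the buffer and the second one copies the remaining
			 * 3072 bytes to the start, after which pipe_buffer.in
			 * wraps to 3072.
			 */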
1421 1.4.2.8 nathanw
1422 1.4.2.2 nathanw /* Transfer first segment */
1423 1.4.2.2 nathanw
1424 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1425 1.4.2.2 nathanw error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1426 1.4.2.2 nathanw segsize, uio);
1427 1.4.2.11 nathanw PIPE_LOCK(rpipe);
1428 1.4.2.8 nathanw
1429 1.4.2.2 nathanw if (error == 0 && segsize < size) {
1430 1.4.2.2 nathanw /*
1431 1.4.2.2 nathanw * Transfer remaining part now, to
1432 1.4.2.2 nathanw * support atomic writes. Wraparound
1433 1.4.2.2 nathanw * happened.
1434 1.4.2.2 nathanw */
1435 1.4.2.2 nathanw #ifdef DEBUG
1436 1.4.2.2 nathanw if (wpipe->pipe_buffer.in + segsize !=
1437 1.4.2.2 nathanw wpipe->pipe_buffer.size)
1438 1.4.2.2 nathanw panic("Expected pipe buffer wraparound disappeared");
1439 1.4.2.2 nathanw #endif
1440 1.4.2.8 nathanw
1441 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1442 1.4.2.2 nathanw error = uiomove(&wpipe->pipe_buffer.buffer[0],
1443 1.4.2.2 nathanw size - segsize, uio);
1444 1.4.2.11 nathanw PIPE_LOCK(rpipe);
1445 1.4.2.2 nathanw }
1446 1.4.2.2 nathanw if (error == 0) {
1447 1.4.2.2 nathanw wpipe->pipe_buffer.in += size;
1448 1.4.2.2 nathanw if (wpipe->pipe_buffer.in >=
1449 1.4.2.2 nathanw wpipe->pipe_buffer.size) {
1450 1.4.2.2 nathanw #ifdef DEBUG
1451 1.4.2.2 nathanw if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
1452 1.4.2.2 nathanw panic("Expected wraparound bad");
1453 1.4.2.2 nathanw #endif
1454 1.4.2.2 nathanw wpipe->pipe_buffer.in = size - segsize;
1455 1.4.2.2 nathanw }
1456 1.4.2.8 nathanw
1457 1.4.2.2 nathanw wpipe->pipe_buffer.cnt += size;
1458 1.4.2.2 nathanw #ifdef DEBUG
1459 1.4.2.2 nathanw if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
1460 1.4.2.2 nathanw panic("Pipe buffer overflow");
1461 1.4.2.2 nathanw #endif
1462 1.4.2.2 nathanw }
1463 1.4.2.2 nathanw pipeunlock(wpipe);
1464 1.4.2.2 nathanw if (error)
1465 1.4.2.2 nathanw break;
1466 1.4.2.2 nathanw } else {
1467 1.4.2.2 nathanw /*
1468 1.4.2.2 nathanw * If the "read-side" has been blocked, wake it up now.
1469 1.4.2.2 nathanw */
1470 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
1471 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
1472 1.4.2.2 nathanw wakeup(wpipe);
1473 1.4.2.2 nathanw }
1474 1.4.2.2 nathanw
1475 1.4.2.2 nathanw /*
1476 1.4.2.2 nathanw * don't block on non-blocking I/O
1477 1.4.2.2 nathanw */
1478 1.4.2.2 nathanw if (fp->f_flag & FNONBLOCK) {
1479 1.4.2.2 nathanw error = EAGAIN;
1480 1.4.2.2 nathanw break;
1481 1.4.2.2 nathanw }
1482 1.4.2.2 nathanw
1483 1.4.2.2 nathanw /*
1484 1.4.2.2 nathanw * We have no more space and have something to offer,
1485 1.4.2.2 nathanw * wake up select/poll.
1486 1.4.2.2 nathanw */
1487 1.4.2.3 nathanw pipeselwakeup(wpipe, wpipe);
1488 1.4.2.2 nathanw
1489 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_WANTW;
1490 1.4.2.11 nathanw #ifdef __FreeBSD__
1491 1.4.2.11 nathanw error = msleep(wpipe, PIPE_MTX(rpipe),
1492 1.4.2.11 nathanw PRIBIO | PCATCH, "pipewr", 0);
1493 1.4.2.11 nathanw #else
1494 1.4.2.2 nathanw error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
1495 1.4.2.11 nathanw #endif
1496 1.4.2.2 nathanw if (error != 0)
1497 1.4.2.2 nathanw break;
1498 1.4.2.2 nathanw /*
1499 1.4.2.2 nathanw 			 * If the read side wants to go away, we just return
1500 1.4.2.2 nathanw 			 * EPIPE; our caller turns that into a SIGPIPE.
1501 1.4.2.2 nathanw */
1502 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_EOF) {
1503 1.4.2.2 nathanw error = EPIPE;
1504 1.4.2.2 nathanw break;
1505 1.4.2.8 nathanw }
1506 1.4.2.2 nathanw }
1507 1.4.2.2 nathanw }
1508 1.4.2.2 nathanw
1509 1.4.2.2 nathanw --wpipe->pipe_busy;
1510 1.4.2.2 nathanw if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1511 1.4.2.2 nathanw wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
1512 1.4.2.2 nathanw wakeup(wpipe);
1513 1.4.2.2 nathanw } else if (wpipe->pipe_buffer.cnt > 0) {
1514 1.4.2.2 nathanw /*
1515 1.4.2.2 nathanw * If we have put any characters in the buffer, we wake up
1516 1.4.2.2 nathanw * the reader.
1517 1.4.2.2 nathanw */
1518 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_WANTR) {
1519 1.4.2.2 nathanw wpipe->pipe_state &= ~PIPE_WANTR;
1520 1.4.2.2 nathanw wakeup(wpipe);
1521 1.4.2.2 nathanw }
1522 1.4.2.2 nathanw }
1523 1.4.2.2 nathanw
1524 1.4.2.2 nathanw /*
1525 1.4.2.2 nathanw * Don't return EPIPE if I/O was successful
1526 1.4.2.2 nathanw */
1527 1.4.2.2 nathanw if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
1528 1.4.2.2 nathanw && (uio->uio_resid == 0))
1529 1.4.2.2 nathanw error = 0;
1530 1.4.2.2 nathanw
1531 1.4.2.2 nathanw if (error == 0)
1532 1.4.2.2 nathanw vfs_timestamp(&wpipe->pipe_mtime);
1533 1.4.2.2 nathanw
1534 1.4.2.2 nathanw /*
1535 1.4.2.2 nathanw * We have something to offer, wake up select/poll.
1536 1.4.2.2 nathanw 	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
1537 1.4.2.5 nathanw 	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
1538 1.4.2.2 nathanw */
1539 1.4.2.2 nathanw if (wpipe->pipe_buffer.cnt)
1540 1.4.2.3 nathanw pipeselwakeup(wpipe, wpipe);
1541 1.4.2.2 nathanw
1542 1.4.2.2 nathanw /*
1543 1.4.2.2 nathanw * Arrange for next read(2) to do a signal.
1544 1.4.2.2 nathanw */
1545 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_SIGNALR;
1546 1.4.2.2 nathanw
1547 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1548 1.4.2.2 nathanw return (error);
1549 1.4.2.2 nathanw }
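/*
 * From user space the write-side behaviour above looks roughly as in the
 * sketch below (illustrative example only, not part of these sources): a
 * non-blocking write into a full pipe fails with EAGAIN, and a write after
 * the read side has been closed fails with EPIPE (raising SIGPIPE unless
 * that signal is ignored or blocked).
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fds[2];
 *		char buf[1024];
 *
 *		signal(SIGPIPE, SIG_IGN);
 *		memset(buf, 'x', sizeof(buf));
 *		if (pipe(fds) == -1)
 *			return 1;
 *		fcntl(fds[1], F_SETFL, O_NONBLOCK);
 *		while (write(fds[1], buf, sizeof(buf)) > 0)
 *			continue;
 *		printf("full pipe: errno %d (EAGAIN is %d)\n", errno, EAGAIN);
 *		close(fds[0]);
 *		if (write(fds[1], buf, sizeof(buf)) == -1)
 *			printf("reader gone: errno %d (EPIPE is %d)\n",
 *			    errno, EPIPE);
 *		return 0;
 *	}
 */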
1550 1.4.2.2 nathanw
1551 1.4.2.2 nathanw /*
1552 1.4.2.2 nathanw  * We implement a very minimal set of ioctls for compatibility with sockets.
1553 1.4.2.2 nathanw */
1554 1.4.2.2 nathanw int
1555 1.4.2.11 nathanw #ifdef __FreeBSD__
1556 1.4.2.11 nathanw pipe_ioctl(fp, cmd, data, td)
1557 1.4.2.11 nathanw struct file *fp;
1558 1.4.2.11 nathanw u_long cmd;
1559 1.4.2.11 nathanw caddr_t data;
1560 1.4.2.11 nathanw struct thread *td;
1561 1.4.2.11 nathanw #else
1562 1.4.2.2 nathanw pipe_ioctl(fp, cmd, data, p)
1563 1.4.2.2 nathanw struct file *fp;
1564 1.4.2.2 nathanw u_long cmd;
1565 1.4.2.2 nathanw caddr_t data;
1566 1.4.2.2 nathanw struct proc *p;
1567 1.4.2.11 nathanw #endif
1568 1.4.2.2 nathanw {
1569 1.4.2.2 nathanw struct pipe *mpipe = (struct pipe *)fp->f_data;
1570 1.4.2.2 nathanw
1571 1.4.2.2 nathanw switch (cmd) {
1572 1.4.2.2 nathanw
1573 1.4.2.2 nathanw case FIONBIO:
1574 1.4.2.2 nathanw return (0);
1575 1.4.2.2 nathanw
1576 1.4.2.2 nathanw case FIOASYNC:
1577 1.4.2.11 nathanw PIPE_LOCK(mpipe);
1578 1.4.2.2 nathanw if (*(int *)data) {
1579 1.4.2.2 nathanw mpipe->pipe_state |= PIPE_ASYNC;
1580 1.4.2.2 nathanw } else {
1581 1.4.2.2 nathanw mpipe->pipe_state &= ~PIPE_ASYNC;
1582 1.4.2.2 nathanw }
1583 1.4.2.11 nathanw PIPE_UNLOCK(mpipe);
1584 1.4.2.2 nathanw return (0);
1585 1.4.2.2 nathanw
1586 1.4.2.2 nathanw case FIONREAD:
1587 1.4.2.11 nathanw PIPE_LOCK(mpipe);
1588 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1589 1.4.2.2 nathanw if (mpipe->pipe_state & PIPE_DIRECTW)
1590 1.4.2.2 nathanw *(int *)data = mpipe->pipe_map.cnt;
1591 1.4.2.2 nathanw else
1592 1.4.2.2 nathanw #endif
1593 1.4.2.2 nathanw *(int *)data = mpipe->pipe_buffer.cnt;
1594 1.4.2.11 nathanw PIPE_UNLOCK(mpipe);
1595 1.4.2.2 nathanw return (0);
1596 1.4.2.2 nathanw
1597 1.4.2.2 nathanw #ifdef __FreeBSD__
1598 1.4.2.2 nathanw case FIOSETOWN:
1599 1.4.2.2 nathanw return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1600 1.4.2.2 nathanw
1601 1.4.2.2 nathanw case FIOGETOWN:
1602 1.4.2.2 nathanw *(int *)data = fgetown(mpipe->pipe_sigio);
1603 1.4.2.2 nathanw return (0);
1604 1.4.2.2 nathanw
1605 1.4.2.2 nathanw /* This is deprecated, FIOSETOWN should be used instead. */
1606 1.4.2.2 nathanw case TIOCSPGRP:
1607 1.4.2.2 nathanw return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1608 1.4.2.2 nathanw
1609 1.4.2.2 nathanw /* This is deprecated, FIOGETOWN should be used instead. */
1610 1.4.2.2 nathanw case TIOCGPGRP:
1611 1.4.2.2 nathanw *(int *)data = -fgetown(mpipe->pipe_sigio);
1612 1.4.2.2 nathanw return (0);
1613 1.4.2.2 nathanw #endif /* FreeBSD */
1614 1.4.2.2 nathanw #ifdef __NetBSD__
1615 1.4.2.2 nathanw case TIOCSPGRP:
1616 1.4.2.2 nathanw mpipe->pipe_pgid = *(int *)data;
1617 1.4.2.2 nathanw return (0);
1618 1.4.2.2 nathanw
1619 1.4.2.2 nathanw case TIOCGPGRP:
1620 1.4.2.2 nathanw *(int *)data = mpipe->pipe_pgid;
1621 1.4.2.2 nathanw return (0);
1622 1.4.2.2 nathanw #endif /* NetBSD */
1623 1.4.2.2 nathanw
1624 1.4.2.2 nathanw }
1625 1.4.2.11 nathanw return (EPASSTHROUGH);
1626 1.4.2.2 nathanw }
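/*
 * FIONREAD as handled above reports how many bytes a read(2) could return
 * immediately.  A minimal user-level sketch (illustrative only, not part of
 * these sources) prints "5 bytes buffered" after a five-byte write:
 *
 *	#include <sys/ioctl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fds[2], nread = 0;
 *
 *		if (pipe(fds) == -1)
 *			return 1;
 *		write(fds[1], "hello", 5);
 *		if (ioctl(fds[0], FIONREAD, &nread) == 0)
 *			printf("%d bytes buffered\n", nread);
 *		return 0;
 *	}
 */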
1627 1.4.2.2 nathanw
1628 1.4.2.2 nathanw int
1629 1.4.2.11 nathanw #ifdef __FreeBSD__
1630 1.4.2.11 nathanw pipe_poll(fp, events, cred, td)
1631 1.4.2.2 nathanw struct file *fp;
1632 1.4.2.2 nathanw int events;
1633 1.4.2.11 nathanw struct ucred *cred;
1634 1.4.2.11 nathanw struct thread *td;
1635 1.4.2.11 nathanw #elif defined(__NetBSD__)
1636 1.4.2.11 nathanw pipe_poll(fp, events, td)
1637 1.4.2.11 nathanw struct file *fp;
1638 1.4.2.11 nathanw int events;
1639 1.4.2.11 nathanw struct proc *td;
1640 1.4.2.11 nathanw #endif
1641 1.4.2.2 nathanw {
1642 1.4.2.2 nathanw struct pipe *rpipe = (struct pipe *)fp->f_data;
1643 1.4.2.2 nathanw struct pipe *wpipe;
1644 1.4.2.2 nathanw int revents = 0;
1645 1.4.2.2 nathanw
1646 1.4.2.2 nathanw wpipe = rpipe->pipe_peer;
1647 1.4.2.11 nathanw PIPE_LOCK(rpipe);
1648 1.4.2.2 nathanw if (events & (POLLIN | POLLRDNORM))
1649 1.4.2.2 nathanw if ((rpipe->pipe_buffer.cnt > 0) ||
1650 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1651 1.4.2.2 nathanw (rpipe->pipe_state & PIPE_DIRECTW) ||
1652 1.4.2.2 nathanw #endif
1653 1.4.2.2 nathanw (rpipe->pipe_state & PIPE_EOF))
1654 1.4.2.2 nathanw revents |= events & (POLLIN | POLLRDNORM);
1655 1.4.2.2 nathanw
1656 1.4.2.2 nathanw if (events & (POLLOUT | POLLWRNORM))
1657 1.4.2.2 nathanw if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
1658 1.4.2.2 nathanw || (
1659 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1660 1.4.2.2 nathanw ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1661 1.4.2.2 nathanw #endif
1662 1.4.2.2 nathanw (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1663 1.4.2.2 nathanw revents |= events & (POLLOUT | POLLWRNORM);
1664 1.4.2.2 nathanw
1665 1.4.2.2 nathanw if ((rpipe->pipe_state & PIPE_EOF) ||
1666 1.4.2.2 nathanw (wpipe == NULL) ||
1667 1.4.2.2 nathanw (wpipe->pipe_state & PIPE_EOF))
1668 1.4.2.2 nathanw revents |= POLLHUP;
1669 1.4.2.2 nathanw
1670 1.4.2.2 nathanw if (revents == 0) {
1671 1.4.2.2 nathanw if (events & (POLLIN | POLLRDNORM)) {
1672 1.4.2.11 nathanw selrecord(td, &rpipe->pipe_sel);
1673 1.4.2.2 nathanw rpipe->pipe_state |= PIPE_SEL;
1674 1.4.2.2 nathanw }
1675 1.4.2.2 nathanw
1676 1.4.2.2 nathanw if (events & (POLLOUT | POLLWRNORM)) {
1677 1.4.2.11 nathanw selrecord(td, &wpipe->pipe_sel);
1678 1.4.2.2 nathanw wpipe->pipe_state |= PIPE_SEL;
1679 1.4.2.2 nathanw }
1680 1.4.2.2 nathanw }
1681 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1682 1.4.2.2 nathanw
1683 1.4.2.2 nathanw return (revents);
1684 1.4.2.2 nathanw }
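/*
 * Seen from user space, the checks above mean that poll(2) reports POLLIN on
 * the read end once data is buffered (or at EOF), POLLOUT on the write end
 * while at least PIPE_BUF bytes of space remain, and POLLHUP once the peer
 * has gone away.  Illustrative sketch (not part of these sources):
 *
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fds[2];
 *		struct pollfd pfd[2];
 *
 *		if (pipe(fds) == -1)
 *			return 1;
 *		write(fds[1], "x", 1);
 *		pfd[0].fd = fds[0];
 *		pfd[0].events = POLLIN;
 *		pfd[1].fd = fds[1];
 *		pfd[1].events = POLLOUT;
 *		if (poll(pfd, 2, 0) > 0)
 *			printf("read end %#x, write end %#x\n",
 *			    (unsigned)pfd[0].revents,
 *			    (unsigned)pfd[1].revents);
 *		return 0;
 *	}
 */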
1685 1.4.2.2 nathanw
1686 1.4.2.2 nathanw static int
1687 1.4.2.11 nathanw #ifdef __FreeBSD__
1688 1.4.2.11 nathanw pipe_stat(fp, ub, td)
1689 1.4.2.2 nathanw struct file *fp;
1690 1.4.2.2 nathanw struct stat *ub;
1691 1.4.2.11 nathanw struct thread *td;
1692 1.4.2.11 nathanw #else
1693 1.4.2.11 nathanw pipe_stat(fp, ub, td)
1694 1.4.2.11 nathanw struct file *fp;
1695 1.4.2.11 nathanw struct stat *ub;
1696 1.4.2.11 nathanw struct proc *td;
1697 1.4.2.11 nathanw #endif
1698 1.4.2.2 nathanw {
1699 1.4.2.2 nathanw struct pipe *pipe = (struct pipe *)fp->f_data;
1700 1.4.2.2 nathanw
1701 1.4.2.3 nathanw memset((caddr_t)ub, 0, sizeof(*ub));
1702 1.4.2.2 nathanw ub->st_mode = S_IFIFO;
1703 1.4.2.2 nathanw ub->st_blksize = pipe->pipe_buffer.size;
1704 1.4.2.2 nathanw ub->st_size = pipe->pipe_buffer.cnt;
1705 1.4.2.2 nathanw ub->st_blocks = (ub->st_size) ? 1 : 0;
1706 1.4.2.2 nathanw #ifdef __FreeBSD__
1707 1.4.2.2 nathanw ub->st_atimespec = pipe->pipe_atime;
1708 1.4.2.2 nathanw ub->st_mtimespec = pipe->pipe_mtime;
1709 1.4.2.2 nathanw ub->st_ctimespec = pipe->pipe_ctime;
1710 1.4.2.2 nathanw #endif /* FreeBSD */
1711 1.4.2.2 nathanw #ifdef __NetBSD__
1712 1.4.2.2 nathanw 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
1713 1.4.2.2 nathanw TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1714 1.4.2.2 nathanw TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1715 1.4.2.2 nathanw #endif /* NetBSD */
1716 1.4.2.2 nathanw ub->st_uid = fp->f_cred->cr_uid;
1717 1.4.2.2 nathanw ub->st_gid = fp->f_cred->cr_gid;
1718 1.4.2.2 nathanw /*
1719 1.4.2.2 nathanw * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1720 1.4.2.2 nathanw * XXX (st_dev, st_ino) should be unique.
1721 1.4.2.2 nathanw */
1722 1.4.2.2 nathanw return (0);
1723 1.4.2.2 nathanw }
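/*
 * The translation above is what fstat(2) on a pipe descriptor returns:
 * S_IFIFO in st_mode, the buffer size in st_blksize and the number of
 * buffered bytes in st_size.  Illustrative sketch (not part of these
 * sources):
 *
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fds[2];
 *		struct stat st;
 *
 *		if (pipe(fds) == -1)
 *			return 1;
 *		write(fds[1], "abc", 3);
 *		if (fstat(fds[0], &st) == 0 && S_ISFIFO(st.st_mode))
 *			printf("blksize %ld, %lld bytes pending\n",
 *			    (long)st.st_blksize, (long long)st.st_size);
 *		return 0;
 *	}
 */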
1724 1.4.2.2 nathanw
1725 1.4.2.2 nathanw /* ARGSUSED */
1726 1.4.2.2 nathanw static int
1727 1.4.2.11 nathanw #ifdef __FreeBSD__
1728 1.4.2.11 nathanw pipe_close(fp, td)
1729 1.4.2.2 nathanw struct file *fp;
1730 1.4.2.11 nathanw struct thread *td;
1731 1.4.2.11 nathanw #else
1732 1.4.2.11 nathanw pipe_close(fp, td)
1733 1.4.2.11 nathanw struct file *fp;
1734 1.4.2.11 nathanw struct proc *td;
1735 1.4.2.11 nathanw #endif
1736 1.4.2.2 nathanw {
1737 1.4.2.2 nathanw struct pipe *cpipe = (struct pipe *)fp->f_data;
1738 1.4.2.2 nathanw
1739 1.4.2.2 nathanw #ifdef __FreeBSD__
1740 1.4.2.2 nathanw fp->f_ops = &badfileops;
1741 1.4.2.2 nathanw funsetown(cpipe->pipe_sigio);
1742 1.4.2.2 nathanw #endif
1743 1.4.2.2 nathanw fp->f_data = NULL;
1744 1.4.2.2 nathanw pipeclose(cpipe);
1745 1.4.2.2 nathanw return (0);
1746 1.4.2.2 nathanw }
1747 1.4.2.2 nathanw
1748 1.4.2.2 nathanw static void
1749 1.4.2.2 nathanw pipe_free_kmem(cpipe)
1750 1.4.2.2 nathanw struct pipe *cpipe;
1751 1.4.2.2 nathanw {
1752 1.4.2.2 nathanw
1753 1.4.2.2 nathanw #ifdef __FreeBSD__
1754 1.4.2.11 nathanw
1755 1.4.2.11 nathanw GIANT_REQUIRED;
1756 1.4.2.11 nathanw KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
1757 1.4.2.11 nathanw 	    ("pipe_free_kmem: pipe mutex locked"));
1758 1.4.2.2 nathanw #endif
1759 1.4.2.11 nathanw
1760 1.4.2.2 nathanw if (cpipe->pipe_buffer.buffer != NULL) {
1761 1.4.2.2 nathanw if (cpipe->pipe_buffer.size > PIPE_SIZE)
1762 1.4.2.2 nathanw --nbigpipe;
1763 1.4.2.2 nathanw amountpipekva -= cpipe->pipe_buffer.size;
1764 1.4.2.2 nathanw #ifdef __FreeBSD__
1765 1.4.2.2 nathanw kmem_free(kernel_map,
1766 1.4.2.2 nathanw (vm_offset_t)cpipe->pipe_buffer.buffer,
1767 1.4.2.2 nathanw cpipe->pipe_buffer.size);
1768 1.4.2.2 nathanw #elif defined(__NetBSD__)
1769 1.4.2.2 nathanw uvm_km_free(kernel_map,
1770 1.4.2.2 nathanw (vaddr_t)cpipe->pipe_buffer.buffer,
1771 1.4.2.2 nathanw cpipe->pipe_buffer.size);
1772 1.4.2.2 nathanw #endif /* NetBSD */
1773 1.4.2.2 nathanw cpipe->pipe_buffer.buffer = NULL;
1774 1.4.2.2 nathanw }
1775 1.4.2.2 nathanw #ifndef PIPE_NODIRECT
1776 1.4.2.11 nathanw if (cpipe->pipe_map.kva != 0) {
1777 1.4.2.2 nathanw #ifdef __FreeBSD__
1778 1.4.2.2 nathanw amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
1779 1.4.2.2 nathanw kmem_free(kernel_map,
1780 1.4.2.2 nathanw cpipe->pipe_map.kva,
1781 1.4.2.2 nathanw cpipe->pipe_buffer.size + PAGE_SIZE);
1782 1.4.2.2 nathanw #elif defined(__NetBSD__)
1783 1.4.2.2 nathanw pipe_loan_free(cpipe);
1784 1.4.2.2 nathanw #endif /* NetBSD */
1785 1.4.2.2 nathanw cpipe->pipe_map.cnt = 0;
1786 1.4.2.11 nathanw cpipe->pipe_map.kva = 0;
1787 1.4.2.2 nathanw cpipe->pipe_map.pos = 0;
1788 1.4.2.2 nathanw cpipe->pipe_map.npages = 0;
1789 1.4.2.2 nathanw }
1790 1.4.2.2 nathanw #endif /* !PIPE_NODIRECT */
1791 1.4.2.2 nathanw }
1792 1.4.2.2 nathanw
1793 1.4.2.2 nathanw /*
1794 1.4.2.2 nathanw  * Shut down the pipe.
1795 1.4.2.2 nathanw */
1796 1.4.2.2 nathanw static void
1797 1.4.2.2 nathanw pipeclose(cpipe)
1798 1.4.2.2 nathanw struct pipe *cpipe;
1799 1.4.2.2 nathanw {
1800 1.4.2.2 nathanw struct pipe *ppipe;
1801 1.4.2.11 nathanw #ifdef __FreeBSD__
1802 1.4.2.11 nathanw int hadpeer = 0;
1803 1.4.2.11 nathanw #endif
1804 1.4.2.2 nathanw
1805 1.4.2.11 nathanw if (cpipe == NULL)
1806 1.4.2.2 nathanw return;
1807 1.4.2.2 nathanw
1808 1.4.2.11 nathanw /* partially created pipes won't have a valid mutex. */
1809 1.4.2.11 nathanw if (PIPE_MTX(cpipe) != NULL)
1810 1.4.2.11 nathanw PIPE_LOCK(cpipe);
1811 1.4.2.11 nathanw
1812 1.4.2.3 nathanw pipeselwakeup(cpipe, cpipe);
1813 1.4.2.2 nathanw
1814 1.4.2.2 nathanw /*
1815 1.4.2.2 nathanw * If the other side is blocked, wake it up saying that
1816 1.4.2.2 nathanw * we want to close it down.
1817 1.4.2.2 nathanw */
1818 1.4.2.2 nathanw while (cpipe->pipe_busy) {
1819 1.4.2.2 nathanw wakeup(cpipe);
1820 1.4.2.2 nathanw cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
1821 1.4.2.11 nathanw #ifdef __FreeBSD__
1822 1.4.2.11 nathanw msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1823 1.4.2.11 nathanw #else
1824 1.4.2.2 nathanw tsleep(cpipe, PRIBIO, "pipecl", 0);
1825 1.4.2.11 nathanw #endif
1826 1.4.2.2 nathanw }
1827 1.4.2.2 nathanw
1828 1.4.2.2 nathanw /*
1829 1.4.2.2 nathanw * Disconnect from peer
1830 1.4.2.2 nathanw */
1831 1.4.2.2 nathanw if ((ppipe = cpipe->pipe_peer) != NULL) {
1832 1.4.2.11 nathanw #ifdef __FreeBSD__
1833 1.4.2.11 nathanw hadpeer++;
1834 1.4.2.11 nathanw #endif
1835 1.4.2.3 nathanw pipeselwakeup(ppipe, ppipe);
1836 1.4.2.2 nathanw
1837 1.4.2.2 nathanw ppipe->pipe_state |= PIPE_EOF;
1838 1.4.2.2 nathanw wakeup(ppipe);
1839 1.4.2.11 nathanw #ifdef __FreeBSD__
1840 1.4.2.11 nathanw KNOTE(&ppipe->pipe_sel.si_note, 0);
1841 1.4.2.11 nathanw #endif
1842 1.4.2.2 nathanw ppipe->pipe_peer = NULL;
1843 1.4.2.2 nathanw }
1844 1.4.2.2 nathanw /*
1845 1.4.2.2 nathanw * free resources
1846 1.4.2.2 nathanw */
1847 1.4.2.9 nathanw #ifdef __FreeBSD__
1848 1.4.2.11 nathanw if (PIPE_MTX(cpipe) != NULL) {
1849 1.4.2.11 nathanw PIPE_UNLOCK(cpipe);
1850 1.4.2.11 nathanw if (!hadpeer) {
1851 1.4.2.11 nathanw mtx_destroy(PIPE_MTX(cpipe));
1852 1.4.2.11 nathanw free(PIPE_MTX(cpipe), M_TEMP);
1853 1.4.2.11 nathanw }
1854 1.4.2.11 nathanw }
1855 1.4.2.11 nathanw mtx_lock(&Giant);
1856 1.4.2.2 nathanw pipe_free_kmem(cpipe);
1857 1.4.2.2 nathanw zfree(pipe_zone, cpipe);
1858 1.4.2.11 nathanw mtx_unlock(&Giant);
1859 1.4.2.11 nathanw #endif
1860 1.4.2.3 nathanw
1861 1.4.2.2 nathanw #ifdef __NetBSD__
1862 1.4.2.11 nathanw if (PIPE_MTX(cpipe) != NULL)
1863 1.4.2.11 nathanw PIPE_UNLOCK(cpipe);
1864 1.4.2.11 nathanw
1865 1.4.2.3 nathanw pipe_free_kmem(cpipe);
1866 1.4.2.3 nathanw (void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
1867 1.4.2.2 nathanw pool_put(&pipe_pool, cpipe);
1868 1.4.2.2 nathanw #endif
1869 1.4.2.2 nathanw }
1870 1.4.2.2 nathanw
1871 1.4.2.14 nathanw static void
1872 1.4.2.14 nathanw filt_pipedetach(struct knote *kn)
1873 1.4.2.2 nathanw {
1874 1.4.2.14 nathanw struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1875 1.4.2.2 nathanw
1876 1.4.2.14 nathanw 	switch (kn->kn_filter) {
1877 1.4.2.2 nathanw case EVFILT_WRITE:
1878 1.4.2.14 nathanw /* need the peer structure, not our own */
1879 1.4.2.2 nathanw cpipe = cpipe->pipe_peer;
1880 1.4.2.14 nathanw
1881 1.4.2.14 nathanw /* if reader end already closed, just return */
1882 1.4.2.14 nathanw if (!cpipe)
1883 1.4.2.14 nathanw return;
1884 1.4.2.14 nathanw
1885 1.4.2.2 nathanw break;
1886 1.4.2.2 nathanw default:
1887 1.4.2.14 nathanw /* nothing to do */
1888 1.4.2.14 nathanw break;
1889 1.4.2.2 nathanw }
1890 1.4.2.11 nathanw
1891 1.4.2.14 nathanw #ifdef DIAGNOSTIC
1892 1.4.2.14 nathanw if (kn->kn_hook != cpipe)
1893 1.4.2.14 nathanw panic("filt_pipedetach: inconsistent knote");
1894 1.4.2.14 nathanw #endif
1895 1.4.2.2 nathanw
1896 1.4.2.11 nathanw PIPE_LOCK(cpipe);
1897 1.4.2.2 nathanw SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
1898 1.4.2.11 nathanw PIPE_UNLOCK(cpipe);
1899 1.4.2.2 nathanw }
1900 1.4.2.2 nathanw
1901 1.4.2.2 nathanw /*ARGSUSED*/
1902 1.4.2.2 nathanw static int
1903 1.4.2.2 nathanw filt_piperead(struct knote *kn, long hint)
1904 1.4.2.2 nathanw {
1905 1.4.2.2 nathanw struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1906 1.4.2.2 nathanw struct pipe *wpipe = rpipe->pipe_peer;
1907 1.4.2.2 nathanw
1908 1.4.2.11 nathanw PIPE_LOCK(rpipe);
1909 1.4.2.2 nathanw kn->kn_data = rpipe->pipe_buffer.cnt;
1910 1.4.2.2 nathanw if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1911 1.4.2.2 nathanw kn->kn_data = rpipe->pipe_map.cnt;
1912 1.4.2.2 nathanw
1913 1.4.2.2 nathanw if ((rpipe->pipe_state & PIPE_EOF) ||
1914 1.4.2.2 nathanw (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1915 1.4.2.11 nathanw kn->kn_flags |= EV_EOF;
1916 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1917 1.4.2.2 nathanw return (1);
1918 1.4.2.2 nathanw }
1919 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1920 1.4.2.2 nathanw return (kn->kn_data > 0);
1921 1.4.2.2 nathanw }
1922 1.4.2.2 nathanw
1923 1.4.2.2 nathanw /*ARGSUSED*/
1924 1.4.2.2 nathanw static int
1925 1.4.2.2 nathanw filt_pipewrite(struct knote *kn, long hint)
1926 1.4.2.2 nathanw {
1927 1.4.2.2 nathanw struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1928 1.4.2.2 nathanw struct pipe *wpipe = rpipe->pipe_peer;
1929 1.4.2.2 nathanw
1930 1.4.2.11 nathanw PIPE_LOCK(rpipe);
1931 1.4.2.2 nathanw if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1932 1.4.2.2 nathanw kn->kn_data = 0;
1933 1.4.2.2 nathanw kn->kn_flags |= EV_EOF;
1934 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1935 1.4.2.2 nathanw return (1);
1936 1.4.2.2 nathanw }
1937 1.4.2.2 nathanw kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1938 1.4.2.2 nathanw if (wpipe->pipe_state & PIPE_DIRECTW)
1939 1.4.2.2 nathanw kn->kn_data = 0;
1940 1.4.2.2 nathanw
1941 1.4.2.11 nathanw PIPE_UNLOCK(rpipe);
1942 1.4.2.2 nathanw return (kn->kn_data >= PIPE_BUF);
1943 1.4.2.2 nathanw }
1944 1.4.2.14 nathanw
1945 1.4.2.14 nathanw static const struct filterops pipe_rfiltops =
1946 1.4.2.14 nathanw { 1, NULL, filt_pipedetach, filt_piperead };
1947 1.4.2.14 nathanw static const struct filterops pipe_wfiltops =
1948 1.4.2.14 nathanw { 1, NULL, filt_pipedetach, filt_pipewrite };
1949 1.4.2.14 nathanw
1950 1.4.2.14 nathanw /*ARGSUSED*/
1951 1.4.2.14 nathanw static int
1952 1.4.2.14 nathanw pipe_kqfilter(struct file *fp, struct knote *kn)
1953 1.4.2.14 nathanw {
1954 1.4.2.14 nathanw struct pipe *cpipe;
1955 1.4.2.14 nathanw
1956 1.4.2.14 nathanw cpipe = (struct pipe *)kn->kn_fp->f_data;
1957 1.4.2.14 nathanw switch (kn->kn_filter) {
1958 1.4.2.14 nathanw case EVFILT_READ:
1959 1.4.2.14 nathanw kn->kn_fop = &pipe_rfiltops;
1960 1.4.2.14 nathanw break;
1961 1.4.2.14 nathanw case EVFILT_WRITE:
1962 1.4.2.14 nathanw kn->kn_fop = &pipe_wfiltops;
1963 1.4.2.14 nathanw cpipe = cpipe->pipe_peer;
1964 1.4.2.14 nathanw if (cpipe == NULL) {
1965 1.4.2.14 nathanw /* other end of pipe has been closed */
1966 1.4.2.14 nathanw return (EBADF);
1967 1.4.2.14 nathanw }
1968 1.4.2.14 nathanw break;
1969 1.4.2.14 nathanw default:
1970 1.4.2.14 nathanw return (1);
1971 1.4.2.14 nathanw }
1972 1.4.2.14 nathanw kn->kn_hook = cpipe;
1973 1.4.2.14 nathanw
1974 1.4.2.14 nathanw PIPE_LOCK(cpipe);
1975 1.4.2.14 nathanw SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
1976 1.4.2.14 nathanw PIPE_UNLOCK(cpipe);
1977 1.4.2.14 nathanw return (0);
1978 1.4.2.14 nathanw }
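/*
 * The hooks above make pipes usable with kevent(2): EVFILT_READ reports the
 * number of buffered bytes in the event's data field, EVFILT_WRITE the space
 * remaining.  Illustrative sketch (not part of these sources):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fds[2], kq;
 *		struct kevent ev;
 *
 *		if (pipe(fds) == -1 || (kq = kqueue()) == -1)
 *			return 1;
 *		EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
 *		if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *			return 1;
 *		write(fds[1], "data", 4);
 *		if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *			printf("%lld readable bytes\n", (long long)ev.data);
 *		return 0;
 *	}
 */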
1979 1.4.2.2 nathanw
1980 1.4.2.2 nathanw #ifdef __NetBSD__
1981 1.4.2.2 nathanw static int
1982 1.4.2.2 nathanw pipe_fcntl(fp, cmd, data, p)
1983 1.4.2.2 nathanw struct file *fp;
1984 1.4.2.2 nathanw u_int cmd;
1985 1.4.2.2 nathanw caddr_t data;
1986 1.4.2.2 nathanw struct proc *p;
1987 1.4.2.2 nathanw {
1988 1.4.2.2 nathanw if (cmd == F_SETFL)
1989 1.4.2.2 nathanw return (0);
1990 1.4.2.2 nathanw else
1991 1.4.2.2 nathanw return (EOPNOTSUPP);
1992 1.4.2.2 nathanw }
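/*
 * pipe_fcntl() only needs to acknowledge F_SETFL; the generic descriptor
 * code records the new flags in fp->f_flag, and pipe_read()/pipe_write()
 * then honour FNONBLOCK.  The usual user-level idiom (illustrative sketch,
 * not part of these sources):
 *
 *	#include <fcntl.h>
 *
 *	int
 *	set_nonblock(int fd)
 *	{
 *		int flags = fcntl(fd, F_GETFL, 0);
 *
 *		if (flags == -1)
 *			return (-1);
 *		return (fcntl(fd, F_SETFL, flags | O_NONBLOCK));
 *	}
 */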
1993 1.4.2.2 nathanw
1994 1.4.2.2 nathanw /*
1995 1.4.2.2 nathanw * Handle pipe sysctls.
1996 1.4.2.2 nathanw */
1997 1.4.2.2 nathanw int
1998 1.4.2.2 nathanw sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
1999 1.4.2.2 nathanw int *name;
2000 1.4.2.2 nathanw u_int namelen;
2001 1.4.2.2 nathanw void *oldp;
2002 1.4.2.2 nathanw size_t *oldlenp;
2003 1.4.2.2 nathanw void *newp;
2004 1.4.2.2 nathanw size_t newlen;
2005 1.4.2.2 nathanw {
2006 1.4.2.2 nathanw /* All sysctl names at this level are terminal. */
2007 1.4.2.2 nathanw if (namelen != 1)
2008 1.4.2.2 nathanw return (ENOTDIR); /* overloaded */
2009 1.4.2.2 nathanw
2010 1.4.2.2 nathanw switch (name[0]) {
2011 1.4.2.2 nathanw case KERN_PIPE_MAXKVASZ:
2012 1.4.2.2 nathanw return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
2013 1.4.2.2 nathanw case KERN_PIPE_LIMITKVA:
2014 1.4.2.2 nathanw return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
2015 1.4.2.2 nathanw case KERN_PIPE_MAXBIGPIPES:
2016 1.4.2.2 nathanw return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
2017 1.4.2.2 nathanw case KERN_PIPE_NBIGPIPES:
2018 1.4.2.2 nathanw return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
2019 1.4.2.2 nathanw case KERN_PIPE_KVASIZE:
2020 1.4.2.2 nathanw return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
2021 1.4.2.2 nathanw default:
2022 1.4.2.2 nathanw return (EOPNOTSUPP);
2023 1.4.2.2 nathanw }
2024 1.4.2.2 nathanw /* NOTREACHED */
2025 1.4.2.2 nathanw }
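/*
 * These nodes are normally reached through the kern.pipe subtree.  An
 * illustrative user-level reader (a sketch only; it assumes the KERN_PIPE
 * parent node and the KERN_PIPE_* identifiers are visible via
 * <sys/sysctl.h> and <sys/pipe.h>, which may differ between releases):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/pipe.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[3] = { CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES };
 *		int nbig;
 *		size_t len = sizeof(nbig);
 *
 *		if (sysctl(mib, 3, &nbig, &len, NULL, 0) == 0)
 *			printf("%d big pipes in use\n", nbig);
 *		return 0;
 *	}
 */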
2026 1.4.2.2 nathanw
2027 1.4.2.2 nathanw /*
2028 1.4.2.2 nathanw * Initialize pipe structs.
2029 1.4.2.2 nathanw */
2030 1.4.2.2 nathanw void
2031 1.4.2.2 nathanw pipe_init(void)
2032 1.4.2.2 nathanw {
2033 1.4.2.11 nathanw pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl", NULL);
2034 1.4.2.2 nathanw }
2035 1.4.2.2 nathanw
2036 1.4.2.2 nathanw #endif /* __NetBSD__ */
2037