/*	$NetBSD: sys_pipe.c,v 1.5.2.7 2002/08/07 14:48:16 jdolecek Exp $	*/

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 *
 * Adaptation for NetBSD UVM, including the uvm_loan() based direct write,
 * was written by Jaromir Dolecek.
 */

/*
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
 * those pages are also wired), and the receiving process can copy it directly
 * from the pages in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * on FreeBSD arranges to copy the buffer supplied by the user process
 * to a pageable kernel buffer, and the receiving process grabs the data
 * from that pageable kernel buffer.  Since signals don't happen all that
 * often, the copy operation is normally eliminated.
 * On NetBSD, the pages are mapped read-only, copy-on-write for the kernel,
 * by uvm_loan(), so no explicit handling needs to be done; everything is
 * handled by the standard VM facilities.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
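
/*
 * Illustrative userland sketch of the two modes (never compiled, hence
 * the #if 0): a write below PIPE_MINDIRECT travels through the kernel
 * buffer, while a sufficiently large write on a blocking descriptor is
 * eligible for the direct (loan-based) path described above.  The
 * 512/64k sizes are arbitrary assumptions for the demonstration, not
 * tunables.
 */
#if 0
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char small[512];		/* small: buffered write */
	static char big[64 * 1024];	/* large: candidate for direct write */
	char sink[8192];
	ssize_t n;

	if (pipe(fds) == -1)
		return 1;
	if (fork() == 0) {		/* child drains the pipe */
		close(fds[1]);
		while ((n = read(fds[0], sink, sizeof(sink))) > 0)
			continue;
		_exit(0);
	}
	close(fds[0]);
	(void)write(fds[1], small, sizeof(small));
	(void)write(fds[1], big, sizeof(big));
	close(fds[1]);
	(void)wait(NULL);
	return 0;
}
#endif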

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.5.2.7 2002/08/07 14:48:16 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#ifdef __FreeBSD__
#include <sys/mutex.h>
#endif
#ifdef __NetBSD__
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <uvm/uvm.h>
#include <sys/sysctl.h>
#endif /* NetBSD, FreeBSD */

#include <sys/pipe.h>

#ifdef __NetBSD__
/*
 * Avoid microtime(9), it's slow.  We don't guard the read from time(9)
 * with splclock(9) since we don't actually need to be THAT sure the access
 * is atomic.
 */
#define vfs_timestamp(tv)	(*(tv) = time)
#endif

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
#ifdef __FreeBSD__
static int pipe_read(struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int pipe_write(struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int pipe_close(struct file *fp, struct thread *td);
static int pipe_poll(struct file *fp, int events, struct ucred *cred,
		struct thread *td);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb, struct thread *td);
static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
		struct thread *td);

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

#define PIPE_GET_GIANT(pipe)						\
	do {								\
		PIPE_UNLOCK(wpipe);					\
		mtx_lock(&Giant);					\
	} while (0)

#define PIPE_DROP_GIANT(pipe)						\
	do {								\
		mtx_unlock(&Giant);					\
		PIPE_LOCK(wpipe);					\
	} while (0)

#endif /* FreeBSD */

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static const struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static const struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

#ifdef __NetBSD__
static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close(struct file *fp, struct proc *p);
static int pipe_poll(struct file *fp, int events, struct proc *p);
static int pipe_fcntl(struct file *fp, u_int com, caddr_t data,
		struct proc *p);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb, struct proc *p);
static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
		struct proc *p);

static struct fileops pipeops =
	{ pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
	  pipe_stat, pipe_close, pipe_kqfilter };

/* XXXSMP perhaps use spinlocks & KERNEL_PROC_(UN)LOCK() ? just clear now */
#define PIPE_GET_GIANT(pipe)
#define PIPE_DROP_GIANT(pipe)
#define GIANT_REQUIRED

#endif /* NetBSD */

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)
static int maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)
static int limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes.
 */
#define LIMITBIGPIPES 32
static int maxbigpipes = LIMITBIGPIPES;
static int nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static int amountpipekva = 0;

static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe **cpipep, int allockva);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe, struct pipe *sigp);
#ifndef PIPE_NODIRECT
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
#endif
static int pipespace(struct pipe *cpipe, int size);

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
static int pipe_loan_alloc(struct pipe *, int);
static void pipe_loan_free(struct pipe *);
#endif /* PIPE_NODIRECT */

static struct pool pipe_pool;
#endif /* NetBSD */

#ifdef __FreeBSD__
static vm_zone_t pipe_zone;

static void pipeinit(void *dummy __unused);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
}
#endif /* FreeBSD */

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
#ifdef __FreeBSD__
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
#elif defined(__NetBSD__)
int
sys_pipe(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
#endif
{
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;
#ifdef __FreeBSD__
	struct mtx *pmtx;

	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));

	pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (ENFILE);
	}

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	fhold(rf);
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	error = falloc(td, &wf, &fd);
	if (error) {
		struct filedesc *fdp = td->td_proc->p_fd;

		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else
			FILEDESC_UNLOCK(fdp);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	td->td_retval[1] = fd;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	mtx_init(pmtx, "pipe mutex", MTX_DEF);
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
	fdrop(rf, td);
#endif /* FreeBSD */

#ifdef __NetBSD__
	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	/*
	 * Note: the file structure returned from falloc() is marked
	 * as 'larval' initially.  Unless we mark it as 'mature' by
	 * FILE_SET_MATURE(), any attempt to do anything with it would
	 * return EBADF, including e.g. dup(2) or close(2).  This avoids
	 * file descriptor races if we block in the second falloc().
	 */

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf);
	FILE_SET_MATURE(wf);
	FILE_UNUSE(rf, p);
	FILE_UNUSE(wf, p);
	return (0);
free3:
	FILE_UNUSE(rf, p);
	ffree(rf);
	fdremove(p->p_fd, retval[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);
#endif /* NetBSD */

	return (error);
}
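
/*
 * Hedged userland sketch (not compiled) of the descriptor setup above:
 * on NetBSD the read side is opened FREAD-only and the write side
 * FWRITE-only, so writing to fds[0] (or reading fds[1]) fails with
 * EBADF.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];

	if (pipe(fds) == -1)
		return 1;
	if (write(fds[0], "x", 1) == -1 && errno == EBADF)
		printf("read end rejects write(2) as expected\n");
	close(fds[0]);
	close(fds[1]);
	return 0;
}
#endif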

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it retains the old buffer and returns ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
#ifdef __FreeBSD__
	struct vm_object *object;
	int npages, error;

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, object, 0,
	    (vm_offset_t *) &buffer, size, 1,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Allocate pageable virtual address space.  Physical memory is
	 * allocated on demand.
	 */
	buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
	if (buffer == NULL)
		return (ENOMEM);
#endif /* NetBSD */

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	cpipe->pipe_buffer.object = object;
#endif
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep, allockva)
	struct pipe **cpipep;
	int allockva;
{
	struct pipe *cpipe;
	int error;

#ifdef __FreeBSD__
	*cpipep = zalloc(pipe_zone);
#endif
#ifdef __NetBSD__
	*cpipep = pool_get(&pipe_pool, M_WAITOK);
#endif
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* Initialize */
	memset(cpipe, 0, sizeof(*cpipe));
	cpipe->pipe_state = PIPE_SIGNALR;

#ifdef __FreeBSD__
	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
#endif
	if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
#ifdef __NetBSD__
	cpipe->pipe_pgid = NO_PID;
	lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
#endif

	return (0);
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
#endif

#ifdef __NetBSD__
	do {
		error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
	} while (!catch && (error == EINTR || error == ERESTART));
	return (error);
#endif
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
#endif

#ifdef __NetBSD__
	lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
#endif
}

/*
 * Select/poll wakeup.  This also sends SIGIO to the peer connected to
 * the 'sigpipe' side of the pipe.
 */
static __inline void
pipeselwakeup(selp, sigp)
	struct pipe *selp, *sigp;
{

#ifdef __FreeBSD__
	if (selp->pipe_state & PIPE_SEL) {
		selp->pipe_state &= ~PIPE_SEL;
		selwakeup(&selp->pipe_sel);
	}
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
		pgsigio(sigp->pipe_sigio, SIGIO, 0);
	KNOTE(&selp->pipe_sel.si_note, 0);
#endif

#ifdef __NetBSD__
	selnotify(&selp->pipe_sel, 0);
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) &&
	    sigp->pipe_pgid != NO_PID) {
		struct proc *p;

		if (sigp->pipe_pgid < 0)
			gsignal(-sigp->pipe_pgid, SIGIO);
		else if (sigp->pipe_pgid > 0 &&
		    (p = pfind(sigp->pipe_pgid)) != NULL)
			psignal(p, SIGIO);
	}
#endif /* NetBSD */
}

/* ARGSUSED */
#ifdef __FreeBSD__
static int
pipe_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
#elif defined(__NetBSD__)
static int
pipe_read(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	ocnt = rpipe->pipe_buffer.cnt;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
		    (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t va;

			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * We want to read more, wake up select/poll.
			 */
			pipeselwakeup(rpipe, rpipe->pipe_peer);

			rpipe->pipe_state |= PIPE_WANTR;
#ifdef __FreeBSD__
			error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "piperd", 0);
#else
			error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
#endif
			if (error != 0 || (error = pipelock(rpipe, 1)))
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
		rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/*
	 * If anything was read off the buffer, signal to the writer that it's
	 * possible to write more data.  Also send a signal if we are here for
	 * the first time after the last write.
	 */
	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
	    && (ocnt != rpipe->pipe_buffer.cnt ||
	        (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	size_t size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	size = uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
	    wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > maxpipekva) {
			vm_offset_t kva = wpipe->pipe_map.kva;

			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	memcpy((caddr_t) wpipe->pipe_buffer.buffer,
	    (caddr_t) wpipe->pipe_map.kva + pos, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe, wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* FreeBSD */

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static int
pipe_loan_alloc(wpipe, npages)
	struct pipe *wpipe;
	int npages;
{
	vsize_t len;

	len = (vsize_t)npages << PAGE_SHIFT;
	wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, len);
	if (wpipe->pipe_map.kva == 0)
		return (ENOMEM);

	amountpipekva += len;
	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
	    M_WAITOK);
	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(wpipe)
	struct pipe *wpipe;
{
	vsize_t len;

	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len);
	wpipe->pipe_map.kva = 0;
	amountpipekva -= len;
	free(wpipe->pipe_map.pgs, M_PIPE);
	wpipe->pipe_map.pgs = NULL;
}

/*
 * NetBSD direct write, using the uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, npages, j;
	struct vm_page **pgs;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
		goto retry;
	}

	/*
	 * Handle the first PIPE_DIRECT_CHUNK bytes of the buffer.  Deal with
	 * buffers not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;

	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error)
			goto error;
	}

	/* Loan the write buffer memory from the writer process */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
	    pgs, UVM_LOAN_TOPAGE);
	if (error) {
		pgs = NULL;
		goto cleanup;
	}

	/* Enter the loaned pages to kva */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
	}
	pmap_update(pmap_kernel());

	wpipe->pipe_state |= PIPE_DIRECTW;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	if (error)
		wpipe->pipe_state &= ~PIPE_DIRECTW;

cleanup:
	pipelock(wpipe, 0);
	if (pgs != NULL) {
		pmap_kremove(wpipe->pipe_map.kva, blen);
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);
	pipeunlock(wpipe);

	if (error) {
		pipeselwakeup(wpipe, wpipe);

		/*
		 * If nothing was read from what we offered, return the error
		 * straight away.  Otherwise update uio resid first.  The
		 * caller will deal with the error condition, returning a
		 * short write, the error, or restarting the write(2) as
		 * appropriate.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
error:
			wakeup(wpipe);
			return (error);
		}

		bcnt -= wpipe->pipe_map.cnt;
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* NetBSD */

#ifdef __FreeBSD__
static int
pipe_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
#elif defined(__NetBSD__)
static int
pipe_write(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	int error = 0;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}

	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			PIPE_GET_GIANT(rpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			PIPE_DROP_GIANT(rpipe);
			pipeunlock(wpipe);
		} else {
			/*
			 * If an error occurred, unbusy and return, waking up
			 * any waiting readers.
			 */
			--wpipe->pipe_busy;
			if (wpipe->pipe_busy == 0
			    && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
				wpipe->pipe_state &=
				    ~(PIPE_WANTCLOSE | PIPE_WANTR);
				wakeup(wpipe);
			}

			return (error);
		}
	}

#ifdef __FreeBSD__
	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return (error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
#endif

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(wpipe, uio);

			/*
			 * Break out if an error occurred, unless it's ENOMEM.
			 * ENOMEM means we failed to allocate some resources
			 * for the direct write, so we just fall back to the
			 * ordinary write.  If the direct write was successful,
			 * process the rest of the data via the ordinary write.
			 */
			if (!error)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* PIPE_NODIRECT */

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
#ifdef __FreeBSD__
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
#else
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
#endif
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			if ((error = pipelock(wpipe, 1)) != 0)
				break;

			/*
			 * It is possible for a direct write to
			 * slip in on us... handle it here...
			 */
			if (wpipe->pipe_state & PIPE_DIRECTW) {
				pipeunlock(wpipe);
				goto retrywrite;
			}
			/*
			 * If a process blocked in uiomove, our
			 * value for space might be bad.
			 *
			 * XXX will we be ok if the reader has gone
			 * away here?
			 */
			if (space > wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.cnt) {
				pipeunlock(wpipe);
				goto retrywrite;
			}

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */

			PIPE_UNLOCK(rpipe);
			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
			    segsize, uio);
			PIPE_LOCK(rpipe);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
#ifdef DEBUG
				if (wpipe->pipe_buffer.in + segsize !=
				    wpipe->pipe_buffer.size)
					panic("Expected pipe buffer wraparound disappeared");
#endif

				PIPE_UNLOCK(rpipe);
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
				    size - segsize, uio);
				PIPE_LOCK(rpipe);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
#ifdef DEBUG
					if (wpipe->pipe_buffer.in !=
					    size - segsize +
					    wpipe->pipe_buffer.size)
						panic("Expected wraparound bad");
#endif
					wpipe->pipe_buffer.in = size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
#ifdef DEBUG
				if (wpipe->pipe_buffer.cnt >
				    wpipe->pipe_buffer.size)
					panic("Pipe buffer overflow");
#endif
			}
			pipeunlock(wpipe);
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe, wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
#ifdef __FreeBSD__
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
#else
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
#endif
			if (error != 0)
				break;
			/*
			 * If the read side wants to go away, we just issue
			 * a signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
		wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
	    && (uio->uio_resid == 0))
		error = 0;

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (the direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe, wpipe);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	PIPE_UNLOCK(rpipe);
	return (error);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
#ifdef __FreeBSD__
pipe_ioctl(fp, cmd, data, td)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct thread *td;
#else
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
#endif
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		PIPE_LOCK(mpipe);
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIONREAD:
		PIPE_LOCK(mpipe);
#ifndef PIPE_NODIRECT
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
#endif
			*(int *)data = mpipe->pipe_buffer.cnt;
		PIPE_UNLOCK(mpipe);
		return (0);

#ifdef __FreeBSD__
	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);
#endif /* FreeBSD */
#ifdef __NetBSD__
	case TIOCSPGRP:
		mpipe->pipe_pgid = *(int *)data;
		return (0);

	case TIOCGPGRP:
		*(int *)data = mpipe->pipe_pgid;
		return (0);
#endif /* NetBSD */

	}
	return (EPASSTHROUGH);
}
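
/*
 * Minimal userland sketch (not compiled) of the FIONREAD ioctl handled
 * above: it reports the buffered byte count, or the pending count of a
 * direct write.
 */
#if 0
#include <sys/ioctl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2], nread;

	if (pipe(fds) == -1)
		return 1;
	(void)write(fds[1], "hello", 5);
	if (ioctl(fds[0], FIONREAD, &nread) == 0)
		printf("%d bytes readable\n", nread);	/* prints 5 */
	close(fds[0]);
	close(fds[1]);
	return 0;
}
#endif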

int
#ifdef __FreeBSD__
pipe_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
#elif defined(__NetBSD__)
pipe_poll(fp, events, td)
	struct file *fp;
	int events;
	struct proc *td;
#endif
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTW) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
		    || (
#ifndef PIPE_NODIRECT
		    ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
#endif
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	PIPE_UNLOCK(rpipe);

	return (revents);
}
static int
#ifdef __FreeBSD__
pipe_stat(fp, ub, td)
	struct file *fp;
	struct stat *ub;
	struct thread *td;
#else
pipe_stat(fp, ub, td)
	struct file *fp;
	struct stat *ub;
	struct proc *td;
#endif
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	memset((caddr_t)ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
#ifdef __FreeBSD__
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
#endif /* FreeBSD */
#ifdef __NetBSD__
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
#endif /* NetBSD */
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
#ifdef __FreeBSD__
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
#else
pipe_close(fp, td)
	struct file *fp;
	struct proc *td;
#endif
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

#ifdef __FreeBSD__
	fp->f_ops = &badfileops;
	funsetown(cpipe->pipe_sigio);
#endif
	fp->f_data = NULL;
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));
#endif

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
#ifdef __FreeBSD__
		kmem_free(kernel_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
#elif defined(__NetBSD__)
		uvm_km_free(kernel_map,
		    (vaddr_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
#endif /* NetBSD */
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != 0) {
#ifdef __FreeBSD__
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
		    cpipe->pipe_map.kva,
		    cpipe->pipe_buffer.size + PAGE_SIZE);
#elif defined(__NetBSD__)
		pipe_loan_free(cpipe);
#endif /* NetBSD */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
#ifdef __FreeBSD__
	int hadpeer = 0;
#endif

	if (cpipe == NULL)
		return;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	pipeselwakeup(cpipe, cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
#ifdef __FreeBSD__
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
#else
		tsleep(cpipe, PRIBIO, "pipecl", 0);
#endif
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
#ifdef __FreeBSD__
		hadpeer++;
#endif
		pipeselwakeup(ppipe, ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
#ifdef __FreeBSD__
		KNOTE(&ppipe->pipe_sel.si_note, 0);
#endif
		ppipe->pipe_peer = NULL;
	}

	/*
	 * free resources
	 */
#ifdef __FreeBSD__
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
#endif

#ifdef __NetBSD__
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_UNLOCK(cpipe);

	pipe_free_kmem(cpipe);
	(void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
	pool_put(&pipe_pool, cpipe);
#endif
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			return (EBADF);
		}
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	PIPE_LOCK(cpipe);
	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	PIPE_LOCK(cpipe);
	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}
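
/*
 * Userland sketch (not compiled) of the kqueue filters above:
 * EVFILT_READ on the read end fires once data is buffered, and the
 * filter's kn_data arrives in the returned kevent's data field.
 */
#if 0
#include <sys/event.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int fds[2], kq;

	if (pipe(fds) == -1 || (kq = kqueue()) == -1)
		return 1;
	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return 1;
	(void)write(fds[1], "x", 1);
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("readable, %lld byte(s) pending\n",
		    (long long)kev.data);
	close(fds[0]);
	close(fds[1]);
	return 0;
}
#endif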

#ifdef __NetBSD__
static int
pipe_fcntl(fp, cmd, data, p)
	struct file *fp;
	u_int cmd;
	caddr_t data;
	struct proc *p;
{

	if (cmd == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*
 * Handle pipe sysctls.
 */
int
sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_PIPE_MAXKVASZ:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
	case KERN_PIPE_LIMITKVA:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
	case KERN_PIPE_MAXBIGPIPES:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
	case KERN_PIPE_NBIGPIPES:
		return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
	case KERN_PIPE_KVASIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
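
/*
 * Hedged userland sketch (not compiled) of reading one of the sysctls
 * above.  This assumes the node is rooted at CTL_KERN/KERN_PIPE; check
 * <sys/sysctl.h> for the actual MIB placement on your system.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[3] = { CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ };
	int val;
	size_t len = sizeof(val);

	if (sysctl(mib, 3, &val, &len, NULL, 0) == -1)
		return 1;
	printf("maximum pipe kva: %d bytes\n", val);
	return 0;
}
#endif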

/*
 * Initialize pipe structs.
 */
void
pipe_init(void)
{

	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl", NULL);
}

#endif /* __NetBSD__ */