/*	$NetBSD: sys_pipe.c,v 1.34 2003/02/01 06:23:44 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 1996 John S. Dyson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Absolutely no warranty of function or purpose is made by the author
17 * John S. Dyson.
18 * 4. Modifications may be freely made to this file if the above conditions
19 * are met.
20 *
21 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
22 */
23
24 /*
25 * This file contains a high-performance replacement for the socket-based
26 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
27 * all features of sockets, but does do everything that pipes normally
28 * do.
29 *
 * Adaptation for NetBSD UVM, including uvm_loan() based direct write, was
 * written by Jaromir Dolecek.
32 */
33
34 /*
35 * This code has two modes of operation, a small write mode and a large
36 * write mode. The small write mode acts like conventional pipes with
37 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
38 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
39 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
40 * those pages are also wired), and the receiving process can copy it directly
41 * from the pages in the sending process.
42 *
43 * If the sending process receives a signal, it is possible that it will
44 * go away, and certainly its address space can change, because control
45 * is returned back to the user-mode side. In that case, the pipe code
46 * arranges to copy the buffer supplied by the user process on FreeBSD, to
47 * a pageable kernel buffer, and the receiving process will grab the data
48 * from the pageable kernel buffer. Since signals don't happen all that often,
49 * the copy operation is normally eliminated.
50 * For NetBSD, the pages are mapped read-only, COW for kernel by uvm_loan(),
51 * so no explicit handling need to be done, all is handled by standard VM
52 * facilities.
53 *
54 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
55 * happen for small transfers so that the system will not spend all of
56 * its time context switching. PIPE_SIZE is constrained by the
57 * amount of kernel virtual memory.
58 */
59
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.34 2003/02/01 06:23:44 thorpej Exp $");
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc.h>
66 #include <sys/fcntl.h>
67 #include <sys/file.h>
68 #include <sys/filedesc.h>
69 #include <sys/filio.h>
70 #include <sys/kernel.h>
71 #include <sys/lock.h>
72 #include <sys/ttycom.h>
73 #include <sys/stat.h>
74 #include <sys/malloc.h>
75 #include <sys/poll.h>
76 #include <sys/signalvar.h>
77 #include <sys/vnode.h>
78 #include <sys/uio.h>
79 #include <sys/lock.h>
80 #ifdef __FreeBSD__
81 #include <sys/mutex.h>
82 #endif
83 #ifdef __NetBSD__
84 #include <sys/select.h>
85 #include <sys/mount.h>
86 #include <sys/sa.h>
87 #include <sys/syscallargs.h>
88 #include <uvm/uvm.h>
89 #include <sys/sysctl.h>
90 #include <sys/kernel.h>
91 #endif /* NetBSD, FreeBSD */
92
93 #include <sys/pipe.h>
94
95 #ifdef __NetBSD__
96 /*
97 * Avoid microtime(9), it's slow. We don't guard the read from time(9)
98 * with splclock(9) since we don't actually need to be THAT sure the access
99 * is atomic.
100 */
101 #define vfs_timestamp(tv) (*(tv) = time)
102
103 /* we call it sel_klist */
104 #define sel_note sel_klist
105
106 #endif
107
108 /*
109 * Use this define if you want to disable *fancy* VM things. Expect an
110 * approx 30% decrease in transfer rate. This could be useful for
111 * OpenBSD.
112 */
113 /* #define PIPE_NODIRECT */
114
115 /*
116 * interfaces to the outside world
117 */
118 #ifdef __FreeBSD__
119 static int pipe_read(struct file *fp, struct uio *uio,
120 struct ucred *cred, int flags, struct thread *td);
121 static int pipe_write(struct file *fp, struct uio *uio,
122 struct ucred *cred, int flags, struct thread *td);
123 static int pipe_close(struct file *fp, struct thread *td);
124 static int pipe_poll(struct file *fp, int events, struct ucred *cred,
125 struct thread *td);
126 static int pipe_kqfilter(struct file *fp, struct knote *kn);
127 static int pipe_stat(struct file *fp, struct stat *sb, struct thread *td);
128 static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct thread *td);
129
130 static struct fileops pipeops = {
131 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
132 pipe_stat, pipe_close
133 };
134
135 #define PIPE_GET_GIANT(pipe) \
136 do { \
137 PIPE_UNLOCK(wpipe); \
138 mtx_lock(&Giant); \
139 } while (/*CONSTCOND*/ 0)
140
141 #define PIPE_DROP_GIANT(pipe) \
142 do { \
143 mtx_unlock(&Giant); \
144 PIPE_LOCK(wpipe); \
145 } while (/*CONSTCOND*/ 0)
146
147 #endif /* FreeBSD */
148
149 #ifdef __NetBSD__
150 static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
151 struct ucred *cred, int flags);
152 static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
153 struct ucred *cred, int flags);
154 static int pipe_close(struct file *fp, struct proc *p);
155 static int pipe_poll(struct file *fp, int events, struct proc *p);
156 static int pipe_fcntl(struct file *fp, u_int com, caddr_t data,
157 struct proc *p);
158 static int pipe_kqfilter(struct file *fp, struct knote *kn);
159 static int pipe_stat(struct file *fp, struct stat *sb, struct proc *p);
160 static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct proc *p);
161
162 static struct fileops pipeops =
163 { pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
164 pipe_stat, pipe_close, pipe_kqfilter };
165
166 /* XXXSMP perhaps use spinlocks & KERNEL_PROC_(UN)LOCK() ? just clear now */
167 #define PIPE_GET_GIANT(pipe)
168 #define PIPE_DROP_GIANT(pipe)
169 #define GIANT_REQUIRED
170
171 #endif /* NetBSD */
172
173 /*
174 * Default pipe buffer size(s), this can be kind-of large now because pipe
175 * space is pageable. The pipe code will try to maintain locality of
176 * reference for performance reasons, so small amounts of outstanding I/O
177 * will not wipe the cache.
178 */
179 #define MINPIPESIZE (PIPE_SIZE/3)
180 #define MAXPIPESIZE (2*PIPE_SIZE/3)
181
182 /*
183 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
184 * is there so that on large systems, we don't exhaust it.
185 */
186 #define MAXPIPEKVA (8*1024*1024)
187 static int maxpipekva = MAXPIPEKVA;
188
189 /*
190 * Limit for direct transfers, we cannot, of course limit
191 * the amount of kva for pipes in general though.
192 */
193 #define LIMITPIPEKVA (16*1024*1024)
194 static int limitpipekva = LIMITPIPEKVA;
195
196 /*
197 * Limit the number of "big" pipes
198 */
199 #define LIMITBIGPIPES 32
200 static int maxbigpipes = LIMITBIGPIPES;
201 static int nbigpipe = 0;
202
203 /*
204 * Amount of KVA consumed by pipe buffers.
205 */
206 static int amountpipekva = 0;
207
208 MALLOC_DEFINE(M_PIPE, "pipe", "Pipe structures");
209
210 static void pipeclose(struct pipe *cpipe);
211 static void pipe_free_kmem(struct pipe *cpipe);
212 static int pipe_create(struct pipe **cpipep, int allockva);
213 static __inline int pipelock(struct pipe *cpipe, int catch);
214 static __inline void pipeunlock(struct pipe *cpipe);
215 static __inline void pipeselwakeup(struct pipe *cpipe, struct pipe *sigp);
216 #ifndef PIPE_NODIRECT
217 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
218 #endif
219 static int pipespace(struct pipe *cpipe, int size);
220
221 #ifdef __NetBSD__
222 #ifndef PIPE_NODIRECT
223 static int pipe_loan_alloc(struct pipe *, int);
224 static void pipe_loan_free(struct pipe *);
225 #endif /* PIPE_NODIRECT */
226
227 static struct pool pipe_pool;
228 #endif /* NetBSD */
229
230 #ifdef __FreeBSD__
231 static vm_zone_t pipe_zone;
232
233 static void pipeinit(void *dummy __unused);
234 #ifndef PIPE_NODIRECT
235 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
236 static void pipe_destroy_write_buffer(struct pipe *wpipe);
237 static void pipe_clone_write_buffer(struct pipe *wpipe);
238 #endif
239
240 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
241
242 static void
243 pipeinit(void *dummy __unused)
244 {
245
246 pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
247 }
248 #endif /* FreeBSD */
249
250 /*
251 * The pipe system call for the DTYPE_PIPE type of pipes
252 */
253
254 /* ARGSUSED */
255 #ifdef __FreeBSD__
256 int
257 pipe(td, uap)
258 struct thread *td;
259 struct pipe_args /* {
260 int dummy;
261 } */ *uap;
262 #elif defined(__NetBSD__)
263 int
264 sys_pipe(l, v, retval)
265 struct lwp *l;
266 void *v;
267 register_t *retval;
268 #endif
269 {
270 struct file *rf, *wf;
271 struct pipe *rpipe, *wpipe;
272 int fd, error;
273 struct proc *p;
274 #ifdef __FreeBSD__
275 struct mtx *pmtx;
276
277 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
278
279 pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);
280
281 rpipe = wpipe = NULL;
282 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
283 pipeclose(rpipe);
284 pipeclose(wpipe);
285 free(pmtx, M_TEMP);
286 return (ENFILE);
287 }
288
289 error = falloc(td, &rf, &fd);
290 if (error) {
291 pipeclose(rpipe);
292 pipeclose(wpipe);
293 free(pmtx, M_TEMP);
294 return (error);
295 }
296 fhold(rf);
297 td->td_retval[0] = fd;
298
299 /*
300 * Warning: once we've gotten past allocation of the fd for the
301 * read-side, we can only drop the read side via fdrop() in order
302 * to avoid races against processes which manage to dup() the read
303 * side while we are blocked trying to allocate the write side.
304 */
305 FILE_LOCK(rf);
306 rf->f_flag = FREAD | FWRITE;
307 rf->f_type = DTYPE_PIPE;
308 rf->f_data = (caddr_t)rpipe;
309 rf->f_ops = &pipeops;
310 FILE_UNLOCK(rf);
311 error = falloc(td, &wf, &fd);
312 if (error) {
313 struct filedesc *fdp = td->td_proc->p_fd;
314 FILEDESC_LOCK(fdp);
315 if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
316 fdp->fd_ofiles[td->td_retval[0]] = NULL;
317 FILEDESC_UNLOCK(fdp);
318 fdrop(rf, td);
319 } else
320 FILEDESC_UNLOCK(fdp);
321 fdrop(rf, td);
322 /* rpipe has been closed by fdrop(). */
323 pipeclose(wpipe);
324 free(pmtx, M_TEMP);
325 return (error);
326 }
327 FILE_LOCK(wf);
328 wf->f_flag = FREAD | FWRITE;
329 wf->f_type = DTYPE_PIPE;
330 wf->f_data = (caddr_t)wpipe;
331 wf->f_ops = &pipeops;
332 p->p_retval[1] = fd;
333 rpipe->pipe_peer = wpipe;
334 wpipe->pipe_peer = rpipe;
335 mtx_init(pmtx, "pipe mutex", MTX_DEF);
336 rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
337 fdrop(rf, td);
338 #endif /* FreeBSD */
339
340 #ifdef __NetBSD__
341 p = l->l_proc;
342 rpipe = wpipe = NULL;
343 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
344 pipeclose(rpipe);
345 pipeclose(wpipe);
346 return (ENFILE);
347 }
348
349 /*
350 * Note: the file structure returned from falloc() is marked
351 * as 'larval' initially. Unless we mark it as 'mature' by
352 * FILE_SET_MATURE(), any attempt to do anything with it would
353 * return EBADF, including e.g. dup(2) or close(2). This avoids
354 * file descriptor races if we block in the second falloc().
355 */
356
357 error = falloc(p, &rf, &fd);
358 if (error)
359 goto free2;
360 retval[0] = fd;
361 rf->f_flag = FREAD;
362 rf->f_type = DTYPE_PIPE;
363 rf->f_data = (caddr_t)rpipe;
364 rf->f_ops = &pipeops;
365
366 error = falloc(p, &wf, &fd);
367 if (error)
368 goto free3;
369 retval[1] = fd;
370 wf->f_flag = FWRITE;
371 wf->f_type = DTYPE_PIPE;
372 wf->f_data = (caddr_t)wpipe;
373 wf->f_ops = &pipeops;
374
375 rpipe->pipe_peer = wpipe;
376 wpipe->pipe_peer = rpipe;
377
378 FILE_SET_MATURE(rf);
379 FILE_SET_MATURE(wf);
380 FILE_UNUSE(rf, p);
381 FILE_UNUSE(wf, p);
382 return (0);
383 free3:
384 FILE_UNUSE(rf, p);
385 ffree(rf);
386 fdremove(p->p_fd, retval[0]);
387 free2:
388 pipeclose(wpipe);
389 pipeclose(rpipe);
390 #endif /* NetBSD */
391
392 return (error);
393 }
394
395 /*
396 * Allocate kva for pipe circular buffer, the space is pageable
397 * This routine will 'realloc' the size of a pipe safely, if it fails
398 * it will retain the old buffer.
399 * If it fails it will return ENOMEM.
400 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
#ifdef __FreeBSD__
	struct vm_object *object;
	int npages, error;

	GIANT_REQUIRED;
	/* The VM calls below may block; the pipe mutex must not be held. */
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	       ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Allocate pageable virtual address space. Physical memory is
	 * allocated on demand.
	 */
	buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
	if (buffer == NULL)
		return (ENOMEM);
#endif /* NetBSD */

	/*
	 * Free old resources if we're resizing.  Done only after the new
	 * allocation succeeded, so a failed resize keeps the old buffer
	 * intact (see the contract in the comment above this function).
	 */
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	cpipe->pipe_buffer.object = object;
#endif
	/* Install the new, empty circular buffer. */
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}
460
461 /*
462 * initialize and allocate VM and memory for pipe
463 */
static int
pipe_create(cpipep, allockva)
	struct pipe **cpipep;
	int allockva;
{
	struct pipe *cpipe;
	int error;

	/* Allocate the pipe structure from the platform-specific pool. */
#ifdef __FreeBSD__
	*cpipep = zalloc(pipe_zone);
#endif
#ifdef __NetBSD__
	*cpipep = pool_get(&pipe_pool, M_WAITOK);
#endif
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* Initialize */
	memset(cpipe, 0, sizeof(*cpipe));
	/* Start with PIPE_SIGNALR so the first reader wakeup is sent. */
	cpipe->pipe_state = PIPE_SIGNALR;

#ifdef __FreeBSD__
	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
#endif
	/*
	 * On pipespace() failure *cpipep remains valid; the caller is
	 * expected to dispose of it with pipeclose().
	 */
	if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
		return (error);

	/* Stamp creation time; atime/mtime start out identical. */
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
#ifdef __NetBSD__
	cpipe->pipe_pgid = NO_PID;	/* no SIGIO recipient yet */
	lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
#endif

	return (0);
}
503
504
505 /*
506 * lock a pipe for I/O, blocking other access
507 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	/*
	 * Spin until the I/O lock flag is free, sleeping (interruptibly
	 * if 'catch') whenever another thread holds it.
	 */
	for (;;) {
		if ((cpipe->pipe_state & PIPE_LOCKFL) == 0) {
			cpipe->pipe_state |= PIPE_LOCKFL;
			return (0);
		}
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO, "pipelk", 0);
		if (error != 0)
			return (error);
	}
#endif

#ifdef __NetBSD__
	/*
	 * lockmgr() does the sleeping; when signals must not interrupt
	 * us (!catch), retry on EINTR/ERESTART instead of failing.
	 */
	for (;;) {
		error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
		if (catch || (error != EINTR && error != ERESTART))
			return (error);
	}
#endif
}
536
537 /*
538 * unlock a pipe I/O lock
539 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{
#ifdef __FreeBSD__
	int state;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	/*
	 * Release the I/O lock flag and, if anyone recorded interest
	 * via PIPE_LWANT, wake them so they can retry pipelock().
	 */
	state = cpipe->pipe_state;
	cpipe->pipe_state = state & ~(PIPE_LOCKFL | PIPE_LWANT);
	if (state & PIPE_LWANT)
		wakeup(cpipe);
#endif

#ifdef __NetBSD__
	/* lockmgr() handles waiter wakeup itself. */
	lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
#endif
}
558
/*
 * Select/poll wakeup. This also sends SIGIO to peer connected to
 * 'sigp' side of pipe.
 */
static __inline void
pipeselwakeup(selp, sigp)
	struct pipe *selp, *sigp;
{

#ifdef __FreeBSD__
	/* Wake anyone select(2)/poll(2)ing on 'selp'. */
	if (selp->pipe_state & PIPE_SEL) {
		selp->pipe_state &= ~PIPE_SEL;
		selwakeup(&selp->pipe_sel);
	}
	/* Deliver SIGIO only if async mode (PIPE_ASYNC) was requested. */
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
		pgsigio(sigp->pipe_sigio, SIGIO, 0);
	KNOTE(&selp->pipe_sel.sel_note, 0);
#endif

#ifdef __NetBSD__
	selnotify(&selp->pipe_sel, 0);
	/*
	 * pipe_pgid < 0 names a process group, > 0 a single process;
	 * NO_PID means nobody asked for SIGIO delivery.
	 */
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) &&
	    sigp->pipe_pgid != NO_PID) {
		struct proc *p;

		if (sigp->pipe_pgid < 0)
			gsignal(-sigp->pipe_pgid, SIGIO);
		else if (sigp->pipe_pgid > 0 && (p = pfind(sigp->pipe_pgid)) != 0)
			psignal(p, SIGIO);
	}
#endif /* NetBSD */
}
591
592 /* ARGSUSED */
593 #ifdef __FreeBSD__
594 static int
595 pipe_read(fp, uio, cred, flags, td)
596 struct file *fp;
597 struct uio *uio;
598 struct ucred *cred;
599 struct thread *td;
600 int flags;
601 struct proc *p;
602 #elif defined(__NetBSD__)
603 static int
604 pipe_read(fp, offset, uio, cred, flags)
605 struct file *fp;
606 off_t *offset;
607 struct uio *uio;
608 struct ucred *cred;
609 int flags;
610 #endif
611 {
612 struct pipe *rpipe = (struct pipe *) fp->f_data;
613 int error;
614 size_t nread = 0;
615 size_t size;
616 size_t ocnt;
617
618 PIPE_LOCK(rpipe);
619 ++rpipe->pipe_busy;
620 ocnt = rpipe->pipe_buffer.cnt;
621
622 error = pipelock(rpipe, 1);
623 if (error)
624 goto unlocked_error;
625
626 while (uio->uio_resid) {
627 /*
628 * normal pipe buffer receive
629 */
630 if (rpipe->pipe_buffer.cnt > 0) {
631 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
632 if (size > rpipe->pipe_buffer.cnt)
633 size = rpipe->pipe_buffer.cnt;
634 if (size > uio->uio_resid)
635 size = uio->uio_resid;
636
637 PIPE_UNLOCK(rpipe);
638 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
639 size, uio);
640 PIPE_LOCK(rpipe);
641 if (error)
642 break;
643
644 rpipe->pipe_buffer.out += size;
645 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
646 rpipe->pipe_buffer.out = 0;
647
648 rpipe->pipe_buffer.cnt -= size;
649
650 /*
651 * If there is no more to read in the pipe, reset
652 * its pointers to the beginning. This improves
653 * cache hit stats.
654 */
655 if (rpipe->pipe_buffer.cnt == 0) {
656 rpipe->pipe_buffer.in = 0;
657 rpipe->pipe_buffer.out = 0;
658 }
659 nread += size;
660 #ifndef PIPE_NODIRECT
661 /*
662 * Direct copy, bypassing a kernel buffer.
663 */
664 } else if ((size = rpipe->pipe_map.cnt) &&
665 (rpipe->pipe_state & PIPE_DIRECTW)) {
666 caddr_t va;
667 if (size > uio->uio_resid)
668 size = uio->uio_resid;
669
670 va = (caddr_t) rpipe->pipe_map.kva +
671 rpipe->pipe_map.pos;
672 PIPE_UNLOCK(rpipe);
673 error = uiomove(va, size, uio);
674 PIPE_LOCK(rpipe);
675 if (error)
676 break;
677 nread += size;
678 rpipe->pipe_map.pos += size;
679 rpipe->pipe_map.cnt -= size;
680 if (rpipe->pipe_map.cnt == 0) {
681 rpipe->pipe_state &= ~PIPE_DIRECTW;
682 wakeup(rpipe);
683 }
684 #endif
685 } else {
686 /*
687 * detect EOF condition
688 * read returns 0 on EOF, no need to set error
689 */
690 if (rpipe->pipe_state & PIPE_EOF)
691 break;
692
693 /*
694 * If the "write-side" has been blocked, wake it up now.
695 */
696 if (rpipe->pipe_state & PIPE_WANTW) {
697 rpipe->pipe_state &= ~PIPE_WANTW;
698 wakeup(rpipe);
699 }
700
701 /*
702 * Break if some data was read.
703 */
704 if (nread > 0)
705 break;
706
707 /*
708 * don't block on non-blocking I/O
709 */
710 if (fp->f_flag & FNONBLOCK) {
711 error = EAGAIN;
712 break;
713 }
714
715 /*
716 * Unlock the pipe buffer for our remaining processing.
717 * We will either break out with an error or we will
718 * sleep and relock to loop.
719 */
720 pipeunlock(rpipe);
721
722 /*
723 * We want to read more, wake up select/poll.
724 */
725 pipeselwakeup(rpipe, rpipe->pipe_peer);
726
727 rpipe->pipe_state |= PIPE_WANTR;
728 #ifdef __FreeBSD__
729 error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
730 "piperd", 0);
731 #else
732 error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
733 #endif
734 if (error != 0 || (error = pipelock(rpipe, 1)))
735 goto unlocked_error;
736 }
737 }
738 pipeunlock(rpipe);
739
740 /* XXX: should probably do this before getting any locks. */
741 if (error == 0)
742 vfs_timestamp(&rpipe->pipe_atime);
743 unlocked_error:
744 --rpipe->pipe_busy;
745
746 /*
747 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
748 */
749 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
750 rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
751 wakeup(rpipe);
752 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
753 /*
754 * Handle write blocking hysteresis.
755 */
756 if (rpipe->pipe_state & PIPE_WANTW) {
757 rpipe->pipe_state &= ~PIPE_WANTW;
758 wakeup(rpipe);
759 }
760 }
761
762 /*
763 * If anything was read off the buffer, signal to the writer it's
764 * possible to write more data. Also send signal if we are here for the
765 * first time after last write.
766 */
767 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
768 && (ocnt != rpipe->pipe_buffer.cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
769 pipeselwakeup(rpipe, rpipe->pipe_peer);
770 rpipe->pipe_state &= ~PIPE_SIGNALR;
771 }
772
773 PIPE_UNLOCK(rpipe);
774 return (error);
775 }
776
777 #ifdef __FreeBSD__
778 #ifndef PIPE_NODIRECT
779 /*
780 * Map the sending processes' buffer into kernel space and wire it.
781 * This is similar to a physical write operation.
782 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	size_t size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	/* Clamp the transfer to the pipe buffer size. */
	size = uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	/*
	 * Fault in and wire each user page of the source buffer; on any
	 * failure, unwire everything wired so far and bail with EFAULT.
	 */
	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	/* 'pos' is the sub-page offset of the first byte. */
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	/* NOTE(review): uio_offset updated here but not in the NetBSD
	 * direct write -- presumably unused for pipes; confirm. */
	uio->uio_offset += size;
	return (0);
}
853
854 /*
855 * unmap and unwire the process buffer
856 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		/*
		 * Keep the kva cached for the next direct write unless
		 * total pipe kva consumption is over the soft limit.
		 */
		if (amountpipekva > maxpipekva) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	/* Drop the wiring taken in pipe_build_write_buffer(). */
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	wpipe->pipe_map.npages = 0;
}
881
882 /*
883 * In the case of a signal, the writing process might go away. This
884 * code copies the data into the circular buffer so that the source
885 * pages can be freed without loss of data.
886 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	/* Copy the unread remainder of the mapped buffer ... */
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	memcpy((caddr_t) wpipe->pipe_buffer.buffer,
	    (caddr_t) wpipe->pipe_map.kva + pos, size);

	/* ... and make it look like a normal buffered write. */
	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	/* Giant is needed for the VM operations in the teardown. */
	PIPE_GET_GIANT(wpipe);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}
909
910 /*
911 * This implements the pipe buffer write mechanism. Note that only
912 * a direct write OR a normal pipe write can be pending at any given time.
913 * If there are any characters in the pipe buffer, the direct write will
914 * be deferred until the receiving process grabs all of the bytes from
915 * the pipe buffer. Then the direct mapping write is set-up.
916 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	/* Wait for any previous direct write to be fully consumed. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/*
	 * The direct write may only start once the reader has drained
	 * the normal buffer; sleep and re-check from the top.
	 */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	/* Wiring/mapping the user pages needs Giant, not the pipe lock. */
	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	/* Wait until the reader consumes the mapped data (or EOF). */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe, wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	/* Error exit: wake the peer so it re-evaluates pipe state. */
	wakeup(wpipe);
	return (error);
}
1010 #endif /* !PIPE_NODIRECT */
1011 #endif /* FreeBSD */
1012
1013 #ifdef __NetBSD__
1014 #ifndef PIPE_NODIRECT
1015 /*
1016 * Allocate structure for loan transfer.
1017 */
1018 static int
1019 pipe_loan_alloc(wpipe, npages)
1020 struct pipe *wpipe;
1021 int npages;
1022 {
1023 vsize_t len;
1024
1025 len = (vsize_t)npages << PAGE_SHIFT;
1026 wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, len);
1027 if (wpipe->pipe_map.kva == 0)
1028 return (ENOMEM);
1029
1030 amountpipekva += len;
1031 wpipe->pipe_map.npages = npages;
1032 wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
1033 M_WAITOK);
1034 return (0);
1035 }
1036
1037 /*
1038 * Free resources allocated for loan transfer.
1039 */
1040 static void
1041 pipe_loan_free(wpipe)
1042 struct pipe *wpipe;
1043 {
1044 vsize_t len;
1045
1046 len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
1047 uvm_km_free(kernel_map, wpipe->pipe_map.kva, len);
1048 wpipe->pipe_map.kva = 0;
1049 amountpipekva -= len;
1050 free(wpipe->pipe_map.pgs, M_PIPE);
1051 wpipe->pipe_map.pgs = NULL;
1052 }
1053
1054 /*
1055 * NetBSD direct write, using uvm_loan() mechanism.
1056 * This implements the pipe buffer write mechanism. Note that only
1057 * a direct write OR a normal pipe write can be pending at any given time.
1058 * If there are any characters in the pipe buffer, the direct write will
1059 * be deferred until the receiving process grabs all of the bytes from
1060 * the pipe buffer. Then the direct mapping write is set-up.
1061 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, npages, j;
	struct vm_page **pgs;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;

retry:
	/* Wait for any previous direct write to be fully consumed. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/*
	 * The direct write may only start once the reader has drained
	 * the normal buffer; sleep and re-check from the top.
	 */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
		goto retry;
	}

	/*
	 * Handle first PIPE_CHUNK_SIZE bytes of buffer. Deal with buffers
	 * not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;		/* page-aligned span to loan */
	bpos = bbase - base;		/* sub-page offset of first byte */

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;

	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;	/* payload bytes offered */

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error) {
			goto error;
		}
	}

	/* Loan the write buffer memory from writer process */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
	    pgs, UVM_LOAN_TOPAGE);
	if (error) {
		/* pgs == NULL tells cleanup not to unloan/unmap. */
		pgs = NULL;
		goto cleanup;
	}

	/* Enter the loaned pages to kva */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
	}
	pmap_update(pmap_kernel());

	/* Wait until the reader consumes the loaned data (or EOF). */
	wpipe->pipe_state |= PIPE_DIRECTW;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	if (error)
		wpipe->pipe_state &= ~PIPE_DIRECTW;

cleanup:
	/* Tear down the loan under the pipe I/O lock. */
	pipelock(wpipe, 0);
	if (pgs != NULL) {
		pmap_kremove(wpipe->pipe_map.kva, blen);
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);
	pipeunlock(wpipe);

	if (error) {
		pipeselwakeup(wpipe, wpipe);

		/*
		 * If nothing was read from what we offered, return error
		 * straight on. Otherwise update uio resid first. Caller
		 * will deal with the error condition, returning short
		 * write, error, or restarting the write(2) as appropriate.
		 *
		 * Note: the early 'goto error's above jump directly to
		 * the label inside this block, before bcnt is set; they
		 * only execute the wakeup+return.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
 error:
			wakeup(wpipe);
			return (error);
		}

		/* Partial consumption: report the bytes actually read. */
		bcnt -= wpipe->pipe_map.cnt;
	}

	/* Advance the uio past the transferred bytes. */
	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (error);
}
1215 #endif /* !PIPE_NODIRECT */
1216 #endif /* NetBSD */
1217
1218 #ifdef __FreeBSD__
1219 static int
1220 pipe_write(fp, uio, cred, flags, td)
1221 struct file *fp;
1222 off_t *offset;
1223 struct uio *uio;
1224 struct ucred *cred;
1225 int flags;
1226 struct thread *td;
1227 #elif defined(__NetBSD__)
1228 static int
1229 pipe_write(fp, offset, uio, cred, flags)
1230 struct file *fp;
1231 off_t *offset;
1232 struct uio *uio;
1233 struct ucred *cred;
1234 int flags;
1235 #endif
1236 {
1237 int error = 0;
1238 struct pipe *wpipe, *rpipe;
1239
1240 rpipe = (struct pipe *) fp->f_data;
1241 wpipe = rpipe->pipe_peer;
1242
1243 PIPE_LOCK(rpipe);
1244 /*
1245 * detect loss of pipe read side, issue SIGPIPE if lost.
1246 */
1247 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1248 PIPE_UNLOCK(rpipe);
1249 return (EPIPE);
1250 }
1251
1252 ++wpipe->pipe_busy;
1253
1254 /*
1255 * If it is advantageous to resize the pipe buffer, do
1256 * so.
1257 */
1258 if ((uio->uio_resid > PIPE_SIZE) &&
1259 (nbigpipe < maxbigpipes) &&
1260 #ifndef PIPE_NODIRECT
1261 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1262 #endif
1263 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
1264 (wpipe->pipe_buffer.cnt == 0)) {
1265
1266 if ((error = pipelock(wpipe,1)) == 0) {
1267 PIPE_GET_GIANT(rpipe);
1268 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
1269 nbigpipe++;
1270 PIPE_DROP_GIANT(rpipe);
1271 pipeunlock(wpipe);
1272 } else {
1273 /*
1274 * If an error occurred, unbusy and return, waking up
1275 * any waiting readers.
1276 */
1277 --wpipe->pipe_busy;
1278 if (wpipe->pipe_busy == 0
1279 && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1280 wpipe->pipe_state &=
1281 ~(PIPE_WANTCLOSE | PIPE_WANTR);
1282 wakeup(wpipe);
1283 }
1284
1285 return (error);
1286 }
1287 }
1288
1289 #ifdef __FreeBSD__
1290 /*
1291 * If an early error occured unbusy and return, waking up any pending
1292 * readers.
1293 */
1294 if (error) {
1295 --wpipe->pipe_busy;
1296 if ((wpipe->pipe_busy == 0) &&
1297 (wpipe->pipe_state & PIPE_WANT)) {
1298 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1299 wakeup(wpipe);
1300 }
1301 PIPE_UNLOCK(rpipe);
1302 return(error);
1303 }
1304
1305 KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
1306 #endif
1307
1308 while (uio->uio_resid) {
1309 size_t space;
1310
1311 #ifndef PIPE_NODIRECT
1312 /*
1313 * If the transfer is large, we can gain performance if
1314 * we do process-to-process copies directly.
1315 * If the write is non-blocking, we don't use the
1316 * direct write mechanism.
1317 *
1318 * The direct write mechanism will detect the reader going
1319 * away on us.
1320 */
1321 if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
1322 (fp->f_flag & FNONBLOCK) == 0 &&
1323 (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
1324 error = pipe_direct_write(wpipe, uio);
1325
1326 /*
1327 * Break out if error occured, unless it's ENOMEM.
1328 * ENOMEM means we failed to allocate some resources
1329 * for direct write, so we just fallback to ordinary
1330 * write. If the direct write was successful,
1331 * process rest of data via ordinary write.
1332 */
1333 if (!error)
1334 continue;
1335
1336 if (error != ENOMEM)
1337 break;
1338 }
1339 #endif /* PIPE_NODIRECT */
1340
1341 /*
1342 * Pipe buffered writes cannot be coincidental with
1343 * direct writes. We wait until the currently executing
1344 * direct write is completed before we start filling the
1345 * pipe buffer. We break out if a signal occurs or the
1346 * reader goes away.
1347 */
1348 retrywrite:
1349 while (wpipe->pipe_state & PIPE_DIRECTW) {
1350 if (wpipe->pipe_state & PIPE_WANTR) {
1351 wpipe->pipe_state &= ~PIPE_WANTR;
1352 wakeup(wpipe);
1353 }
1354 #ifdef __FreeBSD__
1355 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1356 "pipbww", 0);
1357 #else
1358 error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
1359 #endif
1360 if (wpipe->pipe_state & PIPE_EOF)
1361 break;
1362 if (error)
1363 break;
1364 }
1365 if (wpipe->pipe_state & PIPE_EOF) {
1366 error = EPIPE;
1367 break;
1368 }
1369
1370 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1371
1372 /* Writes of size <= PIPE_BUF must be atomic. */
1373 if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
1374 space = 0;
1375
1376 if (space > 0) {
1377 int size; /* Transfer size */
1378 int segsize; /* first segment to transfer */
1379
1380 if ((error = pipelock(wpipe,1)) != 0)
1381 break;
1382
1383 /*
1384 * It is possible for a direct write to
1385 * slip in on us... handle it here...
1386 */
1387 if (wpipe->pipe_state & PIPE_DIRECTW) {
1388 pipeunlock(wpipe);
1389 goto retrywrite;
1390 }
1391 /*
1392 * If a process blocked in uiomove, our
1393 * value for space might be bad.
1394 *
1395 * XXX will we be ok if the reader has gone
1396 * away here?
1397 */
1398 if (space > wpipe->pipe_buffer.size -
1399 wpipe->pipe_buffer.cnt) {
1400 pipeunlock(wpipe);
1401 goto retrywrite;
1402 }
1403
1404 /*
1405 * Transfer size is minimum of uio transfer
1406 * and free space in pipe buffer.
1407 */
1408 if (space > uio->uio_resid)
1409 size = uio->uio_resid;
1410 else
1411 size = space;
1412 /*
1413 * First segment to transfer is minimum of
1414 * transfer size and contiguous space in
1415 * pipe buffer. If first segment to transfer
1416 * is less than the transfer size, we've got
1417 * a wraparound in the buffer.
1418 */
1419 segsize = wpipe->pipe_buffer.size -
1420 wpipe->pipe_buffer.in;
1421 if (segsize > size)
1422 segsize = size;
1423
1424 /* Transfer first segment */
1425
1426 PIPE_UNLOCK(rpipe);
1427 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1428 segsize, uio);
1429 PIPE_LOCK(rpipe);
1430
1431 if (error == 0 && segsize < size) {
1432 /*
1433 * Transfer remaining part now, to
1434 * support atomic writes. Wraparound
1435 * happened.
1436 */
1437 #ifdef DEBUG
1438 if (wpipe->pipe_buffer.in + segsize !=
1439 wpipe->pipe_buffer.size)
1440 panic("Expected pipe buffer wraparound disappeared");
1441 #endif
1442
1443 PIPE_UNLOCK(rpipe);
1444 error = uiomove(&wpipe->pipe_buffer.buffer[0],
1445 size - segsize, uio);
1446 PIPE_LOCK(rpipe);
1447 }
1448 if (error == 0) {
1449 wpipe->pipe_buffer.in += size;
1450 if (wpipe->pipe_buffer.in >=
1451 wpipe->pipe_buffer.size) {
1452 #ifdef DEBUG
1453 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
1454 panic("Expected wraparound bad");
1455 #endif
1456 wpipe->pipe_buffer.in = size - segsize;
1457 }
1458
1459 wpipe->pipe_buffer.cnt += size;
1460 #ifdef DEBUG
1461 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
1462 panic("Pipe buffer overflow");
1463 #endif
1464 }
1465 pipeunlock(wpipe);
1466 if (error)
1467 break;
1468 } else {
1469 /*
1470 * If the "read-side" has been blocked, wake it up now.
1471 */
1472 if (wpipe->pipe_state & PIPE_WANTR) {
1473 wpipe->pipe_state &= ~PIPE_WANTR;
1474 wakeup(wpipe);
1475 }
1476
1477 /*
1478 * don't block on non-blocking I/O
1479 */
1480 if (fp->f_flag & FNONBLOCK) {
1481 error = EAGAIN;
1482 break;
1483 }
1484
1485 /*
1486 * We have no more space and have something to offer,
1487 * wake up select/poll.
1488 */
1489 pipeselwakeup(wpipe, wpipe);
1490
1491 wpipe->pipe_state |= PIPE_WANTW;
1492 #ifdef __FreeBSD__
1493 error = msleep(wpipe, PIPE_MTX(rpipe),
1494 PRIBIO | PCATCH, "pipewr", 0);
1495 #else
1496 error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
1497 #endif
1498 if (error != 0)
1499 break;
1500 /*
1501 * If read side wants to go away, we just issue a signal
1502 * to ourselves.
1503 */
1504 if (wpipe->pipe_state & PIPE_EOF) {
1505 error = EPIPE;
1506 break;
1507 }
1508 }
1509 }
1510
1511 --wpipe->pipe_busy;
1512 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1513 wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
1514 wakeup(wpipe);
1515 } else if (wpipe->pipe_buffer.cnt > 0) {
1516 /*
1517 * If we have put any characters in the buffer, we wake up
1518 * the reader.
1519 */
1520 if (wpipe->pipe_state & PIPE_WANTR) {
1521 wpipe->pipe_state &= ~PIPE_WANTR;
1522 wakeup(wpipe);
1523 }
1524 }
1525
1526 /*
1527 * Don't return EPIPE if I/O was successful
1528 */
1529 if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
1530 && (uio->uio_resid == 0))
1531 error = 0;
1532
1533 if (error == 0)
1534 vfs_timestamp(&wpipe->pipe_mtime);
1535
1536 /*
1537 * We have something to offer, wake up select/poll.
1538 * wpipe->pipe_map.cnt is always 0 in this point (direct write
1539 * is only done synchronously), so check only wpipe->pipe_buffer.cnt
1540 */
1541 if (wpipe->pipe_buffer.cnt)
1542 pipeselwakeup(wpipe, wpipe);
1543
1544 /*
1545 * Arrange for next read(2) to do a signal.
1546 */
1547 wpipe->pipe_state |= PIPE_SIGNALR;
1548
1549 PIPE_UNLOCK(rpipe);
1550 return (error);
1551 }
1552
1553 /*
1554 * we implement a very minimal set of ioctls for compatibility with sockets.
1555 */
1556 int
1557 #ifdef __FreeBSD__
1558 pipe_ioctl(fp, cmd, data, td)
1559 struct file *fp;
1560 u_long cmd;
1561 caddr_t data;
1562 struct thread *td;
1563 #else
1564 pipe_ioctl(fp, cmd, data, p)
1565 struct file *fp;
1566 u_long cmd;
1567 caddr_t data;
1568 struct proc *p;
1569 #endif
1570 {
1571 struct pipe *mpipe = (struct pipe *)fp->f_data;
1572
1573 switch (cmd) {
1574
1575 case FIONBIO:
1576 return (0);
1577
1578 case FIOASYNC:
1579 PIPE_LOCK(mpipe);
1580 if (*(int *)data) {
1581 mpipe->pipe_state |= PIPE_ASYNC;
1582 } else {
1583 mpipe->pipe_state &= ~PIPE_ASYNC;
1584 }
1585 PIPE_UNLOCK(mpipe);
1586 return (0);
1587
1588 case FIONREAD:
1589 PIPE_LOCK(mpipe);
1590 #ifndef PIPE_NODIRECT
1591 if (mpipe->pipe_state & PIPE_DIRECTW)
1592 *(int *)data = mpipe->pipe_map.cnt;
1593 else
1594 #endif
1595 *(int *)data = mpipe->pipe_buffer.cnt;
1596 PIPE_UNLOCK(mpipe);
1597 return (0);
1598
1599 #ifdef __FreeBSD__
1600 case FIOSETOWN:
1601 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1602
1603 case FIOGETOWN:
1604 *(int *)data = fgetown(mpipe->pipe_sigio);
1605 return (0);
1606
1607 /* This is deprecated, FIOSETOWN should be used instead. */
1608 case TIOCSPGRP:
1609 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1610
1611 /* This is deprecated, FIOGETOWN should be used instead. */
1612 case TIOCGPGRP:
1613 *(int *)data = -fgetown(mpipe->pipe_sigio);
1614 return (0);
1615 #endif /* FreeBSD */
1616 #ifdef __NetBSD__
1617 case TIOCSPGRP:
1618 mpipe->pipe_pgid = *(int *)data;
1619 return (0);
1620
1621 case TIOCGPGRP:
1622 *(int *)data = mpipe->pipe_pgid;
1623 return (0);
1624 #endif /* NetBSD */
1625
1626 }
1627 return (EPASSTHROUGH);
1628 }
1629
int
#ifdef __FreeBSD__
pipe_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
#elif defined(__NetBSD__)
pipe_poll(fp, events, td)
	struct file *fp;
	int events;
	struct proc *td;
#endif
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
	/*
	 * Readable: buffered data present, a direct (page-loan) write in
	 * progress, or the write side has gone away (EOF).
	 */
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTW) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * Writable: peer missing or at EOF (a write would then return
	 * immediately), or at least PIPE_BUF bytes of buffer space free
	 * with no direct write in progress.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
		    || (
#ifndef PIPE_NODIRECT
		    ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
#endif
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	/* Hangup if either end has seen EOF or the peer is gone. */
	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	/*
	 * Nothing ready: record this thread so the pipe wakeup paths
	 * (pipeselwakeup) can notify it later.
	 */
	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		/*
		 * NOTE: wpipe cannot be NULL here if POLLOUT bits are in
		 * events: a NULL wpipe satisfied the writable test above,
		 * making revents non-zero, so this branch is skipped.
		 */
		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	PIPE_UNLOCK(rpipe);

	return (revents);
}
1687
1688 static int
1689 #ifdef __FreeBSD__
1690 pipe_stat(fp, ub, td)
1691 struct file *fp;
1692 struct stat *ub;
1693 struct thread *td;
1694 #else
1695 pipe_stat(fp, ub, td)
1696 struct file *fp;
1697 struct stat *ub;
1698 struct proc *td;
1699 #endif
1700 {
1701 struct pipe *pipe = (struct pipe *)fp->f_data;
1702
1703 memset((caddr_t)ub, 0, sizeof(*ub));
1704 ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
1705 ub->st_blksize = pipe->pipe_buffer.size;
1706 ub->st_size = pipe->pipe_buffer.cnt;
1707 ub->st_blocks = (ub->st_size) ? 1 : 0;
1708 #ifdef __FreeBSD__
1709 ub->st_atimespec = pipe->pipe_atime;
1710 ub->st_mtimespec = pipe->pipe_mtime;
1711 ub->st_ctimespec = pipe->pipe_ctime;
1712 #endif /* FreeBSD */
1713 #ifdef __NetBSD__
1714 TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec)
1715 TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1716 TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1717 #endif /* NetBSD */
1718 ub->st_uid = fp->f_cred->cr_uid;
1719 ub->st_gid = fp->f_cred->cr_gid;
1720 /*
1721 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1722 * XXX (st_dev, st_ino) should be unique.
1723 */
1724 return (0);
1725 }
1726
1727 /* ARGSUSED */
1728 static int
1729 #ifdef __FreeBSD__
1730 pipe_close(fp, td)
1731 struct file *fp;
1732 struct thread *td;
1733 #else
1734 pipe_close(fp, td)
1735 struct file *fp;
1736 struct proc *td;
1737 #endif
1738 {
1739 struct pipe *cpipe = (struct pipe *)fp->f_data;
1740
1741 #ifdef __FreeBSD__
1742 fp->f_ops = &badfileops;
1743 funsetown(cpipe->pipe_sigio);
1744 #endif
1745 fp->f_data = NULL;
1746 pipeclose(cpipe);
1747 return (0);
1748 }
1749
/*
 * Release the kernel virtual memory held by a pipe: the in-kernel
 * circular buffer and, unless PIPE_NODIRECT, the direct-write page-loan
 * window.  Updates the global accounting (nbigpipe, amountpipekva).
 */
static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipespace: pipe mutex locked"));
#endif

	if (cpipe->pipe_buffer.buffer != NULL) {
		/* Buffers larger than PIPE_SIZE count against nbigpipe. */
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
#ifdef __FreeBSD__
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#elif defined(__NetBSD__)
		uvm_km_free(kernel_map,
			(vaddr_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#endif /* NetBSD */
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	/* Release the direct-write mapping window, if one was set up. */
	if (cpipe->pipe_map.kva != 0) {
#ifdef __FreeBSD__
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
#elif defined(__NetBSD__)
		pipe_loan_free(cpipe);
#endif /* NetBSD */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}
1794
/*
 * Shut down one end of a pipe: wake and drain any busy users, post EOF
 * to the peer, then release all resources held by this end.
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
#ifdef __FreeBSD__
	int hadpeer = 0;	/* nonzero if a peer still existed at close */
#endif

	if (cpipe == NULL)
		return;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	/* Notify any select/poll waiters on this end. */
	pipeselwakeup(cpipe, cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
#ifdef __FreeBSD__
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
#else
		tsleep(cpipe, PRIBIO, "pipecl", 0);
#endif
	}

	/*
	 * Disconnect from peer: post EOF and a final wakeup so blocked
	 * readers/writers on the other end notice the close.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
#ifdef __FreeBSD__
		hadpeer++;
#endif
		pipeselwakeup(ppipe, ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
#ifdef __FreeBSD__
		KNOTE(&ppipe->pipe_sel.sel_note, 0);
#endif
		ppipe->pipe_peer = NULL;
	}
	/*
	 * free resources.  On FreeBSD the shared mutex is destroyed by
	 * whichever end closes last (!hadpeer).
	 */
#ifdef __FreeBSD__
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
#endif

#ifdef __NetBSD__
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_UNLOCK(cpipe);

	/* Drain the pipe lock before the struct goes back to the pool. */
	pipe_free_kmem(cpipe);
	(void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
	pool_put(&pipe_pool, cpipe);
#endif
}
1872
1873 static void
1874 filt_pipedetach(struct knote *kn)
1875 {
1876 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1877
1878 switch(kn->kn_filter) {
1879 case EVFILT_WRITE:
1880 /* need the peer structure, not our own */
1881 cpipe = cpipe->pipe_peer;
1882
1883 /* if reader end already closed, just return */
1884 if (!cpipe)
1885 return;
1886
1887 break;
1888 default:
1889 /* nothing to do */
1890 break;
1891 }
1892
1893 #ifdef DIAGNOSTIC
1894 if (kn->kn_hook != cpipe)
1895 panic("filt_pipedetach: inconsistent knote");
1896 #endif
1897
1898 PIPE_LOCK(cpipe);
1899 SLIST_REMOVE(&cpipe->pipe_sel.sel_note, kn, knote, kn_selnext);
1900 PIPE_UNLOCK(cpipe);
1901 }
1902
1903 /*ARGSUSED*/
1904 static int
1905 filt_piperead(struct knote *kn, long hint)
1906 {
1907 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1908 struct pipe *wpipe = rpipe->pipe_peer;
1909
1910 PIPE_LOCK(rpipe);
1911 kn->kn_data = rpipe->pipe_buffer.cnt;
1912 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1913 kn->kn_data = rpipe->pipe_map.cnt;
1914
1915 if ((rpipe->pipe_state & PIPE_EOF) ||
1916 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1917 kn->kn_flags |= EV_EOF;
1918 PIPE_UNLOCK(rpipe);
1919 return (1);
1920 }
1921 PIPE_UNLOCK(rpipe);
1922 return (kn->kn_data > 0);
1923 }
1924
1925 /*ARGSUSED*/
1926 static int
1927 filt_pipewrite(struct knote *kn, long hint)
1928 {
1929 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1930 struct pipe *wpipe = rpipe->pipe_peer;
1931
1932 PIPE_LOCK(rpipe);
1933 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1934 kn->kn_data = 0;
1935 kn->kn_flags |= EV_EOF;
1936 PIPE_UNLOCK(rpipe);
1937 return (1);
1938 }
1939 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1940 if (wpipe->pipe_state & PIPE_DIRECTW)
1941 kn->kn_data = 0;
1942
1943 PIPE_UNLOCK(rpipe);
1944 return (kn->kn_data >= PIPE_BUF);
1945 }
1946
/*
 * Filter descriptors for the read and write ends of a pipe.  The
 * leading 1 marks them as file-descriptor-attached filters (f_isfd);
 * both share filt_pipedetach and need no separate attach routine.
 */
static const struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static const struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
1951
1952 /*ARGSUSED*/
1953 static int
1954 pipe_kqfilter(struct file *fp, struct knote *kn)
1955 {
1956 struct pipe *cpipe;
1957
1958 cpipe = (struct pipe *)kn->kn_fp->f_data;
1959 switch (kn->kn_filter) {
1960 case EVFILT_READ:
1961 kn->kn_fop = &pipe_rfiltops;
1962 break;
1963 case EVFILT_WRITE:
1964 kn->kn_fop = &pipe_wfiltops;
1965 cpipe = cpipe->pipe_peer;
1966 if (cpipe == NULL) {
1967 /* other end of pipe has been closed */
1968 return (EBADF);
1969 }
1970 break;
1971 default:
1972 return (1);
1973 }
1974 kn->kn_hook = cpipe;
1975
1976 PIPE_LOCK(cpipe);
1977 SLIST_INSERT_HEAD(&cpipe->pipe_sel.sel_note, kn, kn_selnext);
1978 PIPE_UNLOCK(cpipe);
1979 return (0);
1980 }
1981
1982 #ifdef __NetBSD__
1983 static int
1984 pipe_fcntl(fp, cmd, data, p)
1985 struct file *fp;
1986 u_int cmd;
1987 caddr_t data;
1988 struct proc *p;
1989 {
1990 if (cmd == F_SETFL)
1991 return (0);
1992 else
1993 return (EOPNOTSUPP);
1994 }
1995
1996 /*
1997 * Handle pipe sysctls.
1998 */
1999 int
2000 sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
2001 int *name;
2002 u_int namelen;
2003 void *oldp;
2004 size_t *oldlenp;
2005 void *newp;
2006 size_t newlen;
2007 {
2008 /* All sysctl names at this level are terminal. */
2009 if (namelen != 1)
2010 return (ENOTDIR); /* overloaded */
2011
2012 switch (name[0]) {
2013 case KERN_PIPE_MAXKVASZ:
2014 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
2015 case KERN_PIPE_LIMITKVA:
2016 return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
2017 case KERN_PIPE_MAXBIGPIPES:
2018 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
2019 case KERN_PIPE_NBIGPIPES:
2020 return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
2021 case KERN_PIPE_KVASIZE:
2022 return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
2023 default:
2024 return (EOPNOTSUPP);
2025 }
2026 /* NOTREACHED */
2027 }
2028
/*
 * Initialize pipe structs: set up the pool ("pipepl") that backs
 * struct pipe allocations (released via pool_put() in pipeclose()).
 */
void
pipe_init(void)
{
	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl", NULL);
}
2037
2038 #endif /* __NetBSD __ */
2039