/*	$NetBSD: sys_pipe.c,v 1.4.2.2 2001/06/21 20:07:02 nathanw Exp $	*/

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.82 2001/06/15 20:45:01 jlemon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 *
 * Adaptation for NetBSD UVM, including the uvm_loan() based direct write,
 * was written by Jaromir Dolecek.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
 * those pages are also wired), and the receiving process can copy it
 * directly from the pages in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned to the user-mode side.  In that case, the pipe code on
 * FreeBSD arranges to copy the buffer supplied by the user process to
 * a pageable kernel buffer, and the receiving process will grab the data
 * from there.  Since signals don't happen all that often, the copy
 * operation is normally eliminated.
 * On NetBSD, the pages are mapped read-only, COW for the kernel by
 * uvm_loan(), so no explicit handling needs to be done; everything is
 * handled by the standard VM facilities.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
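
/*
 * Editor's illustrative sketch (not part of this kernel source): a
 * minimal userland program exercising both write paths described
 * above.  A write smaller than PIPE_MINDIRECT takes the buffered
 * path; a large blocking write may use the direct path.  The 64 KB
 * size below is an arbitrary example.
 */
#if 0
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char small[64];
	static char big[64 * 1024];	/* large enough for the direct path */

	if (pipe(fds) == -1)
		return (1);
	switch (fork()) {
	case 0: {			/* child: drain the pipe */
		char buf[8192];

		close(fds[1]);
		while (read(fds[0], buf, sizeof(buf)) > 0)
			continue;
		_exit(0);
	}
	case -1:
		return (1);
	}
	close(fds[0]);
	(void)write(fds[1], small, sizeof(small));	/* buffered path */
	(void)write(fds[1], big, sizeof(big));		/* may go direct */
	close(fds[1]);
	(void)wait(NULL);
	return (0);
}
#endif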

#ifdef __NetBSD__
#include "opt_new_pipe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/lock.h>
#ifdef __FreeBSD__
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/sysproto.h>
#elif defined(__NetBSD__)
#include <sys/select.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <uvm/uvm.h>
#include <sys/sysctl.h>
#endif /* NetBSD, FreeBSD */

#include <sys/pipe.h>

#ifdef __NetBSD__
#define vfs_timestamp(tv)	microtime(tv)
#endif

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
#ifdef __FreeBSD__
static int pipe_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));
static int pipe_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));
static int pipe_close __P((struct file *fp, struct proc *p));
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
		struct proc *p));
static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data,
		struct proc *p));

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
#endif /* FreeBSD */

#ifdef __NetBSD__
static int pipe_read __P((struct file *fp, off_t *offset, struct uio *uio,
		struct ucred *cred, int flags));
static int pipe_write __P((struct file *fp, off_t *offset, struct uio *uio,
		struct ucred *cred, int flags));
static int pipe_close __P((struct file *fp, struct proc *p));
static int pipe_poll __P((struct file *fp, int events, struct proc *p));
static int pipe_fcntl __P((struct file *fp, u_int com, caddr_t data,
		struct proc *p));
static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data,
		struct proc *p));

static struct fileops pipeops =
	{ pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
	  pipe_stat, pipe_close };
#endif /* NetBSD */

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE	(PIPE_SIZE/3)
#define MAXPIPESIZE	(2*PIPE_SIZE/3)
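
/*
 * Editor's note: with the usual PIPE_SIZE of 16384 bytes these evaluate
 * to roughly 5461 and 10922 bytes respectively; the actual value of
 * PIPE_SIZE comes from <sys/pipe.h>.
 */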

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA	(8*1024*1024)
static int maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general, though.
 */
#define LIMITPIPEKVA	(16*1024*1024)
static int limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int maxbigpipes = LIMITBIGPIPES;
static int nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static int amountpipekva = 0;

static void pipeclose __P((struct pipe *cpipe));
static void pipe_free_kmem __P((struct pipe *cpipe));
static int pipe_create __P((struct pipe **cpipep));
static __inline int pipelock __P((struct pipe *cpipe, int catch));
static __inline void pipeunlock __P((struct pipe *cpipe));
static __inline void pipeselwakeup __P((struct pipe *cpipe));
static int pipespace __P((struct pipe *cpipe, int size));

#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
static void pipe_clone_write_buffer __P((struct pipe *wpipe));
#endif

static vm_zone_t pipe_zone;
#endif /* FreeBSD */

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
static __inline int pipe_direct_write __P((struct pipe *wpipe,
		struct uio *uio));
static __inline int pipe_loan_alloc __P((struct pipe *wpipe, int npages,
		vsize_t blen));
static void pipe_loan_free __P((struct pipe *wpipe));
#endif /* PIPE_NODIRECT */

static struct pool pipe_pool;
#endif /* NetBSD */

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
#ifdef __FreeBSD__
int
pipe(p, uap)
	struct proc *p;
	struct pipe_args /* {
		int dummy;
	} */ *uap;
#elif defined(__NetBSD__)
int
sys_pipe(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
#endif
{
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

#ifdef __FreeBSD__
	if (pipe_zone == NULL)
		pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
#endif

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

#ifdef __FreeBSD__
	error = falloc(p, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	fhold(rf);
	p->p_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	error = falloc(p, &wf, &fd);
	if (error) {
		if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
			fdp->fd_ofiles[p->p_retval[0]] = NULL;
			fdrop(rf, p);
		}
		fdrop(rf, p);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	p->p_retval[1] = fd;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	fdrop(rf, p);
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Note: the file structure returned from falloc() is marked
	 * as 'larval' initially.  Unless we mark it as 'mature' by
	 * FILE_SET_MATURE(), any attempt to do anything with it would
	 * return EBADF, including e.g. dup(2) or close(2).  This avoids
	 * file descriptor races if we block in the second falloc().
	 */

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf);
	FILE_SET_MATURE(wf);
	FILE_UNUSE(rf, p);
	FILE_UNUSE(wf, p);
	return (0);
free3:
	FILE_UNUSE(rf, p);
	ffree(rf);
	fdremove(fdp, retval[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);
#endif /* NetBSD */

	return (error);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
#ifdef __FreeBSD__
	struct vm_object *object;
	int npages, error;

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 */
	mtx_lock(&vm_mtx);
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		mtx_unlock(&vm_mtx);
		return (ENOMEM);
	}
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Allocate pageable virtual address space.  Physical memory is
	 * allocated on demand.
	 */
	buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
	if (buffer == NULL)
		return (ENOMEM);
#endif /* NetBSD */

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	mtx_unlock(&vm_mtx);
	cpipe->pipe_buffer.object = object;
#endif
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

#ifdef __FreeBSD__
	*cpipep = zalloc(pipe_zone);
#endif
#ifdef __NetBSD__
	*cpipep = pool_get(&pipe_pool, M_WAITOK);
#endif
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

#ifdef __FreeBSD__
	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#endif /* FreeBSD */
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	cpipe->pipe_buffer.buffer = NULL;
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = PIPE_SIGNALR;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = NULL;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
#ifdef __NetBSD__
	cpipe->pipe_map.ms = NULL;
#endif
#endif /* !PIPE_NODIRECT */

	if ((error = pipespace(cpipe, PIPE_SIZE)))
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
#ifdef __NetBSD__
	cpipe->pipe_pgid = NO_PID;
	lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
#endif

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

#ifdef __FreeBSD__
	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO,
			"pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
#endif

#ifdef __NetBSD__
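	/*
	 * Editor's note: the lock was initialized with PCATCH, so
	 * lockmgr() may return EINTR/ERESTART; when the caller did not
	 * ask for catchable semantics, simply retry until the lock is
	 * obtained.
	 */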
	do {
		error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
	} while (!catch && (error == EINTR || error == ERESTART));
	return (error);
#endif
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{
#ifdef __FreeBSD__
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
#endif

#ifdef __NetBSD__
	lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
#endif
}

/*
 * Select/poll wakeup.  This also sends SIGIO to the peer connected to
 * the 'sigpipe' side of the pipe.
 */
static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{
	if (!cpipe)
		return;

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
#ifdef __FreeBSD__
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
#endif

#ifdef __NetBSD__
	if (cpipe && (cpipe->pipe_state & PIPE_ASYNC)
	    && cpipe->pipe_pgid != NO_PID) {
		struct proc *p;

		if (cpipe->pipe_pgid < 0)
			gsignal(-cpipe->pipe_pgid, SIGIO);
		else if (cpipe->pipe_pgid > 0 &&
		    (p = pfind(cpipe->pipe_pgid)) != 0)
			psignal(p, SIGIO);
	}
#endif /* NetBSD */
}

/* ARGSUSED */
#ifdef __FreeBSD__
static int
pipe_read(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct proc *p;
#elif defined(__NetBSD__)
static int
pipe_read(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;

	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	ocnt = rpipe->pipe_buffer.cnt;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
				size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t va;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
#ifdef __NetBSD__
				if (uio->uio_resid > 0 &&
				    (rpipe->pipe_state & PIPE_MOREW))
					goto waitformore;
#endif /* NetBSD */
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

#if defined(__NetBSD__) && !defined(PIPE_NODIRECT)
waitformore:
#endif
			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * We want to read more, wake up select/poll.
			 */
			pipeselwakeup(rpipe->pipe_peer);

			rpipe->pipe_state |= PIPE_WANTR;
			error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
			if (error != 0 || (error = pipelock(rpipe, 1)))
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
		rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/*
	 * If anything was read off the buffer, signal the writer that it's
	 * possible to write more data.  Also send the signal if this is the
	 * first read after the last write.
	 */
	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
	    && (ocnt != rpipe->pipe_buffer.cnt ||
		(rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe->pipe_peer);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	return (error);
}

#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	size_t size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	size = uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	mtx_lock(&vm_mtx);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			mtx_unlock(&vm_mtx);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	mtx_unlock(&vm_mtx);
	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	mtx_lock(&vm_mtx);
	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > maxpipekva) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	mtx_unlock(&vm_mtx);
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    (caddr_t) wpipe->pipe_buffer.buffer, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			pipe_destroy_write_buffer(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* FreeBSD */

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static __inline int
pipe_loan_alloc(wpipe, npages, blen)
	struct pipe *wpipe;
	int npages;
	vsize_t blen;
{
	wpipe->pipe_map.kva = uvm_km_valloc(kernel_map, blen);
	if (wpipe->pipe_map.kva == NULL)
		return (ENOMEM);

	amountpipekva += blen;
	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.ms = (struct vm_page **) malloc(
		npages * sizeof(struct vm_page *), M_PIPE, M_WAITOK);

	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(wpipe)
	struct pipe *wpipe;
{
	uvm_km_free(kernel_map, wpipe->pipe_map.kva,
		wpipe->pipe_map.npages * PAGE_SIZE);
	wpipe->pipe_map.kva = NULL;
	amountpipekva -= wpipe->pipe_map.npages * PAGE_SIZE;
	free(wpipe->pipe_map.ms, M_PIPE);
	wpipe->pipe_map.ms = NULL;
}

/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static __inline int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, i, npages, j;
	struct vm_page **res;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t boff, bpos;
	struct vm_map *wmap = &uio->uio_procp->p_vmspace->vm_map;
retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	/*
	 * For each iovec:
	 * 1. Loan the pages to kernel.
	 * 2. Set up pipe structures.
	 * 3. Wait until consumer reads it all or exits.
	 */
	boff = 0;
	for (i = 0; i < uio->uio_iovcnt; ) {
		/*
		 * Note: need to handle buffers not aligned to PAGE_SIZE.
		 */
		bbase = (vaddr_t)uio->uio_iov[i].iov_base;
		base = trunc_page(bbase + boff);
		bend = round_page(bbase + uio->uio_iov[i].iov_len);
		blen = bend - base;

		if (boff == 0)
			bpos = bbase % PAGE_SIZE;
		else
			bpos = 0;
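		/*
		 * Editor's worked example, assuming 4 KB pages: for
		 * iov_base == 0x1234 and iov_len == 0x3000, this gives
		 * base = 0x1000, bend = 0x5000, blen = 0x4000 and, for
		 * the first chunk, bpos = 0x234.
		 */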

		if (blen > PIPE_DIRECT_CHUNK) {
			blen = PIPE_DIRECT_CHUNK;
			boff += PIPE_DIRECT_CHUNK;
			bend = base + blen;
			bcnt = PIPE_DIRECT_CHUNK - bpos;
			wpipe->pipe_state |= PIPE_MOREW;
		} else {
			if (boff == 0)
				bcnt = uio->uio_iov[i].iov_len;
			else
				bcnt = ((bbase % PAGE_SIZE) +
				    uio->uio_iov[i].iov_len) % PIPE_DIRECT_CHUNK;
			boff = 0;
			i++;
			wpipe->pipe_state &= ~PIPE_MOREW;
		}

		npages = blen / PAGE_SIZE;

		/*
		 * Free the old kva if we need more pages than we have
		 * allocated.
		 */
		if (wpipe->pipe_map.kva
		    && npages > wpipe->pipe_map.npages)
			pipe_loan_free(wpipe);

		/* Allocate new kva. */
		if (!wpipe->pipe_map.kva) {
			if ((error = pipe_loan_alloc(wpipe,
			    npages, blen)))
				goto error;
		}

		/* Loan the write buffer memory from writer process */
		res = wpipe->pipe_map.ms;
		error = uvm_loan(wmap, base, blen,
		    (void **) res, UVM_LOAN_TOPAGE);
		if (error)
			goto cleanup;

		/* Enter the loaned pages to kva */
		kva = wpipe->pipe_map.kva;
		for (j = 0; j < npages; j++, kva += PAGE_SIZE)
			pmap_enter(pmap_kernel(), kva, res[j]->phys_addr,
				VM_PROT_READ, 0);

		wpipe->pipe_map.pos = bpos;
		wpipe->pipe_map.cnt = bcnt;
		wpipe->pipe_state |= PIPE_DIRECTW;

		error = 0;
		while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			pipeselwakeup(wpipe);
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
		}

cleanup:
		pipelock(wpipe, 0);
		if (amountpipekva > maxpipekva)
			pipe_loan_free(wpipe);
		uvm_unloanpage(res, npages);
		pipeunlock(wpipe);
		if (error) {
error:
			/* XXX update uio ? */
			if (error == EPIPE)
				pipeselwakeup(wpipe);

			wpipe->pipe_state &= ~PIPE_MOREW;
			goto error1;
		}

		uio->uio_offset += bcnt;
		uio->uio_resid -= bcnt;

	} /* for */

	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* NetBSD */

#ifdef __FreeBSD__
static int
pipe_write(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct proc *p;
#elif defined(__NetBSD__)
static int
pipe_write(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF))
		return (EPIPE);

	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		} else {
			/*
			 * If an error occurred, unbusy and return, waking up
			 * any pending readers.
			 */
			--wpipe->pipe_busy;
			if (wpipe->pipe_busy == 0
			    && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
				wpipe->pipe_state &=
				    ~(PIPE_WANTCLOSE | PIPE_WANTR);
			}

			return (error);
		}
	}

#ifdef __FreeBSD__
	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
#endif

	orig_resid = uio->uio_resid;
	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif /* !PIPE_NODIRECT */

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
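		/*
		 * Editor's note: with the historical PIPE_BUF of 512,
		 * e.g. a 300 byte write is never split; if only 200
		 * bytes are free, we fall through and sleep below
		 * rather than doing a partial copy.
		 */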

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			if ((error = pipelock(wpipe, 1)) != 0)
				break;

			/*
			 * It is possible for a direct write to
			 * slip in on us... handle it here...
			 */
			if (wpipe->pipe_state & PIPE_DIRECTW) {
				pipeunlock(wpipe);
				goto retrywrite;
			}
			/*
			 * If a process blocked in uiomove, our
			 * value for space might be bad.
			 *
			 * XXX will we be ok if the reader has gone
			 * away here?
			 */
			if (space > wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.cnt) {
				pipeunlock(wpipe);
				goto retrywrite;
			}

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;
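			/*
			 * Editor's worked example: size = 700, in = 16000,
			 * buffer size = 16384 gives segsize = 384; the
			 * remaining 316 bytes are then copied to
			 * buffer[0..315] below, and 'in' wraps to 316.
			 */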

			/* Transfer first segment */

			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
				segsize, uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
#ifdef DEBUG
				if (wpipe->pipe_buffer.in + segsize !=
				    wpipe->pipe_buffer.size)
					panic("Expected pipe buffer wraparound disappeared");
#endif

				error = uiomove(&wpipe->pipe_buffer.buffer[0],
					size - segsize, uio);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
#ifdef DEBUG
					if (wpipe->pipe_buffer.in !=
					    size - segsize +
					    wpipe->pipe_buffer.size)
						panic("Expected wraparound bad");
#endif
					wpipe->pipe_buffer.in = size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
#ifdef DEBUG
				if (wpipe->pipe_buffer.cnt >
				    wpipe->pipe_buffer.size)
					panic("Pipe buffer overflow");
#endif

			}
			pipeunlock(wpipe);
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
		wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
	    && (uio->uio_resid == 0))
		error = 0;

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (a direct write
	 * completes synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	return (error);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
#ifndef PIPE_NODIRECT
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
#endif
			*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

#ifdef __FreeBSD__
	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);
#endif /* FreeBSD */
#ifdef __NetBSD__
	case TIOCSPGRP:
		mpipe->pipe_pgid = *(int *)data;
		return (0);

	case TIOCGPGRP:
		*(int *)data = mpipe->pipe_pgid;
		return (0);
#endif /* NetBSD */

	}
	return (ENOTTY);
}

int
pipe_poll(fp, events, p)
	struct file *fp;
	int events;
	struct proc *p;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTW) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
		    || (
#ifndef PIPE_NODIRECT
		    ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
#endif
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(p, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}
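
/*
 * Editor's illustrative sketch: how the above maps to a userland
 * poll(2) call on the read end of a pipe.  POLLIN is reported when
 * data (or EOF) is pending, POLLHUP once the write side is gone.
 */
#if 0
#include <poll.h>

int
pipe_wait_readable(int fd)
{
	struct pollfd pfd;

	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, INFTIM) == -1)
		return (-1);
	return (pfd.revents & (POLLIN | POLLHUP));
}
#endif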

static int
pipe_stat(fp, ub, p)
	struct file *fp;
	struct stat *ub;
	struct proc *p;
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
#ifdef __FreeBSD__
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
#endif /* FreeBSD */
#ifdef __NetBSD__
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
#endif /* NetBSD */
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

#ifdef __FreeBSD__
	fp->f_ops = &badfileops;
	funsetown(cpipe->pipe_sigio);
#endif
	fp->f_data = NULL;
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	mtx_assert(&vm_mtx, MA_OWNED);
#endif
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
#ifdef __FreeBSD__
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#elif defined(__NetBSD__)
		uvm_km_free(kernel_map,
			(vaddr_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#endif /* NetBSD */

		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != NULL) {
#ifdef __FreeBSD__
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
#elif defined(__NetBSD__)
		pipe_loan_free(cpipe);
#endif /* NetBSD */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = NULL;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (!cpipe)
		return;

	pipeselwakeup(cpipe->pipe_peer);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
		tsleep(cpipe, PRIBIO, "pipecl", 0);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		ppipe->pipe_peer = NULL;
	}

	/*
	 * free resources
	 */
#ifdef __FreeBSD__
	mtx_lock(&vm_mtx);
#endif
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	/* XXX: erm, doesn't zalloc already have its own locks and
	 * not need the giant vm lock?
	 */
	zfree(pipe_zone, cpipe);
	mtx_unlock(&vm_mtx);
#endif /* FreeBSD */
#ifdef __NetBSD__
	pool_put(&pipe_pool, cpipe);
#endif
}

#ifdef __FreeBSD__
/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}
#endif /* FreeBSD */

#ifdef __NetBSD__
static int
pipe_fcntl(fp, cmd, data, p)
	struct file *fp;
	u_int cmd;
	caddr_t data;
	struct proc *p;
{
	if (cmd == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*
 * Handle pipe sysctls.
 */
int
sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_PIPE_MAXKVASZ:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
	case KERN_PIPE_LIMITKVA:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
	case KERN_PIPE_MAXBIGPIPES:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
	case KERN_PIPE_NBIGPIPES:
		return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
	case KERN_PIPE_KVASIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
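
/*
 * Editor's illustrative sketch: reading one of these values from
 * userland with sysctl(3).  The parent MIB node is assumed here to
 * be KERN_PIPE under CTL_KERN, matching the KERN_PIPE_* names above.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
print_nbigpipes(void)
{
	/* assumed MIB path; verify against the installed <sys/sysctl.h> */
	int mib[3] = { CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES };
	int val;
	size_t len = sizeof(val);

	if (sysctl(mib, 3, &val, &len, NULL, 0) == -1)
		return (-1);
	printf("nbigpipe = %d\n", val);
	return (0);
}
#endif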

/*
 * Initialize pipe structs.
 */
void
pipe_init(void)
{
	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
		0, NULL, NULL, M_PIPE);
}

#endif /* __NetBSD__ */