sys_pipe.c revision 1.9 1 /* $NetBSD: sys_pipe.c,v 1.9 2001/07/18 06:48:27 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1996 John S. Dyson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Absolutely no warranty of function or purpose is made by the author
17 * John S. Dyson.
18 * 4. Modifications may be freely made to this file if the above conditions
19 * are met.
20 *
21 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.82 2001/06/15 20:45:01 jlemon Exp $
22 */
23
24 /*
25 * This file contains a high-performance replacement for the socket-based
26 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
27 * all features of sockets, but does do everything that pipes normally
28 * do.
29 *
30 * Adaptation for NetBSD UVM, including uvm_loan() based direct write, was
31 * written by Jaromir Dolecek.
32 */
33
34 /*
35 * This code has two modes of operation, a small write mode and a large
36 * write mode. The small write mode acts like conventional pipes with
37 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
38 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
39 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
40 * those pages are also wired), and the receiving process can copy it directly
41 * from the pages in the sending process.
42 *
43 * If the sending process receives a signal, it is possible that it will
44 * go away, and certainly its address space can change, because control
45 * is returned back to the user-mode side. In that case, the pipe code
46 * arranges to copy the buffer supplied by the user process on FreeBSD, to
47 * a pageable kernel buffer, and the receiving process will grab the data
48 * from the pageable kernel buffer. Since signals don't happen all that often,
49 * the copy operation is normally eliminated.
50 * For NetBSD, the pages are mapped read-only, COW for kernel by uvm_loan(),
51 * so no explicit handling need to be done, all is handled by standard VM
52 * facilities.
53 *
54 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
55 * happen for small transfers so that the system will not spend all of
56 * its time context switching. PIPE_SIZE is constrained by the
57 * amount of kernel virtual memory.
58 */
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/proc.h>
63 #include <sys/fcntl.h>
64 #include <sys/file.h>
65 #include <sys/filedesc.h>
66 #include <sys/filio.h>
67 #include <sys/ttycom.h>
68 #include <sys/stat.h>
69 #include <sys/poll.h>
70 #include <sys/signalvar.h>
71 #include <sys/vnode.h>
72 #include <sys/uio.h>
73 #include <sys/lock.h>
74 #ifdef __FreeBSD__
75 #include <sys/mutex.h>
76 #include <sys/selinfo.h>
77 #include <sys/sysproto.h>
78 #elif defined(__NetBSD__)
79 #include <sys/select.h>
80 #include <sys/malloc.h>
81 #include <sys/mount.h>
82 #include <sys/syscallargs.h>
83 #include <uvm/uvm.h>
84 #include <sys/sysctl.h>
85 #endif /* NetBSD, FreeBSD */
86
87 #include <sys/pipe.h>
88
89 #ifdef __NetBSD__
90 #define vfs_timestamp(tv) microtime(tv)
91 #endif
92
93 /*
94 * Use this define if you want to disable *fancy* VM things. Expect an
95 * approx 30% decrease in transfer rate. This could be useful for
96 * OpenBSD.
97 */
98 /* #define PIPE_NODIRECT */
99
100 /*
101 * interfaces to the outside world
102 */
103 #ifdef __FreeBSD__
104 static int pipe_read __P((struct file *fp, struct uio *uio,
105 struct ucred *cred, int flags, struct proc *p));
106 static int pipe_write __P((struct file *fp, struct uio *uio,
107 struct ucred *cred, int flags, struct proc *p));
108 static int pipe_close __P((struct file *fp, struct proc *p));
109 static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
110 struct proc *p));
111 static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
112 static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
113 static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
114
115 static struct fileops pipeops = {
116 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
117 pipe_stat, pipe_close
118 };
119
120 static void filt_pipedetach(struct knote *kn);
121 static int filt_piperead(struct knote *kn, long hint);
122 static int filt_pipewrite(struct knote *kn, long hint);
123
124 static struct filterops pipe_rfiltops =
125 { 1, NULL, filt_pipedetach, filt_piperead };
126 static struct filterops pipe_wfiltops =
127 { 1, NULL, filt_pipedetach, filt_pipewrite };
128 #endif /* FreeBSD */
129
130 #ifdef __NetBSD__
131 static int pipe_read __P((struct file *fp, off_t *offset, struct uio *uio,
132 struct ucred *cred, int flags));
133 static int pipe_write __P((struct file *fp, off_t *offset, struct uio *uio,
134 struct ucred *cred, int flags));
135 static int pipe_close __P((struct file *fp, struct proc *p));
136 static int pipe_poll __P((struct file *fp, int events, struct proc *p));
137 static int pipe_fcntl __P((struct file *fp, u_int com, caddr_t data,
138 struct proc *p));
139 static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
140 static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
141
142 static struct fileops pipeops =
143 { pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
144 pipe_stat, pipe_close };
145 #endif /* NetBSD */
146
147 /*
148 * Default pipe buffer size(s), this can be kind-of large now because pipe
149 * space is pageable. The pipe code will try to maintain locality of
150 * reference for performance reasons, so small amounts of outstanding I/O
151 * will not wipe the cache.
152 */
153 #define MINPIPESIZE (PIPE_SIZE/3)
154 #define MAXPIPESIZE (2*PIPE_SIZE/3)
155
156 /*
157 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
158 * is there so that on large systems, we don't exhaust it.
159 */
160 #define MAXPIPEKVA (8*1024*1024)
161 static int maxpipekva = MAXPIPEKVA;
162
163 /*
164 * Limit for direct transfers; we cannot, of course, limit
165 * the amount of kva for pipes in general though.
166 */
167 #define LIMITPIPEKVA (16*1024*1024)
168 static int limitpipekva = LIMITPIPEKVA;
169
170 /*
171 * Limit the number of "big" pipes
172 */
173 #define LIMITBIGPIPES 32
174 static int maxbigpipes = LIMITBIGPIPES;
175 static int nbigpipe = 0;
176
177 /*
178 * Amount of KVA consumed by pipe buffers.
179 */
180 static int amountpipekva = 0;
181
182 static void pipeclose __P((struct pipe *cpipe));
183 static void pipe_free_kmem __P((struct pipe *cpipe));
184 static int pipe_create __P((struct pipe **cpipep, int allockva));
185 static __inline int pipelock __P((struct pipe *cpipe, int catch));
186 static __inline void pipeunlock __P((struct pipe *cpipe));
187 static __inline void pipeselwakeup __P((struct pipe *selp,
188 struct pipe *sigp));
189 static int pipespace __P((struct pipe *cpipe, int size));
190
191 #ifdef __FreeBSD__
192 #ifndef PIPE_NODIRECT
193 static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
194 static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
195 static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
196 static void pipe_clone_write_buffer __P((struct pipe *wpipe));
197 #endif
198
199 static vm_zone_t pipe_zone;
200 #endif /* FreeBSD */
201
202 #ifdef __NetBSD__
203 #ifndef PIPE_NODIRECT
204 static __inline int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
205 static __inline int pipe_loan_alloc __P((struct pipe *wpipe, int npages,
206 vsize_t blen));
207 static void pipe_loan_free __P((struct pipe *wpipe));
208 #endif /* PIPE_NODIRECT */
209
210 static struct pool pipe_pool;
211 #endif /* NetBSD */
212
213 /*
214 * The pipe system call for the DTYPE_PIPE type of pipes
215 */
216
/*
 * pipe(2) system call: create a connected pair of pipe endpoints and
 * return the read-side descriptor in retval[0] and the write-side in
 * retval[1] (p_retval[] on FreeBSD).  Returns ENFILE when the pipe
 * structures cannot be created, otherwise any error from falloc().
 */
217 /* ARGSUSED */
218 #ifdef __FreeBSD__
219 int
220 pipe(p, uap)
221 struct proc *p;
222 struct pipe_args /* {
223 int dummy;
224 } */ *uap;
225 #elif defined(__NetBSD__)
226 int
227 sys_pipe(p, v, retval)
228 struct proc *p;
229 void *v;
230 register_t *retval;
231 #endif
232 {
233 struct filedesc *fdp = p->p_fd;
234 struct file *rf, *wf;
235 struct pipe *rpipe, *wpipe;
236 int fd, error;
237
238 #ifdef __FreeBSD__
/* Lazily create the pipe allocation zone on first use. */
239 if (pipe_zone == NULL)
240 pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
241
242 rpipe = wpipe = NULL;
243 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
/* pipeclose() tolerates a NULL argument, so closing both is safe. */
244 pipeclose(rpipe);
245 pipeclose(wpipe);
246 return (ENFILE);
247 }
248
249 error = falloc(p, &rf, &fd);
250 if (error) {
251 pipeclose(rpipe);
252 pipeclose(wpipe);
253 return (error);
254 }
255 fhold(rf);
256 p->p_retval[0] = fd;
257
258 /*
259 * Warning: once we've gotten past allocation of the fd for the
260 * read-side, we can only drop the read side via fdrop() in order
261 * to avoid races against processes which manage to dup() the read
262 * side while we are blocked trying to allocate the write side.
263 */
264 rf->f_flag = FREAD | FWRITE;
265 rf->f_type = DTYPE_PIPE;
266 rf->f_data = (caddr_t)rpipe;
267 rf->f_ops = &pipeops;
268 error = falloc(p, &wf, &fd);
269 if (error) {
/* Undo the descriptor-table entry only if it still points at rf. */
270 if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
271 fdp->fd_ofiles[p->p_retval[0]] = NULL;
272 fdrop(rf, p);
273 }
274 fdrop(rf, p);
275 /* rpipe has been closed by fdrop(). */
276 pipeclose(wpipe);
277 return (error);
278 }
279 wf->f_flag = FREAD | FWRITE;
280 wf->f_type = DTYPE_PIPE;
281 wf->f_data = (caddr_t)wpipe;
282 wf->f_ops = &pipeops;
283 p->p_retval[1] = fd;
284
285 rpipe->pipe_peer = wpipe;
286 wpipe->pipe_peer = rpipe;
287 fdrop(rf, p);
288 #endif /* FreeBSD */
289
290 #ifdef __NetBSD__
291 rpipe = wpipe = NULL;
/*
 * Only the read side gets buffer KVA up front (allockva == 1);
 * the write side is created without a buffer here.
 */
292 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
293 pipeclose(rpipe);
294 pipeclose(wpipe);
295 return (ENFILE);
296 }
297
298 /*
299 * Note: the file structure returned from falloc() is marked
300 * as 'larval' initially. Unless we mark it as 'mature' by
301 * FILE_SET_MATURE(), any attempt to do anything with it would
302 * return EBADF, including e.g. dup(2) or close(2). This avoids
303 * file descriptor races if we block in the second falloc().
304 */
305
306 error = falloc(p, &rf, &fd);
307 if (error)
308 goto free2;
309 retval[0] = fd;
310 rf->f_flag = FREAD;
311 rf->f_type = DTYPE_PIPE;
312 rf->f_data = (caddr_t)rpipe;
313 rf->f_ops = &pipeops;
314
315 error = falloc(p, &wf, &fd);
316 if (error)
317 goto free3;
318 retval[1] = fd;
319 wf->f_flag = FWRITE;
320 wf->f_type = DTYPE_PIPE;
321 wf->f_data = (caddr_t)wpipe;
322 wf->f_ops = &pipeops;
323
324 rpipe->pipe_peer = wpipe;
325 wpipe->pipe_peer = rpipe;
326
327 FILE_SET_MATURE(rf);
328 FILE_SET_MATURE(wf);
329 FILE_UNUSE(rf, p);
330 FILE_UNUSE(wf, p);
331 return (0);
/* Error unwinding: free3 undoes the read-side fd, free2 the pipes. */
332 free3:
333 FILE_UNUSE(rf, p);
334 ffree(rf);
335 fdremove(fdp, retval[0]);
336 free2:
337 pipeclose(wpipe);
338 pipeclose(rpipe);
339 #endif /* NetBSD */
340
341 return (error);
342 }
343
344 /*
345 * Allocate kva for pipe circular buffer, the space is pageable
346 * This routine will 'realloc' the size of a pipe safely, if it fails
347 * it will retain the old buffer.
348 * If it fails it will return ENOMEM.
349 */
/*
 * pipespace: allocate (or re-allocate) the pageable kernel virtual
 * space backing a pipe's circular buffer.  On failure the pipe's old
 * buffer is retained and ENOMEM is returned; on success the old buffer
 * is freed, the in/out/cnt indices are reset to an empty buffer, and
 * the global amountpipekva accounting is bumped by the new size.
 */
350 static int
351 pipespace(cpipe, size)
352 struct pipe *cpipe;
353 int size;
354 {
355 caddr_t buffer;
356 #ifdef __FreeBSD__
357 struct vm_object *object;
358 int npages, error;
359
360 npages = round_page(size)/PAGE_SIZE;
361 /*
362 * Create an object, I don't like the idea of paging to/from
363 * kernel_object.
364 */
365 mtx_lock(&vm_mtx);
366 object = vm_object_allocate(OBJT_DEFAULT, npages);
367 buffer = (caddr_t) vm_map_min(kernel_map);
368
369 /*
370 * Insert the object into the kernel map, and allocate kva for it.
371 * The map entry is, by default, pageable.
372 */
373 error = vm_map_find(kernel_map, object, 0,
374 (vm_offset_t *) &buffer, size, 1,
375 VM_PROT_ALL, VM_PROT_ALL, 0);
376
377 if (error != KERN_SUCCESS) {
378 vm_object_deallocate(object);
379 mtx_unlock(&vm_mtx);
380 return (ENOMEM);
381 }
382 #endif /* FreeBSD */
383
384 #ifdef __NetBSD__
385 /*
386 * Allocate pageable virtual address space. Physical memory is allocated
387 * on demand.
388 */
389 buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
390 if (buffer == NULL)
391 return (ENOMEM);
392 #endif /* NetBSD */
393
394 /* free old resources if we're resizing */
395 pipe_free_kmem(cpipe);
396 #ifdef __FreeBSD__
397 mtx_unlock(&vm_mtx);
398 cpipe->pipe_buffer.object = object;
399 #endif
400 cpipe->pipe_buffer.buffer = buffer;
401 cpipe->pipe_buffer.size = size;
/* Fresh buffer: reset to empty. */
402 cpipe->pipe_buffer.in = 0;
403 cpipe->pipe_buffer.out = 0;
404 cpipe->pipe_buffer.cnt = 0;
405 amountpipekva += cpipe->pipe_buffer.size;
406 return (0);
407 }
408
409 /*
410 * initialize and allocate VM and memory for pipe
411 */
/*
 * pipe_create: allocate and initialize one pipe endpoint.  When
 * "allockva" is non-zero, kernel buffer space (PIPE_SIZE bytes) is
 * allocated immediately via pipespace(); otherwise the endpoint starts
 * with no buffer.  Returns 0, or ENOMEM/pipespace() error.  Note that
 * on pipespace() failure the allocated struct is NOT freed here; the
 * caller is expected to dispose of it (see pipeclose() use in sys_pipe).
 */
412 static int
413 pipe_create(cpipep, allockva)
414 struct pipe **cpipep;
415 int allockva;
416 {
417 struct pipe *cpipe;
418 int error;
419
420 #ifdef __FreeBSD__
421 *cpipep = zalloc(pipe_zone);
422 #endif
423 #ifdef __NetBSD__
424 *cpipep = pool_get(&pipe_pool, M_WAITOK);
425 #endif
426 if (*cpipep == NULL)
427 return (ENOMEM);
428
429 cpipe = *cpipep;
430
431 #ifdef __FreeBSD__
432 /* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
433 cpipe->pipe_buffer.object = NULL;
434 #endif /* FreeBSD */
435 /*
436 * protect so pipeclose() doesn't follow a junk pointer
437 * if pipespace() fails.
438 */
439 cpipe->pipe_buffer.buffer = NULL;
440 memset(&cpipe->pipe_sel, 0, sizeof(cpipe->pipe_sel));
/* Start with PIPE_SIGNALR set so the first read triggers a writer wakeup. */
441 cpipe->pipe_state = PIPE_SIGNALR;
442 cpipe->pipe_peer = NULL;
443 cpipe->pipe_busy = 0;
444
445 #ifndef PIPE_NODIRECT
446 /*
447 * pipe data structure initializations to support direct pipe I/O
448 */
449 cpipe->pipe_map.cnt = 0;
450 cpipe->pipe_map.kva = NULL;
451 cpipe->pipe_map.pos = 0;
452 cpipe->pipe_map.npages = 0;
453 #ifdef __NetBSD__
454 cpipe->pipe_map.ms = NULL;
455 #endif
456 #endif /* !PIPE_NODIRECT */
457
458 if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
459 return (error);
460
/* All three timestamps start equal to creation time. */
461 vfs_timestamp(&cpipe->pipe_ctime);
462 cpipe->pipe_atime = cpipe->pipe_ctime;
463 cpipe->pipe_mtime = cpipe->pipe_ctime;
464 #ifdef __NetBSD__
465 cpipe->pipe_pgid = NO_PID;
/* PCATCH here means lockmgr() sleeps are signal-interruptible; see pipelock(). */
466 lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
467 #endif
468
469 return (0);
470 }
471
472
473 /*
474 * lock a pipe for I/O, blocking other access
475 */
/*
 * pipelock: acquire the pipe's exclusive I/O lock, blocking other
 * access.  If "catch" is non-zero the wait is signal-interruptible and
 * the signal error (EINTR/ERESTART) is returned to the caller.
 * Returns 0 on success, else an errno from the sleep.
 */
476 static __inline int
477 pipelock(cpipe, catch)
478 struct pipe *cpipe;
479 int catch;
480 {
481 int error;
482
483 #ifdef __FreeBSD__
484 while (cpipe->pipe_state & PIPE_LOCK) {
485 cpipe->pipe_state |= PIPE_LWANT;
/* Without "catch" the sleep omits PCATCH and is uninterruptible. */
486 error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO,
487 "pipelk", 0);
488 if (error != 0)
489 return (error);
490 }
491 cpipe->pipe_state |= PIPE_LOCK;
492 return (0);
493 #endif
494
495 #ifdef __NetBSD__
/*
 * The lock was initialized with PCATCH (see pipe_create()), so
 * lockmgr() can fail with EINTR/ERESTART.  When the caller did not
 * ask to catch signals, retry until the lock is acquired so a
 * signal error is never leaked to a non-catching caller.
 */
496 do {
497 error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
498 } while (!catch && (error == EINTR || error == ERESTART));
499 return (error);
500 #endif
501 }
502
503 /*
504 * unlock a pipe I/O lock
505 */
/*
 * pipeunlock: release the exclusive pipe I/O lock taken by pipelock().
 * FreeBSD tracks waiters explicitly via PIPE_LWANT and wakes them here;
 * NetBSD delegates waiter handling to lockmgr().
 */
506 static __inline void
507 pipeunlock(cpipe)
508 struct pipe *cpipe;
509 {
510 #ifdef __FreeBSD__
511 cpipe->pipe_state &= ~PIPE_LOCK;
512 if (cpipe->pipe_state & PIPE_LWANT) {
513 cpipe->pipe_state &= ~PIPE_LWANT;
514 wakeup(cpipe);
515 }
516 #endif
517
518 #ifdef __NetBSD__
519 lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
520 #endif
521 }
522
523 /*
524 * Select/poll wakeup. This also sends SIGIO to the peer connected to
525 * 'sigpipe' side of pipe.
526 */
/*
 * pipeselwakeup: wake select/poll waiters on "selp" and deliver SIGIO
 * to whoever registered for async I/O on "sigp" (may be NULL).
 */
527 static __inline void
528 pipeselwakeup(selp, sigp)
529 struct pipe *selp, *sigp;
530 {
531 if (selp->pipe_state & PIPE_SEL) {
532 selp->pipe_state &= ~PIPE_SEL;
533 selwakeup(&selp->pipe_sel);
534 }
535 #ifdef __FreeBSD__
536 if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
537 pgsigio(sigp->pipe_sigio, SIGIO, 0);
538 KNOTE(&selp->pipe_sel.si_note, 0);
539 #endif
540
541 #ifdef __NetBSD__
542 if (sigp && (sigp->pipe_state & PIPE_ASYNC)
543 && sigp->pipe_pgid != NO_PID){
544 struct proc *p;
545
/* Negative pgid means a process group; positive, a single process. */
546 if (sigp->pipe_pgid < 0)
547 gsignal(-sigp->pipe_pgid, SIGIO);
548 else if (sigp->pipe_pgid > 0 && (p = pfind(sigp->pipe_pgid)) != 0)
549 psignal(p, SIGIO);
550 }
551 #endif /* NetBSD */
552 }
553
/*
 * pipe_read: the file read operation for pipes.  Drains data from the
 * circular kernel buffer or, when a direct (page-loan/wired) write is
 * pending (PIPE_DIRECTW), directly from the mapped writer pages.
 * Blocks until data arrives unless FNONBLOCK is set (then EAGAIN).
 * Returns 0 with nothing transferred at EOF.  The "offset" parameter
 * (NetBSD) is unused; pipes have no file offset.
 */
554 /* ARGSUSED */
555 #ifdef __FreeBSD__
556 static int
557 pipe_read(fp, uio, cred, flags, p)
558 struct file *fp;
559 struct uio *uio;
560 struct ucred *cred;
561 int flags;
562 struct proc *p;
563 #elif defined(__NetBSD__)
564 static int
565 pipe_read(fp, offset, uio, cred, flags)
566 struct file *fp;
567 off_t *offset;
568 struct uio *uio;
569 struct ucred *cred;
570 int flags;
571 #endif
572 {
573 struct pipe *rpipe = (struct pipe *) fp->f_data;
574 int error;
575 size_t nread = 0;
576 size_t size;
577 size_t ocnt;
578
/* pipe_busy blocks pipeclose()'s PIPE_WANTCLOSE processing while we run. */
579 ++rpipe->pipe_busy;
580 error = pipelock(rpipe, 1);
581 if (error)
582 goto unlocked_error;
583
/* Remember the fill level so we can tell below whether this read freed space. */
584 ocnt = rpipe->pipe_buffer.cnt;
585
586 while (uio->uio_resid) {
587 /*
588 * normal pipe buffer receive
589 */
590 if (rpipe->pipe_buffer.cnt > 0) {
/* Copy at most up to the end of the circular buffer in one go. */
591 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
592 if (size > rpipe->pipe_buffer.cnt)
593 size = rpipe->pipe_buffer.cnt;
594 if (size > uio->uio_resid)
595 size = uio->uio_resid;
596
597 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
598 size, uio);
599 if (error)
600 break;
601
602 rpipe->pipe_buffer.out += size;
603 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
604 rpipe->pipe_buffer.out = 0;
605
606 rpipe->pipe_buffer.cnt -= size;
607
608 /*
609 * If there is no more to read in the pipe, reset
610 * its pointers to the beginning. This improves
611 * cache hit stats.
612 */
613 if (rpipe->pipe_buffer.cnt == 0) {
614 rpipe->pipe_buffer.in = 0;
615 rpipe->pipe_buffer.out = 0;
616 }
617 nread += size;
618 #ifndef PIPE_NODIRECT
619 /*
620 * Direct copy, bypassing a kernel buffer.
621 */
622 } else if ((size = rpipe->pipe_map.cnt) &&
623 (rpipe->pipe_state & PIPE_DIRECTW)) {
624 caddr_t va;
625 if (size > uio->uio_resid)
626 size = uio->uio_resid;
627
628 va = (caddr_t) rpipe->pipe_map.kva +
629 rpipe->pipe_map.pos;
630 error = uiomove(va, size, uio);
631 if (error)
632 break;
633 nread += size;
634 rpipe->pipe_map.pos += size;
635 rpipe->pipe_map.cnt -= size;
/* Fully consumed the direct-write window: release the blocked writer. */
636 if (rpipe->pipe_map.cnt == 0) {
637 rpipe->pipe_state &= ~PIPE_DIRECTW;
638 wakeup(rpipe);
639 }
640 #endif
641 } else {
642 /*
643 * detect EOF condition
644 * read returns 0 on EOF, no need to set error
645 */
646 if (rpipe->pipe_state & PIPE_EOF)
647 break;
648
649 /*
650 * If the "write-side" has been blocked, wake it up now.
651 */
652 if (rpipe->pipe_state & PIPE_WANTW) {
653 rpipe->pipe_state &= ~PIPE_WANTW;
654 wakeup(rpipe);
655 }
656
657 /*
658 * Break if some data was read.
659 */
660 if (nread > 0)
661 break;
662
663 /*
664 * don't block on non-blocking I/O
665 */
666 if (fp->f_flag & FNONBLOCK) {
667 error = EAGAIN;
668 break;
669 }
670
671 /*
672 * Unlock the pipe buffer for our remaining processing.
673 * We will either break out with an error or we will
674 * sleep and relock to loop.
675 */
676 pipeunlock(rpipe);
677
678 /*
679 * We want to read more, wake up select/poll.
680 */
681 pipeselwakeup(rpipe, rpipe->pipe_peer);
682
683 rpipe->pipe_state |= PIPE_WANTR;
684 error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
/* Both the sleep and the re-lock can fail; either way we exit unlocked. */
685 if (error != 0 || (error = pipelock(rpipe, 1)))
686 goto unlocked_error;
687 }
688 }
689 pipeunlock(rpipe);
690
691 if (error == 0)
692 vfs_timestamp(&rpipe->pipe_atime);
693 unlocked_error:
694 --rpipe->pipe_busy;
695
696 /*
697 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
698 */
699 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
700 rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
701 wakeup(rpipe);
702 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
703 /*
704 * Handle write blocking hysteresis.
705 */
706 if (rpipe->pipe_state & PIPE_WANTW) {
707 rpipe->pipe_state &= ~PIPE_WANTW;
708 wakeup(rpipe);
709 }
710 }
711
712 /*
713 * If anything was read off the buffer, signal to the writer it's
714 * possible to write more data. Also send signal if we are here for the
715 * first time after last write.
716 */
717 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
718 && (ocnt != rpipe->pipe_buffer.cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
719 pipeselwakeup(rpipe, rpipe->pipe_peer);
720 rpipe->pipe_state &= ~PIPE_SIGNALR;
721 }
722
723 return (error);
724 }
725
726 #ifdef __FreeBSD__
727 #ifndef PIPE_NODIRECT
728 /*
729 * Map the sending processes' buffer into kernel space and wire it.
730 * This is similar to a physical write operation.
731 *
 * Wires the user pages backing the first iovec of "uio" (at most
 * pipe_buffer.size bytes), records them in wpipe->pipe_map, maps them
 * at pipe_map.kva, and advances the uio past the captured bytes.
 * Returns 0 on success or EFAULT if any source page cannot be faulted
 * in, in which case all pages wired so far are unwired again.
 */
732 static int
733 pipe_build_write_buffer(wpipe, uio)
734 struct pipe *wpipe;
735 struct uio *uio;
736 {
737 size_t size;
738 int i;
739 vm_offset_t addr, endaddr, paddr;
740
741 size = uio->uio_iov->iov_len;
742 if (size > wpipe->pipe_buffer.size)
743 size = wpipe->pipe_buffer.size;
744
/* Walk the user buffer page by page, faulting in and wiring each page. */
745 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
746 mtx_lock(&vm_mtx);
747 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
748 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
749 vm_page_t m;
750
751 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
752 (paddr = pmap_kextract(addr)) == 0) {
753 int j;
754
/* Back out: unwire everything wired so far before failing. */
755 for (j = 0; j < i; j++)
756 vm_page_unwire(wpipe->pipe_map.ms[j], 1);
757 mtx_unlock(&vm_mtx);
758 return (EFAULT);
759 }
760
761 m = PHYS_TO_VM_PAGE(paddr);
762 vm_page_wire(m);
763 wpipe->pipe_map.ms[i] = m;
764 }
765
766 /*
767 * set up the control block
768 */
769 wpipe->pipe_map.npages = i;
770 wpipe->pipe_map.pos =
771 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
772 wpipe->pipe_map.cnt = size;
773
774 /*
775 * and map the buffer
776 */
777 if (wpipe->pipe_map.kva == 0) {
778 /*
779 * We need to allocate space for an extra page because the
780 * address range might (will) span pages at times.
781 */
782 wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
783 wpipe->pipe_buffer.size + PAGE_SIZE);
784 amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
785 }
786 pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
787 wpipe->pipe_map.npages);
788
789 mtx_unlock(&vm_mtx);
790 /*
791 * and update the uio data
792 */
793
794 uio->uio_iov->iov_len -= size;
795 uio->uio_iov->iov_base += size;
796 if (uio->uio_iov->iov_len == 0)
797 uio->uio_iov++;
798 uio->uio_resid -= size;
799 uio->uio_offset += size;
800 return (0);
801 }
802
803 /*
804 * unmap and unwire the process buffer
805 *
 * Counterpart of pipe_build_write_buffer().  The kva mapping is kept
 * cached for reuse by later direct writes, and is only freed when the
 * global amountpipekva accounting has exceeded maxpipekva.
 */
806 static void
807 pipe_destroy_write_buffer(wpipe)
808 struct pipe *wpipe;
809 {
810 int i;
811
812 mtx_lock(&vm_mtx);
813 if (wpipe->pipe_map.kva) {
814 pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
815
/* Release the cached kva only when we are over the soft kva limit. */
816 if (amountpipekva > maxpipekva) {
817 vm_offset_t kva = wpipe->pipe_map.kva;
818 wpipe->pipe_map.kva = 0;
819 kmem_free(kernel_map, kva,
820 wpipe->pipe_buffer.size + PAGE_SIZE);
821 amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
822 }
823 }
824 for (i = 0; i < wpipe->pipe_map.npages; i++)
825 vm_page_unwire(wpipe->pipe_map.ms[i], 1);
826 mtx_unlock(&vm_mtx);
827 }
828
829 /*
830 * In the case of a signal, the writing process might go away. This
831 * code copies the data into the circular buffer so that the source
832 * pages can be freed without loss of data.
833 *
 * Copies the unread remainder of the direct-write window into the
 * pipe's circular buffer, clears PIPE_DIRECTW, and tears down the
 * wired mapping.  Called with the pipe locked (see pipe_direct_write).
 */
834 static void
835 pipe_clone_write_buffer(wpipe)
836 struct pipe *wpipe;
837 {
838 int size;
839 int pos;
840
841 size = wpipe->pipe_map.cnt;
842 pos = wpipe->pipe_map.pos;
843 bcopy((caddr_t) wpipe->pipe_map.kva + pos,
844 (caddr_t) wpipe->pipe_buffer.buffer, size);
845
/* The cloned data now lives at the start of the circular buffer. */
846 wpipe->pipe_buffer.in = size;
847 wpipe->pipe_buffer.out = 0;
848 wpipe->pipe_buffer.cnt = size;
849 wpipe->pipe_state &= ~PIPE_DIRECTW;
850
851 pipe_destroy_write_buffer(wpipe);
852 }
853
854 /*
855 * This implements the pipe buffer write mechanism. Note that only
856 * a direct write OR a normal pipe write can be pending at any given time.
857 * If there are any characters in the pipe buffer, the direct write will
858 * be deferred until the receiving process grabs all of the bytes from
859 * the pipe buffer. Then the direct mapping write is set-up.
860 *
 * FreeBSD direct write (wired user pages).  Returns 0 on success or an
 * errno (notably EPIPE if the reader goes away, or the tsleep error if
 * interrupted by a signal).
 */
861 static int
862 pipe_direct_write(wpipe, uio)
863 struct pipe *wpipe;
864 struct uio *uio;
865 {
866 int error;
867
868 retry:
/* Wait for any previous direct write to be fully consumed by the reader. */
869 while (wpipe->pipe_state & PIPE_DIRECTW) {
870 if (wpipe->pipe_state & PIPE_WANTR) {
871 wpipe->pipe_state &= ~PIPE_WANTR;
872 wakeup(wpipe);
873 }
874 wpipe->pipe_state |= PIPE_WANTW;
875 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
876 if (error)
877 goto error1;
878 if (wpipe->pipe_state & PIPE_EOF) {
879 error = EPIPE;
880 goto error1;
881 }
882 }
883 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
/* Also wait until the circular buffer is drained, then re-check from retry. */
884 if (wpipe->pipe_buffer.cnt > 0) {
885 if (wpipe->pipe_state & PIPE_WANTR) {
886 wpipe->pipe_state &= ~PIPE_WANTR;
887 wakeup(wpipe);
888 }
889
890 wpipe->pipe_state |= PIPE_WANTW;
891 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
892 if (error)
893 goto error1;
894 if (wpipe->pipe_state & PIPE_EOF) {
895 error = EPIPE;
896 goto error1;
897 }
898 goto retry;
899 }
900
901 wpipe->pipe_state |= PIPE_DIRECTW;
902
903 error = pipe_build_write_buffer(wpipe, uio);
904 if (error) {
905 wpipe->pipe_state &= ~PIPE_DIRECTW;
906 goto error1;
907 }
908
/* Sleep until the reader consumes the whole window or the pipe dies. */
909 error = 0;
910 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
911 if (wpipe->pipe_state & PIPE_EOF) {
912 pipelock(wpipe, 0);
913 pipe_destroy_write_buffer(wpipe);
914 pipeunlock(wpipe);
915 pipeselwakeup(wpipe, wpipe);
916 error = EPIPE;
917 goto error1;
918 }
919 if (wpipe->pipe_state & PIPE_WANTR) {
920 wpipe->pipe_state &= ~PIPE_WANTR;
921 wakeup(wpipe);
922 }
923 pipeselwakeup(wpipe, wpipe);
924 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
925 }
926
927 pipelock(wpipe,0);
928 if (wpipe->pipe_state & PIPE_DIRECTW) {
929 /*
930 * this bit of trickery substitutes a kernel buffer for
931 * the process that might be going away.
932 */
933 pipe_clone_write_buffer(wpipe);
934 } else {
935 pipe_destroy_write_buffer(wpipe);
936 }
937 pipeunlock(wpipe);
938 return (error);
939
940 error1:
941 wakeup(wpipe);
942 return (error);
943 }
944 #endif /* !PIPE_NODIRECT */
945 #endif /* FreeBSD */
946
947 #ifdef __NetBSD__
948 #ifndef PIPE_NODIRECT
949 /*
950 * Allocate structure for loan transfer.
951 *
 * Reserves "blen" bytes of kernel virtual space for mapping loaned
 * pages and allocates the page-pointer array for "npages" pages.
 * Returns 0 or ENOMEM; on success amountpipekva is increased by blen.
 */
952 static __inline int
953 pipe_loan_alloc(wpipe, npages, blen)
954 struct pipe *wpipe;
955 int npages;
956 vsize_t blen;
957 {
958 wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, blen);
959 if (wpipe->pipe_map.kva == NULL)
960 return (ENOMEM);
961
962 amountpipekva += blen;
963 wpipe->pipe_map.npages = npages;
964 wpipe->pipe_map.ms = (struct vm_page **) malloc(
965 npages * sizeof(struct vm_page *), M_PIPE, M_WAITOK);
966
967 return (0);
968 }
969
970 /*
971 * Free resources allocated for loan transfer.
972 *
 * Releases the kva and page-pointer array obtained by pipe_loan_alloc()
 * and adjusts the amountpipekva accounting back down.
 */
973 static void
974 pipe_loan_free(wpipe)
975 struct pipe *wpipe;
976 {
977 uvm_km_free(kernel_map, wpipe->pipe_map.kva,
978 wpipe->pipe_map.npages * PAGE_SIZE);
979 wpipe->pipe_map.kva = NULL;
980 amountpipekva -= wpipe->pipe_map.npages * PAGE_SIZE;
981 free(wpipe->pipe_map.ms, M_PIPE);
982 wpipe->pipe_map.ms = NULL;
983 }
984
985 /*
986 * NetBSD direct write, using uvm_loan() mechanism.
987 * This implements the pipe buffer write mechanism. Note that only
988 * a direct write OR a normal pipe write can be pending at any given time.
989 * If there are any characters in the pipe buffer, the direct write will
990 * be deferred until the receiving process grabs all of the bytes from
991 * the pipe buffer. Then the direct mapping write is set-up.
992 *
 * Handles only the first iovec, and at most PIPE_DIRECT_CHUNK bytes of
 * it; the caller is expected to deal with the resulting short write.
 * On success uio->uio_resid is decremented by the bytes transferred.
 * Returns 0, EPIPE when the reader disappears with nothing consumed,
 * or another errno (sleep/loan/allocation failure).
 */
993 static __inline int
994 pipe_direct_write(wpipe, uio)
995 struct pipe *wpipe;
996 struct uio *uio;
997 {
998 int error, npages, j;
999 struct vm_page **res = NULL;
1000 vaddr_t bbase, kva, base, bend;
1001 vsize_t blen, bcnt;
1002 voff_t bpos;
1003
1004 retry:
/* Wait for any previous direct write to be fully consumed by the reader. */
1005 while (wpipe->pipe_state & PIPE_DIRECTW) {
1006 if (wpipe->pipe_state & PIPE_WANTR) {
1007 wpipe->pipe_state &= ~PIPE_WANTR;
1008 wakeup(wpipe);
1009 }
1010 wpipe->pipe_state |= PIPE_WANTW;
1011 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
1012 if (error)
1013 goto error;
1014 if (wpipe->pipe_state & PIPE_EOF) {
1015 error = EPIPE;
1016 goto error;
1017 }
1018 }
1019 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
/* Also wait until the circular buffer drains, then re-check from retry. */
1020 if (wpipe->pipe_buffer.cnt > 0) {
1021 if (wpipe->pipe_state & PIPE_WANTR) {
1022 wpipe->pipe_state &= ~PIPE_WANTR;
1023 wakeup(wpipe);
1024 }
1025
1026 wpipe->pipe_state |= PIPE_WANTW;
1027 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
1028 if (error)
1029 goto error;
1030 if (wpipe->pipe_state & PIPE_EOF) {
1031 error = EPIPE;
1032 goto error;
1033 }
1034 goto retry;
1035 }
1036
1037 /*
1038 * Handle first iovec, first PIPE_CHUNK_SIZE bytes. Expect caller
1039 * to deal with short write.
1040 *
1041 * Note: need to deal with buffers not aligned to PAGE_SIZE.
1042 */
1043 bbase = (vaddr_t)uio->uio_iov[0].iov_base;
1044 base = trunc_page(bbase);
1045 bend = round_page(bbase + uio->uio_iov[0].iov_len);
1046 blen = bend - base;
1047 bpos = bbase - base;
1048
1049 if (blen > PIPE_DIRECT_CHUNK) {
1050 blen = PIPE_DIRECT_CHUNK;
1051 bend = base + blen;
1052 bcnt = PIPE_DIRECT_CHUNK - bpos;
1053 } else
1054 bcnt = uio->uio_iov[0].iov_len;
1055
1056 npages = blen / PAGE_SIZE;
1057
1058 wpipe->pipe_map.pos = bpos;
1059 wpipe->pipe_map.cnt = bcnt;
1060
1061 /*
1062 * Free the old kva if we need more pages than we have
1063 * allocated.
1064 */
1065 if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
1066 pipe_loan_free(wpipe);
1067
1068 /* Allocate new kva. */
1069 if (!wpipe->pipe_map.kva
1070 && (error = pipe_loan_alloc(wpipe, npages, blen)))
1071 goto error;
1072
1073 /* Loan the write buffer memory from writer process */
1074 error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
1075 (void **) wpipe->pipe_map.ms, UVM_LOAN_TOPAGE);
1076 if (error)
1077 goto cleanup;
1078 res = wpipe->pipe_map.ms;
1079
/* Map the loaned pages read-only into the reserved kernel va range. */
1080 /* Enter the loaned pages to kva */
1081 kva = wpipe->pipe_map.kva;
1082 for(j=0; j < npages; j++, kva += PAGE_SIZE)
1083 pmap_enter(pmap_kernel(), kva, res[j]->phys_addr,
1084 VM_PROT_READ, 0);
1085
/* Publish the window and sleep until the reader consumes it all. */
1086 wpipe->pipe_state |= PIPE_DIRECTW;
1087 error = 0;
1088 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
1089 if (wpipe->pipe_state & PIPE_EOF) {
1090 error = EPIPE;
1091 break;
1092 }
1093 if (wpipe->pipe_state & PIPE_WANTR) {
1094 wpipe->pipe_state &= ~PIPE_WANTR;
1095 wakeup(wpipe);
1096 }
1097 pipeselwakeup(wpipe, wpipe);
1098 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
1099 }
1100
1101 if (error)
1102 wpipe->pipe_state &= ~PIPE_DIRECTW;
1103
1104 cleanup:
1105 pipelock(wpipe, 0);
/* On error, or when over the kva soft limit, drop the cached mapping. */
1106 if (error || amountpipekva > maxpipekva)
1107 pipe_loan_free(wpipe);
1108 else if (res)
1109 uvm_unloanpage(res, npages);
1110 pipeunlock(wpipe);
1111
1112 if (error == EPIPE) {
1113 pipeselwakeup(wpipe, wpipe);
1114
1115 /*
1116 * If anything was read from what we offered, return success
1117 * and short write. We return EOF on next write(2).
1118 */
1119 if (wpipe->pipe_map.cnt < bcnt) {
1120 bcnt -= wpipe->pipe_map.cnt;
1121 error = 0;
1122 }
1123 }
1124
/*
 * Note: the "error" label deliberately sits inside this if-block;
 * C labels are function-scoped, so the gotos above reach it, and
 * falling in from L1124 only happens when error is non-zero anyway.
 */
1125 if (error) {
1126 error:
1127 wakeup(wpipe);
1128 return (error);
1129 }
1130
1131 uio->uio_resid -= bcnt;
1132 /* uio_offset not updated, not set/used for write(2) */
1133
1134 return (0);
1135 }
1136 #endif /* !PIPE_NODIRECT */
1137 #endif /* NetBSD */
1138
1139 #ifdef __FreeBSD__
1140 static int
1141 pipe_write(fp, uio, cred, flags, p)
1142 struct file *fp;
1143 off_t *offset;
1144 struct uio *uio;
1145 struct ucred *cred;
1146 int flags;
1147 struct proc *p;
1148 #elif defined(__NetBSD__)
1149 static int
1150 pipe_write(fp, offset, uio, cred, flags)
1151 struct file *fp;
1152 off_t *offset;
1153 struct uio *uio;
1154 struct ucred *cred;
1155 int flags;
1156 #endif
1157 {
1158 int error = 0;
1159 int orig_resid;
1160 struct pipe *wpipe, *rpipe;
1161
1162 rpipe = (struct pipe *) fp->f_data;
1163 wpipe = rpipe->pipe_peer;
1164
1165 /*
1166 * detect loss of pipe read side, issue SIGPIPE if lost.
1167 */
1168 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF))
1169 return (EPIPE);
1170
1171 ++wpipe->pipe_busy;
1172
1173 /*
1174 * If it is advantageous to resize the pipe buffer, do
1175 * so.
1176 */
1177 if ((uio->uio_resid > PIPE_SIZE) &&
1178 (nbigpipe < maxbigpipes) &&
1179 #ifndef PIPE_NODIRECT
1180 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1181 #endif
1182 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
1183 (wpipe->pipe_buffer.cnt == 0)) {
1184
1185 if ((error = pipelock(wpipe,1)) == 0) {
1186 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
1187 nbigpipe++;
1188 pipeunlock(wpipe);
1189 } else {
1190 /*
1191 * If an error occurred, unbusy and return, waking up
1192 * any waiting readers.
1193 */
1194 --wpipe->pipe_busy;
1195 if (wpipe->pipe_busy == 0
1196 && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1197 wpipe->pipe_state &=
1198 ~(PIPE_WANTCLOSE | PIPE_WANTR);
1199 wakeup(wpipe);
1200 }
1201
1202 return (error);
1203 }
1204 }
1205
1206 #ifdef __FreeBSD__
1207 KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
1208 #endif
1209
1210 orig_resid = uio->uio_resid;
1211 while (uio->uio_resid) {
1212 int space;
1213
1214 #ifndef PIPE_NODIRECT
1215 /*
1216 * If the transfer is large, we can gain performance if
1217 * we do process-to-process copies directly.
1218 * If the write is non-blocking, we don't use the
1219 * direct write mechanism.
1220 *
1221 * The direct write mechanism will detect the reader going
1222 * away on us.
1223 */
1224 if ((uio->uio_iov[0].iov_len >= PIPE_MINDIRECT) &&
1225 (uio->uio_resid == orig_resid) &&
1226 (fp->f_flag & FNONBLOCK) == 0 &&
1227 (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
1228 error = pipe_direct_write(wpipe, uio);
1229
1230 /*
1231 * We either errorred, wrote whole buffer, or
1232 * wrote part of buffer. If the error is ENOMEM,
1233 * we failed to allocate some resources for direct
1234 * write and fall back to ordinary write. Otherwise,
1235 * break out now.
1236 */
1237 if (error != ENOMEM)
1238 break;
1239 }
1240 #endif /* PIPE_NODIRECT */
1241
1242 /*
1243 * Pipe buffered writes cannot be coincidental with
1244 * direct writes. We wait until the currently executing
1245 * direct write is completed before we start filling the
1246 * pipe buffer. We break out if a signal occurs or the
1247 * reader goes away.
1248 */
1249 retrywrite:
1250 while (wpipe->pipe_state & PIPE_DIRECTW) {
1251 if (wpipe->pipe_state & PIPE_WANTR) {
1252 wpipe->pipe_state &= ~PIPE_WANTR;
1253 wakeup(wpipe);
1254 }
1255 error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
1256 if (wpipe->pipe_state & PIPE_EOF)
1257 break;
1258 if (error)
1259 break;
1260 }
1261 if (wpipe->pipe_state & PIPE_EOF) {
1262 error = EPIPE;
1263 break;
1264 }
1265
1266 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1267
1268 /* Writes of size <= PIPE_BUF must be atomic. */
1269 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1270 space = 0;
1271
1272 if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
1273 int size; /* Transfer size */
1274 int segsize; /* first segment to transfer */
1275
1276 if ((error = pipelock(wpipe,1)) != 0)
1277 break;
1278
1279 /*
1280 * It is possible for a direct write to
1281 * slip in on us... handle it here...
1282 */
1283 if (wpipe->pipe_state & PIPE_DIRECTW) {
1284 pipeunlock(wpipe);
1285 goto retrywrite;
1286 }
1287 /*
1288 * If a process blocked in uiomove, our
1289 * value for space might be bad.
1290 *
1291 * XXX will we be ok if the reader has gone
1292 * away here?
1293 */
1294 if (space > wpipe->pipe_buffer.size -
1295 wpipe->pipe_buffer.cnt) {
1296 pipeunlock(wpipe);
1297 goto retrywrite;
1298 }
1299
1300 /*
1301 * Transfer size is minimum of uio transfer
1302 * and free space in pipe buffer.
1303 */
1304 if (space > uio->uio_resid)
1305 size = uio->uio_resid;
1306 else
1307 size = space;
1308 /*
1309 * First segment to transfer is minimum of
1310 * transfer size and contiguous space in
1311 * pipe buffer. If first segment to transfer
1312 * is less than the transfer size, we've got
1313 * a wraparound in the buffer.
1314 */
1315 segsize = wpipe->pipe_buffer.size -
1316 wpipe->pipe_buffer.in;
1317 if (segsize > size)
1318 segsize = size;
1319
1320 /* Transfer first segment */
1321
1322 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1323 segsize, uio);
1324
1325 if (error == 0 && segsize < size) {
1326 /*
1327 * Transfer remaining part now, to
1328 * support atomic writes. Wraparound
1329 * happened.
1330 */
1331 #ifdef DEBUG
1332 if (wpipe->pipe_buffer.in + segsize !=
1333 wpipe->pipe_buffer.size)
1334 panic("Expected pipe buffer wraparound disappeared");
1335 #endif
1336
1337 error = uiomove(&wpipe->pipe_buffer.buffer[0],
1338 size - segsize, uio);
1339 }
1340 if (error == 0) {
1341 wpipe->pipe_buffer.in += size;
1342 if (wpipe->pipe_buffer.in >=
1343 wpipe->pipe_buffer.size) {
1344 #ifdef DEBUG
1345 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
1346 panic("Expected wraparound bad");
1347 #endif
1348 wpipe->pipe_buffer.in = size - segsize;
1349 }
1350
1351 wpipe->pipe_buffer.cnt += size;
1352 #ifdef DEBUG
1353 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
1354 panic("Pipe buffer overflow");
1355 #endif
1356
1357 }
1358 pipeunlock(wpipe);
1359 if (error)
1360 break;
1361
1362 } else {
1363 /*
1364 * If the "read-side" has been blocked, wake it up now.
1365 */
1366 if (wpipe->pipe_state & PIPE_WANTR) {
1367 wpipe->pipe_state &= ~PIPE_WANTR;
1368 wakeup(wpipe);
1369 }
1370
1371 /*
1372 * don't block on non-blocking I/O
1373 */
1374 if (fp->f_flag & FNONBLOCK) {
1375 error = EAGAIN;
1376 break;
1377 }
1378
1379 /*
1380 * We have no more space and have something to offer,
1381 * wake up select/poll.
1382 */
1383 pipeselwakeup(wpipe, wpipe);
1384
1385 wpipe->pipe_state |= PIPE_WANTW;
1386 error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
1387 if (error != 0)
1388 break;
1389 /*
1390 * If read side wants to go away, we just issue a signal
1391 * to ourselves.
1392 */
1393 if (wpipe->pipe_state & PIPE_EOF) {
1394 error = EPIPE;
1395 break;
1396 }
1397 }
1398 }
1399
1400 --wpipe->pipe_busy;
1401 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1402 wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
1403 wakeup(wpipe);
1404 } else if (wpipe->pipe_buffer.cnt > 0) {
1405 /*
1406 * If we have put any characters in the buffer, we wake up
1407 * the reader.
1408 */
1409 if (wpipe->pipe_state & PIPE_WANTR) {
1410 wpipe->pipe_state &= ~PIPE_WANTR;
1411 wakeup(wpipe);
1412 }
1413 }
1414
1415 /*
1416 * Don't return EPIPE if I/O was successful
1417 */
1418 if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
1419 && (uio->uio_resid == 0))
1420 error = 0;
1421
1422 if (error == 0)
1423 vfs_timestamp(&wpipe->pipe_mtime);
1424
1425 /*
1426 * We have something to offer, wake up select/poll.
1427 * wpipe->pipe_map.cnt is always 0 in this point (direct write
1428 * is only done synchronously), so check wpipe->only pipe_buffer.cnt
1429 */
1430 if (wpipe->pipe_buffer.cnt)
1431 pipeselwakeup(wpipe, wpipe);
1432
1433 /*
1434 * Arrange for next read(2) to do a signal.
1435 */
1436 wpipe->pipe_state |= PIPE_SIGNALR;
1437
1438 return (error);
1439 }
1440
1441 /*
1442 * we implement a very minimal set of ioctls for compatibility with sockets.
1443 */
1444 int
1445 pipe_ioctl(fp, cmd, data, p)
1446 struct file *fp;
1447 u_long cmd;
1448 caddr_t data;
1449 struct proc *p;
1450 {
1451 struct pipe *mpipe = (struct pipe *)fp->f_data;
1452
1453 switch (cmd) {
1454
1455 case FIONBIO:
1456 return (0);
1457
1458 case FIOASYNC:
1459 if (*(int *)data) {
1460 mpipe->pipe_state |= PIPE_ASYNC;
1461 } else {
1462 mpipe->pipe_state &= ~PIPE_ASYNC;
1463 }
1464 return (0);
1465
1466 case FIONREAD:
1467 #ifndef PIPE_NODIRECT
1468 if (mpipe->pipe_state & PIPE_DIRECTW)
1469 *(int *)data = mpipe->pipe_map.cnt;
1470 else
1471 #endif
1472 *(int *)data = mpipe->pipe_buffer.cnt;
1473 return (0);
1474
1475 #ifdef __FreeBSD__
1476 case FIOSETOWN:
1477 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1478
1479 case FIOGETOWN:
1480 *(int *)data = fgetown(mpipe->pipe_sigio);
1481 return (0);
1482
1483 /* This is deprecated, FIOSETOWN should be used instead. */
1484 case TIOCSPGRP:
1485 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1486
1487 /* This is deprecated, FIOGETOWN should be used instead. */
1488 case TIOCGPGRP:
1489 *(int *)data = -fgetown(mpipe->pipe_sigio);
1490 return (0);
1491 #endif /* FreeBSD */
1492 #ifdef __NetBSD__
1493 case TIOCSPGRP:
1494 mpipe->pipe_pgid = *(int *)data;
1495 return (0);
1496
1497 case TIOCGPGRP:
1498 *(int *)data = mpipe->pipe_pgid;
1499 return (0);
1500 #endif /* NetBSD */
1501
1502 }
1503 return (ENOTTY);
1504 }
1505
/*
 * poll(2)/select(2) support: report readability, writability and
 * hangup state for the pipe behind fp, and record the caller in the
 * appropriate selinfo when no requested event is currently pending.
 */
int
pipe_poll(fp, events, p)
	struct file *fp;
	int events;
	struct proc *p;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	/* Readable: buffered data, a pending direct write, or EOF. */
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTW) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * Writable: the peer is gone/at EOF (a write would return
	 * immediately), or no direct write is active and at least
	 * PIPE_BUF bytes of buffer space are free (atomic-write bound).
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
		    || (
#ifndef PIPE_NODIRECT
		    ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
#endif
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(p, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		/*
		 * NOTE: wpipe is not NULL-checked here, but this branch
		 * is unreachable with wpipe == NULL when POLLOUT was
		 * requested — the POLLOUT and POLLHUP tests above would
		 * already have made revents nonzero in that case.
		 */
		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}
1553
1554 static int
1555 pipe_stat(fp, ub, p)
1556 struct file *fp;
1557 struct stat *ub;
1558 struct proc *p;
1559 {
1560 struct pipe *pipe = (struct pipe *)fp->f_data;
1561
1562 memset((caddr_t)ub, 0, sizeof(*ub));
1563 ub->st_mode = S_IFIFO;
1564 ub->st_blksize = pipe->pipe_buffer.size;
1565 ub->st_size = pipe->pipe_buffer.cnt;
1566 ub->st_blocks = (ub->st_size) ? 1 : 0;
1567 #ifdef __FreeBSD__
1568 ub->st_atimespec = pipe->pipe_atime;
1569 ub->st_mtimespec = pipe->pipe_mtime;
1570 ub->st_ctimespec = pipe->pipe_ctime;
1571 #endif /* FreeBSD */
1572 #ifdef __NetBSD__
1573 TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec)
1574 TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1575 TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1576 #endif /* NetBSD */
1577 ub->st_uid = fp->f_cred->cr_uid;
1578 ub->st_gid = fp->f_cred->cr_gid;
1579 /*
1580 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1581 * XXX (st_dev, st_ino) should be unique.
1582 */
1583 return (0);
1584 }
1585
1586 /* ARGSUSED */
1587 static int
1588 pipe_close(fp, p)
1589 struct file *fp;
1590 struct proc *p;
1591 {
1592 struct pipe *cpipe = (struct pipe *)fp->f_data;
1593
1594 #ifdef __FreeBSD__
1595 fp->f_ops = &badfileops;
1596 funsetown(cpipe->pipe_sigio);
1597 #endif
1598 fp->f_data = NULL;
1599 pipeclose(cpipe);
1600 return (0);
1601 }
1602
/*
 * Release the kernel memory held by a pipe: the in-kernel ring
 * buffer and, when direct writes are enabled, the direct-write
 * mapping resources.  Keeps the global big-pipe and pipe-kva
 * accounting in sync with what is freed.
 */
static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	mtx_assert(&vm_mtx, MA_OWNED);
#endif
	if (cpipe->pipe_buffer.buffer != NULL) {
		/* A buffer larger than PIPE_SIZE counted against nbigpipe. */
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
#ifdef __FreeBSD__
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#elif defined(__NetBSD__)
		uvm_km_free(kernel_map,
			(vaddr_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#endif /* NetBSD */

		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != NULL) {
#ifdef __FreeBSD__
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
#elif defined(__NetBSD__)
		/* NetBSD: direct-write kva/page resources go through
		 * pipe_loan_free(). */
		pipe_loan_free(cpipe);
#endif /* NetBSD */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = NULL;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}
1644
1645 /*
1646 * shutdown the pipe
1647 */
1648 static void
1649 pipeclose(cpipe)
1650 struct pipe *cpipe;
1651 {
1652 struct pipe *ppipe;
1653
1654 if (!cpipe)
1655 return;
1656
1657 pipeselwakeup(cpipe, cpipe);
1658
1659 /*
1660 * If the other side is blocked, wake it up saying that
1661 * we want to close it down.
1662 */
1663 while (cpipe->pipe_busy) {
1664 wakeup(cpipe);
1665 cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
1666 tsleep(cpipe, PRIBIO, "pipecl", 0);
1667 }
1668
1669 /*
1670 * Disconnect from peer
1671 */
1672 if ((ppipe = cpipe->pipe_peer) != NULL) {
1673 pipeselwakeup(ppipe, ppipe);
1674
1675 ppipe->pipe_state |= PIPE_EOF;
1676 wakeup(ppipe);
1677 ppipe->pipe_peer = NULL;
1678 }
1679
1680 /*
1681 * free resources
1682 */
1683 #ifdef _FreeBSD__
1684 mtx_lock(&vm_mtx);
1685 pipe_free_kmem(cpipe);
1686 /* XXX: erm, doesn't zalloc already have its own locks and
1687 * not need the giant vm lock?
1688 */
1689 zfree(pipe_zone, cpipe);
1690 mtx_unlock(&vm_mtx);
1691 #endif /* FreeBSD */
1692
1693 #ifdef __NetBSD__
1694 pipe_free_kmem(cpipe);
1695 (void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
1696 pool_put(&pipe_pool, cpipe);
1697 #endif
1698 }
1699
1700 #ifdef __FreeBSD__
1701 /*ARGSUSED*/
1702 static int
1703 pipe_kqfilter(struct file *fp, struct knote *kn)
1704 {
1705 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1706
1707 switch (kn->kn_filter) {
1708 case EVFILT_READ:
1709 kn->kn_fop = &pipe_rfiltops;
1710 break;
1711 case EVFILT_WRITE:
1712 kn->kn_fop = &pipe_wfiltops;
1713 cpipe = cpipe->pipe_peer;
1714 break;
1715 default:
1716 return (1);
1717 }
1718 kn->kn_hook = (caddr_t)cpipe;
1719
1720 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
1721 return (0);
1722 }
1723
1724 static void
1725 filt_pipedetach(struct knote *kn)
1726 {
1727 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1728
1729 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
1730 }
1731
1732 /*ARGSUSED*/
1733 static int
1734 filt_piperead(struct knote *kn, long hint)
1735 {
1736 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1737 struct pipe *wpipe = rpipe->pipe_peer;
1738
1739 kn->kn_data = rpipe->pipe_buffer.cnt;
1740 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1741 kn->kn_data = rpipe->pipe_map.cnt;
1742
1743 if ((rpipe->pipe_state & PIPE_EOF) ||
1744 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1745 kn->kn_flags |= EV_EOF;
1746 return (1);
1747 }
1748 return (kn->kn_data > 0);
1749 }
1750
1751 /*ARGSUSED*/
1752 static int
1753 filt_pipewrite(struct knote *kn, long hint)
1754 {
1755 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1756 struct pipe *wpipe = rpipe->pipe_peer;
1757
1758 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1759 kn->kn_data = 0;
1760 kn->kn_flags |= EV_EOF;
1761 return (1);
1762 }
1763 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1764 if (wpipe->pipe_state & PIPE_DIRECTW)
1765 kn->kn_data = 0;
1766
1767 return (kn->kn_data >= PIPE_BUF);
1768 }
1769 #endif /* FreeBSD */
1770
1771 #ifdef __NetBSD__
1772 static int
1773 pipe_fcntl(fp, cmd, data, p)
1774 struct file *fp;
1775 u_int cmd;
1776 caddr_t data;
1777 struct proc *p;
1778 {
1779 if (cmd == F_SETFL)
1780 return (0);
1781 else
1782 return (EOPNOTSUPP);
1783 }
1784
1785 /*
1786 * Handle pipe sysctls.
1787 */
1788 int
1789 sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
1790 int *name;
1791 u_int namelen;
1792 void *oldp;
1793 size_t *oldlenp;
1794 void *newp;
1795 size_t newlen;
1796 {
1797 /* All sysctl names at this level are terminal. */
1798 if (namelen != 1)
1799 return (ENOTDIR); /* overloaded */
1800
1801 switch (name[0]) {
1802 case KERN_PIPE_MAXKVASZ:
1803 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
1804 case KERN_PIPE_LIMITKVA:
1805 return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
1806 case KERN_PIPE_MAXBIGPIPES:
1807 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
1808 case KERN_PIPE_NBIGPIPES:
1809 return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
1810 case KERN_PIPE_KVASIZE:
1811 return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
1812 default:
1813 return (EOPNOTSUPP);
1814 }
1815 /* NOTREACHED */
1816 }
1817
/*
 * Initialize pipe structs.
 */
void
pipe_init(void)
{
	/* Pool ("pipepl") backing struct pipe allocations (M_PIPE). */
	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
		0, NULL, NULL, M_PIPE);
}
1827
#endif /* __NetBSD__ */
1829