     1 /*	$NetBSD: sys_pipe.c,v 1.5.2.3 2001/09/08 02:33:48 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 1996 John S. Dyson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Absolutely no warranty of function or purpose is made by the author
17 * John S. Dyson.
18 * 4. Modifications may be freely made to this file if the above conditions
19 * are met.
20 *
21 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.82 2001/06/15 20:45:01 jlemon Exp $
22 */
23
24 /*
25 * This file contains a high-performance replacement for the socket-based
26 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
27 * all features of sockets, but does do everything that pipes normally
28 * do.
29 *
    30  * Adaptation for NetBSD UVM, including the uvm_loan() based direct write,
    31  * was written by Jaromir Dolecek.
32 */
33
34 /*
35 * This code has two modes of operation, a small write mode and a large
36 * write mode. The small write mode acts like conventional pipes with
    37  * a kernel buffer.  If the write is smaller than PIPE_MINDIRECT, then the
    38  * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
39 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
40 * those pages are also wired), and the receiving process can copy it directly
41 * from the pages in the sending process.
42 *
43 * If the sending process receives a signal, it is possible that it will
44 * go away, and certainly its address space can change, because control
45 * is returned back to the user-mode side. In that case, the pipe code
46 * arranges to copy the buffer supplied by the user process on FreeBSD, to
47 * a pageable kernel buffer, and the receiving process will grab the data
48 * from the pageable kernel buffer. Since signals don't happen all that often,
49 * the copy operation is normally eliminated.
50 * For NetBSD, the pages are mapped read-only, COW for kernel by uvm_loan(),
    51  * so no explicit handling needs to be done; everything is handled by the
    52  * standard VM facilities.
53 *
54 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
55 * happen for small transfers so that the system will not spend all of
56 * its time context switching. PIPE_SIZE is constrained by the
57 * amount of kernel virtual memory.
58 */
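/*
 * Path selection sketch (illustrative only; it mirrors the checks made
 * in pipe_write() below).  A write takes the direct path only when the
 * chunk is large enough, the descriptor is blocking, and kva is
 * available; everything else goes through the kernel buffer:
 *
 *	if (iov_len >= PIPE_MINDIRECT && !(f_flag & FNONBLOCK) &&
 *	    (map kva already cached || amountpipekva < limitpipekva))
 *		pipe_direct_write();	-- loan/map the user pages
 *	else
 *		uiomove();		-- copy into the circular buffer
 */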
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/proc.h>
63 #include <sys/fcntl.h>
64 #include <sys/file.h>
65 #include <sys/filedesc.h>
66 #include <sys/filio.h>
67 #include <sys/ttycom.h>
68 #include <sys/stat.h>
69 #include <sys/poll.h>
70 #include <sys/signalvar.h>
71 #include <sys/vnode.h>
72 #include <sys/uio.h>
73 #include <sys/lock.h>
74 #ifdef __FreeBSD__
75 #include <sys/mutex.h>
76 #include <sys/selinfo.h>
77 #include <sys/sysproto.h>
78 #elif defined(__NetBSD__)
79 #include <sys/select.h>
80 #include <sys/malloc.h>
81 #include <sys/mount.h>
82 #include <sys/syscallargs.h>
83 #include <uvm/uvm.h>
84 #include <sys/sysctl.h>
85 #endif /* NetBSD, FreeBSD */
86
87 #include <sys/pipe.h>
88
89 #ifdef __NetBSD__
90 #define vfs_timestamp(tv) microtime(tv)
91 #endif
92
93 /*
94 * Use this define if you want to disable *fancy* VM things. Expect an
95 * approx 30% decrease in transfer rate. This could be useful for
96 * OpenBSD.
97 */
98 /* #define PIPE_NODIRECT */
99
100 /*
101 * interfaces to the outside world
102 */
103 #ifdef __FreeBSD__
104 static int pipe_read __P((struct file *fp, struct uio *uio,
105 struct ucred *cred, int flags, struct proc *p));
106 static int pipe_write __P((struct file *fp, struct uio *uio,
107 struct ucred *cred, int flags, struct proc *p));
108 static int pipe_close __P((struct file *fp, struct proc *p));
109 static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
110 struct proc *p));
111 static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
112 static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
113 static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
114
115 static struct fileops pipeops = {
116 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
117 pipe_stat, pipe_close
118 };
119 #endif /* FreeBSD */
120
121 static void filt_pipedetach(struct knote *kn);
122 static int filt_piperead(struct knote *kn, long hint);
123 static int filt_pipewrite(struct knote *kn, long hint);
124
125 static const struct filterops pipe_rfiltops =
126 { 1, NULL, filt_pipedetach, filt_piperead };
127 static const struct filterops pipe_wfiltops =
128 { 1, NULL, filt_pipedetach, filt_pipewrite };
129
130 #ifdef __NetBSD__
131 static int pipe_read __P((struct file *fp, off_t *offset, struct uio *uio,
132 struct ucred *cred, int flags));
133 static int pipe_write __P((struct file *fp, off_t *offset, struct uio *uio,
134 struct ucred *cred, int flags));
135 static int pipe_close __P((struct file *fp, struct proc *p));
136 static int pipe_poll __P((struct file *fp, int events, struct proc *p));
137 static int pipe_fcntl __P((struct file *fp, u_int com, caddr_t data,
138 struct proc *p));
139 static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
140 static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
141 static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
142
143 static struct fileops pipeops =
144 { pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
145 pipe_stat, pipe_close, pipe_kqfilter };
146 #endif /* NetBSD */
147
148 /*
149 * Default pipe buffer size(s), this can be kind-of large now because pipe
150 * space is pageable. The pipe code will try to maintain locality of
151 * reference for performance reasons, so small amounts of outstanding I/O
152 * will not wipe the cache.
153 */
154 #define MINPIPESIZE (PIPE_SIZE/3)
155 #define MAXPIPESIZE (2*PIPE_SIZE/3)
156
157 /*
158 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
159 * is there so that on large systems, we don't exhaust it.
160 */
161 #define MAXPIPEKVA (8*1024*1024)
162 static int maxpipekva = MAXPIPEKVA;
163
164 /*
   165  * Limit for direct transfers; we cannot, of course, limit
   166  * the amount of kva for pipes in general.
167 */
168 #define LIMITPIPEKVA (16*1024*1024)
169 static int limitpipekva = LIMITPIPEKVA;
170
171 /*
172 * Limit the number of "big" pipes
173 */
174 #define LIMITBIGPIPES 32
175 static int maxbigpipes = LIMITBIGPIPES;
176 static int nbigpipe = 0;
177
178 /*
179 * Amount of KVA consumed by pipe buffers.
180 */
181 static int amountpipekva = 0;
182
183 static void pipeclose __P((struct pipe *cpipe));
184 static void pipe_free_kmem __P((struct pipe *cpipe));
185 static int pipe_create __P((struct pipe **cpipep, int allockva));
186 static __inline int pipelock __P((struct pipe *cpipe, int catch));
187 static __inline void pipeunlock __P((struct pipe *cpipe));
188 static __inline void pipeselwakeup __P((struct pipe *selp,
189 struct pipe *sigp));
190 static int pipespace __P((struct pipe *cpipe, int size));
191
192 #ifdef __FreeBSD__
193 #ifndef PIPE_NODIRECT
194 static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
195 static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
196 static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
197 static void pipe_clone_write_buffer __P((struct pipe *wpipe));
198 #endif
199
200 static vm_zone_t pipe_zone;
201 #endif /* FreeBSD */
202
203 #ifdef __NetBSD__
204 #ifndef PIPE_NODIRECT
205 static __inline int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
206 static __inline int pipe_loan_alloc __P((struct pipe *wpipe, int npages,
207 vsize_t blen));
208 static void pipe_loan_free __P((struct pipe *wpipe));
209 #endif /* PIPE_NODIRECT */
210
211 static struct pool pipe_pool;
212 #endif /* NetBSD */
213
214 /*
215 * The pipe system call for the DTYPE_PIPE type of pipes
216 */
217
218 /* ARGSUSED */
219 #ifdef __FreeBSD__
220 int
221 pipe(p, uap)
222 struct proc *p;
223 struct pipe_args /* {
224 int dummy;
225 } */ *uap;
226 #elif defined(__NetBSD__)
227 int
228 sys_pipe(p, v, retval)
229 struct proc *p;
230 void *v;
231 register_t *retval;
232 #endif
233 {
234 struct file *rf, *wf;
235 struct pipe *rpipe, *wpipe;
236 int fd, error;
237
238 #ifdef __FreeBSD__
239 if (pipe_zone == NULL)
240 pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
241
242 rpipe = wpipe = NULL;
243 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
244 pipeclose(rpipe);
245 pipeclose(wpipe);
246 return (ENFILE);
247 }
248
249 error = falloc(p, &rf, &fd);
250 if (error) {
251 pipeclose(rpipe);
252 pipeclose(wpipe);
253 return (error);
254 }
255 fhold(rf);
256 p->p_retval[0] = fd;
257
258 /*
259 * Warning: once we've gotten past allocation of the fd for the
260 * read-side, we can only drop the read side via fdrop() in order
261 * to avoid races against processes which manage to dup() the read
262 * side while we are blocked trying to allocate the write side.
263 */
264 rf->f_flag = FREAD | FWRITE;
265 rf->f_type = DTYPE_PIPE;
266 rf->f_data = (caddr_t)rpipe;
267 rf->f_ops = &pipeops;
268 error = falloc(p, &wf, &fd);
269 if (error) {
270 struct filedesc *fdp = p->p_fd;
271
272 if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
273 fdp->fd_ofiles[p->p_retval[0]] = NULL;
274 fdrop(rf, p);
275 }
276 fdrop(rf, p);
277 /* rpipe has been closed by fdrop(). */
278 pipeclose(wpipe);
279 return (error);
280 }
281 wf->f_flag = FREAD | FWRITE;
282 wf->f_type = DTYPE_PIPE;
283 wf->f_data = (caddr_t)wpipe;
284 wf->f_ops = &pipeops;
285 p->p_retval[1] = fd;
286
287 rpipe->pipe_peer = wpipe;
288 wpipe->pipe_peer = rpipe;
289 fdrop(rf, p);
290 #endif /* FreeBSD */
291
292 #ifdef __NetBSD__
293 rpipe = wpipe = NULL;
294 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
295 pipeclose(rpipe);
296 pipeclose(wpipe);
297 return (ENFILE);
298 }
299
300 /*
301 * Note: the file structure returned from falloc() is marked
302 * as 'larval' initially. Unless we mark it as 'mature' by
303 * FILE_SET_MATURE(), any attempt to do anything with it would
304 * return EBADF, including e.g. dup(2) or close(2). This avoids
305 * file descriptor races if we block in the second falloc().
306 */
307
308 error = falloc(p, &rf, &fd);
309 if (error)
310 goto free2;
311 retval[0] = fd;
312 rf->f_flag = FREAD;
313 rf->f_type = DTYPE_PIPE;
314 rf->f_data = (caddr_t)rpipe;
315 rf->f_ops = &pipeops;
316
317 error = falloc(p, &wf, &fd);
318 if (error)
319 goto free3;
320 retval[1] = fd;
321 wf->f_flag = FWRITE;
322 wf->f_type = DTYPE_PIPE;
323 wf->f_data = (caddr_t)wpipe;
324 wf->f_ops = &pipeops;
325
326 rpipe->pipe_peer = wpipe;
327 wpipe->pipe_peer = rpipe;
328
329 FILE_SET_MATURE(rf);
330 FILE_SET_MATURE(wf);
331 FILE_UNUSE(rf, p);
332 FILE_UNUSE(wf, p);
333 return (0);
334 free3:
335 FILE_UNUSE(rf, p);
336 ffree(rf);
337 fdremove(p->p_fd, retval[0]);
338 free2:
339 pipeclose(wpipe);
340 pipeclose(rpipe);
341 #endif /* NetBSD */
342
343 return (error);
344 }
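/*
 * Userland view (illustrative sketch with hypothetical variables):
 * the first descriptor returned is the read end, the second the
 * write end.
 *
 *	int fd[2];
 *	char buf[4];
 *
 *	if (pipe(fd) == -1)
 *		err(1, "pipe");
 *	write(fd[1], "data", 4);	-- served by pipe_write() below
 *	read(fd[0], buf, 4);		-- served by pipe_read() below
 */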
345
346 /*
   347  * Allocate kva for the pipe circular buffer; the space is pageable.
   348  * This routine will 'realloc' the size of a pipe safely: if it fails,
   349  * it will retain the old buffer.
   350  * On failure it returns ENOMEM.
351 */
352 static int
353 pipespace(cpipe, size)
354 struct pipe *cpipe;
355 int size;
356 {
357 caddr_t buffer;
358 #ifdef __FreeBSD__
359 struct vm_object *object;
360 int npages, error;
361
362 npages = round_page(size)/PAGE_SIZE;
363 /*
364 * Create an object, I don't like the idea of paging to/from
365 * kernel_object.
366 */
367 mtx_lock(&vm_mtx);
368 object = vm_object_allocate(OBJT_DEFAULT, npages);
369 buffer = (caddr_t) vm_map_min(kernel_map);
370
371 /*
372 * Insert the object into the kernel map, and allocate kva for it.
373 * The map entry is, by default, pageable.
374 */
375 error = vm_map_find(kernel_map, object, 0,
376 (vm_offset_t *) &buffer, size, 1,
377 VM_PROT_ALL, VM_PROT_ALL, 0);
378
379 if (error != KERN_SUCCESS) {
380 vm_object_deallocate(object);
381 mtx_unlock(&vm_mtx);
382 return (ENOMEM);
383 }
384 #endif /* FreeBSD */
385
386 #ifdef __NetBSD__
387 /*
388 * Allocate pageable virtual address space. Physical memory is allocated
389 * on demand.
390 */
391 buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
392 if (buffer == NULL)
393 return (ENOMEM);
394 #endif /* NetBSD */
395
396 /* free old resources if we're resizing */
397 pipe_free_kmem(cpipe);
398 #ifdef __FreeBSD__
399 mtx_unlock(&vm_mtx);
400 cpipe->pipe_buffer.object = object;
401 #endif
402 cpipe->pipe_buffer.buffer = buffer;
403 cpipe->pipe_buffer.size = size;
404 cpipe->pipe_buffer.in = 0;
405 cpipe->pipe_buffer.out = 0;
406 cpipe->pipe_buffer.cnt = 0;
407 amountpipekva += cpipe->pipe_buffer.size;
408 return (0);
409 }
410
411 /*
412 * initialize and allocate VM and memory for pipe
413 */
414 static int
415 pipe_create(cpipep, allockva)
416 struct pipe **cpipep;
417 int allockva;
418 {
419 struct pipe *cpipe;
420 int error;
421
422 #ifdef __FreeBSD__
423 *cpipep = zalloc(pipe_zone);
424 #endif
425 #ifdef __NetBSD__
426 *cpipep = pool_get(&pipe_pool, M_WAITOK);
427 #endif
428 if (*cpipep == NULL)
429 return (ENOMEM);
430
431 cpipe = *cpipep;
432
433 /* Initialize */
434 memset(cpipe, 0, sizeof(*cpipe));
435 cpipe->pipe_state = PIPE_SIGNALR;
436
437 if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
438 return (error);
439
440 vfs_timestamp(&cpipe->pipe_ctime);
441 cpipe->pipe_atime = cpipe->pipe_ctime;
442 cpipe->pipe_mtime = cpipe->pipe_ctime;
443 #ifdef __NetBSD__
444 cpipe->pipe_pgid = NO_PID;
445 lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
446 #endif
447
448 return (0);
449 }
450
451
452 /*
453 * lock a pipe for I/O, blocking other access
454 */
455 static __inline int
456 pipelock(cpipe, catch)
457 struct pipe *cpipe;
458 int catch;
459 {
460 int error;
461
462 #ifdef __FreeBSD__
463 while (cpipe->pipe_state & PIPE_LOCK) {
464 cpipe->pipe_state |= PIPE_LWANT;
465 error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO,
466 "pipelk", 0);
467 if (error != 0)
468 return (error);
469 }
470 cpipe->pipe_state |= PIPE_LOCK;
471 return (0);
472 #endif
473
474 #ifdef __NetBSD__
475 do {
476 error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
477 } while (!catch && (error == EINTR || error == ERESTART));
478 return (error);
479 #endif
480 }
481
482 /*
483 * unlock a pipe I/O lock
484 */
485 static __inline void
486 pipeunlock(cpipe)
487 struct pipe *cpipe;
488 {
489 #ifdef __FreeBSD__
490 cpipe->pipe_state &= ~PIPE_LOCK;
491 if (cpipe->pipe_state & PIPE_LWANT) {
492 cpipe->pipe_state &= ~PIPE_LWANT;
493 wakeup(cpipe);
494 }
495 #endif
496
497 #ifdef __NetBSD__
498 lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
499 #endif
500 }
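/*
 * Typical usage of the I/O lock (illustrative sketch; the same pattern
 * appears in pipe_read() and pipe_write() below): bracket buffer
 * manipulation with pipelock()/pipeunlock(), bailing out if the
 * interruptible sleep failed.
 *
 *	if ((error = pipelock(cpipe, 1)) == 0) {
 *		... manipulate cpipe->pipe_buffer ...
 *		pipeunlock(cpipe);
 *	}
 */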
501
502 /*
   503  * Select/poll wakeup.  This also sends SIGIO to the peer connected to
   504  * the 'sigp' side of the pipe.
505 */
506 static __inline void
507 pipeselwakeup(selp, sigp)
508 struct pipe *selp, *sigp;
509 {
510
511 #ifdef __FreeBSD__
512 if (selp->pipe_state & PIPE_SEL) {
513 selp->pipe_state &= ~PIPE_SEL;
514 selwakeup(&selp->pipe_sel);
515 }
516 if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
517 pgsigio(sigp->pipe_sigio, SIGIO, 0);
518 KNOTE(&selp->pipe_sel.si_note, 0);
519 #endif
520
521 #ifdef __NetBSD__
522 selnotify(&selp->pipe_sel, 0);
523 if (sigp && (sigp->pipe_state & PIPE_ASYNC) &&
524 sigp->pipe_pgid != NO_PID) {
525 struct proc *p;
526
527 if (sigp->pipe_pgid < 0)
528 gsignal(-sigp->pipe_pgid, SIGIO);
529 else if (sigp->pipe_pgid > 0 &&
530 (p = pfind(sigp->pipe_pgid)) != NULL)
531 psignal(p, SIGIO);
532 }
533 #endif /* NetBSD */
534 }
535
536 /* ARGSUSED */
537 #ifdef __FreeBSD__
538 static int
539 pipe_read(fp, uio, cred, flags, p)
540 struct file *fp;
541 struct uio *uio;
542 struct ucred *cred;
543 int flags;
544 struct proc *p;
545 #elif defined(__NetBSD__)
546 static int
547 pipe_read(fp, offset, uio, cred, flags)
548 struct file *fp;
549 off_t *offset;
550 struct uio *uio;
551 struct ucred *cred;
552 int flags;
553 #endif
554 {
555 struct pipe *rpipe = (struct pipe *) fp->f_data;
556 int error;
557 size_t nread = 0;
558 size_t size;
559 size_t ocnt;
560
561 ++rpipe->pipe_busy;
562 error = pipelock(rpipe, 1);
563 if (error)
564 goto unlocked_error;
565
566 ocnt = rpipe->pipe_buffer.cnt;
567
568 while (uio->uio_resid) {
569 /*
570 * normal pipe buffer receive
571 */
572 if (rpipe->pipe_buffer.cnt > 0) {
573 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
574 if (size > rpipe->pipe_buffer.cnt)
575 size = rpipe->pipe_buffer.cnt;
576 if (size > uio->uio_resid)
577 size = uio->uio_resid;
578
579 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
580 size, uio);
581 if (error)
582 break;
583
584 rpipe->pipe_buffer.out += size;
585 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
586 rpipe->pipe_buffer.out = 0;
587
588 rpipe->pipe_buffer.cnt -= size;
589
590 /*
591 * If there is no more to read in the pipe, reset
592 * its pointers to the beginning. This improves
593 * cache hit stats.
594 */
595 if (rpipe->pipe_buffer.cnt == 0) {
596 rpipe->pipe_buffer.in = 0;
597 rpipe->pipe_buffer.out = 0;
598 }
599 nread += size;
600 #ifndef PIPE_NODIRECT
601 /*
602 * Direct copy, bypassing a kernel buffer.
603 */
604 } else if ((size = rpipe->pipe_map.cnt) &&
605 (rpipe->pipe_state & PIPE_DIRECTW)) {
606 caddr_t va;
607 if (size > uio->uio_resid)
608 size = uio->uio_resid;
609
610 va = (caddr_t) rpipe->pipe_map.kva +
611 rpipe->pipe_map.pos;
612 error = uiomove(va, size, uio);
613 if (error)
614 break;
615 nread += size;
616 rpipe->pipe_map.pos += size;
617 rpipe->pipe_map.cnt -= size;
618 if (rpipe->pipe_map.cnt == 0) {
619 rpipe->pipe_state &= ~PIPE_DIRECTW;
620 wakeup(rpipe);
621 }
622 #endif
623 } else {
624 /*
625 * detect EOF condition
626 * read returns 0 on EOF, no need to set error
627 */
628 if (rpipe->pipe_state & PIPE_EOF)
629 break;
630
631 /*
632 * If the "write-side" has been blocked, wake it up now.
633 */
634 if (rpipe->pipe_state & PIPE_WANTW) {
635 rpipe->pipe_state &= ~PIPE_WANTW;
636 wakeup(rpipe);
637 }
638
639 /*
640 * Break if some data was read.
641 */
642 if (nread > 0)
643 break;
644
645 /*
646 * don't block on non-blocking I/O
647 */
648 if (fp->f_flag & FNONBLOCK) {
649 error = EAGAIN;
650 break;
651 }
652
653 /*
654 * Unlock the pipe buffer for our remaining processing.
655 * We will either break out with an error or we will
656 * sleep and relock to loop.
657 */
658 pipeunlock(rpipe);
659
660 /*
661 * We want to read more, wake up select/poll.
662 */
663 pipeselwakeup(rpipe, rpipe->pipe_peer);
664
665 rpipe->pipe_state |= PIPE_WANTR;
666 error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
667 if (error != 0 || (error = pipelock(rpipe, 1)))
668 goto unlocked_error;
669 }
670 }
671 pipeunlock(rpipe);
672
673 if (error == 0)
674 vfs_timestamp(&rpipe->pipe_atime);
675 unlocked_error:
676 --rpipe->pipe_busy;
677
678 /*
679 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
680 */
681 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
682 rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
683 wakeup(rpipe);
684 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
685 /*
686 * Handle write blocking hysteresis.
687 */
688 if (rpipe->pipe_state & PIPE_WANTW) {
689 rpipe->pipe_state &= ~PIPE_WANTW;
690 wakeup(rpipe);
691 }
692 }
693
694 /*
   695 	 * If anything was read off the buffer, signal to the writer that it's
   696 	 * now possible to write more data. Also send the signal if we are here
   697 	 * for the first time after the last write.
698 */
699 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
700 && (ocnt != rpipe->pipe_buffer.cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
701 pipeselwakeup(rpipe, rpipe->pipe_peer);
702 rpipe->pipe_state &= ~PIPE_SIGNALR;
703 }
704
705 return (error);
706 }
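/*
 * Reader-side semantics sketch (illustrative; 'fd' and 'buf' are
 * hypothetical):
 *
 *	n = read(fd, buf, sizeof(buf));
 *	n > 0:  data copied out of the buffer (or the direct-write map)
 *	n == 0: write side closed and buffer drained (PIPE_EOF)
 *	n == -1, errno == EAGAIN: empty pipe with FNONBLOCK set
 */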
707
708 #ifdef __FreeBSD__
709 #ifndef PIPE_NODIRECT
710 /*
   711  * Map the sending process's buffer into kernel space and wire it.
712 * This is similar to a physical write operation.
713 */
714 static int
715 pipe_build_write_buffer(wpipe, uio)
716 struct pipe *wpipe;
717 struct uio *uio;
718 {
719 size_t size;
720 int i;
721 vm_offset_t addr, endaddr, paddr;
722
723 size = uio->uio_iov->iov_len;
724 if (size > wpipe->pipe_buffer.size)
725 size = wpipe->pipe_buffer.size;
726
727 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
728 mtx_lock(&vm_mtx);
729 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
730 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
731 vm_page_t m;
732
733 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
734 (paddr = pmap_kextract(addr)) == 0) {
735 int j;
736
737 for (j = 0; j < i; j++)
738 vm_page_unwire(wpipe->pipe_map.ms[j], 1);
739 mtx_unlock(&vm_mtx);
740 return (EFAULT);
741 }
742
743 m = PHYS_TO_VM_PAGE(paddr);
744 vm_page_wire(m);
745 wpipe->pipe_map.ms[i] = m;
746 }
747
748 /*
749 * set up the control block
750 */
751 wpipe->pipe_map.npages = i;
752 wpipe->pipe_map.pos =
753 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
754 wpipe->pipe_map.cnt = size;
755
756 /*
757 * and map the buffer
758 */
759 if (wpipe->pipe_map.kva == 0) {
760 /*
761 * We need to allocate space for an extra page because the
762 * address range might (will) span pages at times.
763 */
764 wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
765 wpipe->pipe_buffer.size + PAGE_SIZE);
766 amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
767 }
768 pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
769 wpipe->pipe_map.npages);
770
771 mtx_unlock(&vm_mtx);
772 /*
773 * and update the uio data
774 */
775
776 uio->uio_iov->iov_len -= size;
777 uio->uio_iov->iov_base += size;
778 if (uio->uio_iov->iov_len == 0)
779 uio->uio_iov++;
780 uio->uio_resid -= size;
781 uio->uio_offset += size;
782 return (0);
783 }
784
785 /*
786 * unmap and unwire the process buffer
787 */
788 static void
789 pipe_destroy_write_buffer(wpipe)
790 struct pipe *wpipe;
791 {
792 int i;
793
794 mtx_lock(&vm_mtx);
795 if (wpipe->pipe_map.kva) {
796 pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
797
798 if (amountpipekva > maxpipekva) {
799 vm_offset_t kva = wpipe->pipe_map.kva;
800 wpipe->pipe_map.kva = 0;
801 kmem_free(kernel_map, kva,
802 wpipe->pipe_buffer.size + PAGE_SIZE);
803 amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
804 }
805 }
806 for (i = 0; i < wpipe->pipe_map.npages; i++)
807 vm_page_unwire(wpipe->pipe_map.ms[i], 1);
808 mtx_unlock(&vm_mtx);
809 }
810
811 /*
812 * In the case of a signal, the writing process might go away. This
813 * code copies the data into the circular buffer so that the source
814 * pages can be freed without loss of data.
815 */
816 static void
817 pipe_clone_write_buffer(wpipe)
818 struct pipe *wpipe;
819 {
820 int size;
821 int pos;
822
823 size = wpipe->pipe_map.cnt;
824 pos = wpipe->pipe_map.pos;
825 memcpy((caddr_t) wpipe->pipe_buffer.buffer,
826 (caddr_t) wpipe->pipe_map.kva + pos, size);
827
828 wpipe->pipe_buffer.in = size;
829 wpipe->pipe_buffer.out = 0;
830 wpipe->pipe_buffer.cnt = size;
831 wpipe->pipe_state &= ~PIPE_DIRECTW;
832
833 pipe_destroy_write_buffer(wpipe);
834 }
835
836 /*
837 * This implements the pipe buffer write mechanism. Note that only
838 * a direct write OR a normal pipe write can be pending at any given time.
839 * If there are any characters in the pipe buffer, the direct write will
840 * be deferred until the receiving process grabs all of the bytes from
841 * the pipe buffer. Then the direct mapping write is set-up.
842 */
843 static int
844 pipe_direct_write(wpipe, uio)
845 struct pipe *wpipe;
846 struct uio *uio;
847 {
848 int error;
849
850 retry:
851 while (wpipe->pipe_state & PIPE_DIRECTW) {
852 if (wpipe->pipe_state & PIPE_WANTR) {
853 wpipe->pipe_state &= ~PIPE_WANTR;
854 wakeup(wpipe);
855 }
856 wpipe->pipe_state |= PIPE_WANTW;
857 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
858 if (error)
859 goto error1;
860 if (wpipe->pipe_state & PIPE_EOF) {
861 error = EPIPE;
862 goto error1;
863 }
864 }
865 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
866 if (wpipe->pipe_buffer.cnt > 0) {
867 if (wpipe->pipe_state & PIPE_WANTR) {
868 wpipe->pipe_state &= ~PIPE_WANTR;
869 wakeup(wpipe);
870 }
871
872 wpipe->pipe_state |= PIPE_WANTW;
873 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
874 if (error)
875 goto error1;
876 if (wpipe->pipe_state & PIPE_EOF) {
877 error = EPIPE;
878 goto error1;
879 }
880 goto retry;
881 }
882
883 wpipe->pipe_state |= PIPE_DIRECTW;
884
885 error = pipe_build_write_buffer(wpipe, uio);
886 if (error) {
887 wpipe->pipe_state &= ~PIPE_DIRECTW;
888 goto error1;
889 }
890
891 error = 0;
892 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
893 if (wpipe->pipe_state & PIPE_EOF) {
894 pipelock(wpipe, 0);
895 pipe_destroy_write_buffer(wpipe);
896 pipeunlock(wpipe);
897 pipeselwakeup(wpipe, wpipe);
898 error = EPIPE;
899 goto error1;
900 }
901 if (wpipe->pipe_state & PIPE_WANTR) {
902 wpipe->pipe_state &= ~PIPE_WANTR;
903 wakeup(wpipe);
904 }
905 pipeselwakeup(wpipe, wpipe);
906 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
907 }
908
909 pipelock(wpipe,0);
910 if (wpipe->pipe_state & PIPE_DIRECTW) {
911 /*
912 * this bit of trickery substitutes a kernel buffer for
913 * the process that might be going away.
914 */
915 pipe_clone_write_buffer(wpipe);
916 } else {
917 pipe_destroy_write_buffer(wpipe);
918 }
919 pipeunlock(wpipe);
920 return (error);
921
922 error1:
923 wakeup(wpipe);
924 return (error);
925 }
926 #endif /* !PIPE_NODIRECT */
927 #endif /* FreeBSD */
928
929 #ifdef __NetBSD__
930 #ifndef PIPE_NODIRECT
931 /*
932 * Allocate structure for loan transfer.
933 */
934 static __inline int
935 pipe_loan_alloc(wpipe, npages, blen)
936 struct pipe *wpipe;
937 int npages;
938 vsize_t blen;
939 {
940 wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, blen);
941 if (wpipe->pipe_map.kva == NULL)
942 return (ENOMEM);
943
944 amountpipekva += blen;
945 wpipe->pipe_map.npages = npages;
946 wpipe->pipe_map.ms = (struct vm_page **) malloc(
947 npages * sizeof(struct vm_page *), M_PIPE, M_WAITOK);
948
949 return (0);
950 }
951
952 /*
953 * Free resources allocated for loan transfer.
954 */
955 static void
956 pipe_loan_free(wpipe)
957 struct pipe *wpipe;
958 {
959 uvm_km_free(kernel_map, wpipe->pipe_map.kva,
960 wpipe->pipe_map.npages * PAGE_SIZE);
961 wpipe->pipe_map.kva = NULL;
962 amountpipekva -= wpipe->pipe_map.npages * PAGE_SIZE;
963 free(wpipe->pipe_map.ms, M_PIPE);
964 wpipe->pipe_map.ms = NULL;
965 }
966
967 /*
968 * NetBSD direct write, using uvm_loan() mechanism.
969 * This implements the pipe buffer write mechanism. Note that only
970 * a direct write OR a normal pipe write can be pending at any given time.
971 * If there are any characters in the pipe buffer, the direct write will
972 * be deferred until the receiving process grabs all of the bytes from
973 * the pipe buffer. Then the direct mapping write is set-up.
974 */
975 static __inline int
976 pipe_direct_write(wpipe, uio)
977 struct pipe *wpipe;
978 struct uio *uio;
979 {
980 int error, npages, j;
981 struct vm_page **res = NULL;
982 vaddr_t bbase, kva, base, bend;
983 vsize_t blen, bcnt;
984 voff_t bpos;
985
986 retry:
987 while (wpipe->pipe_state & PIPE_DIRECTW) {
988 if (wpipe->pipe_state & PIPE_WANTR) {
989 wpipe->pipe_state &= ~PIPE_WANTR;
990 wakeup(wpipe);
991 }
992 wpipe->pipe_state |= PIPE_WANTW;
993 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
994 if (error)
995 goto error;
996 if (wpipe->pipe_state & PIPE_EOF) {
997 error = EPIPE;
998 goto error;
999 }
1000 }
1001 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
1002 if (wpipe->pipe_buffer.cnt > 0) {
1003 if (wpipe->pipe_state & PIPE_WANTR) {
1004 wpipe->pipe_state &= ~PIPE_WANTR;
1005 wakeup(wpipe);
1006 }
1007
1008 wpipe->pipe_state |= PIPE_WANTW;
1009 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
1010 if (error)
1011 goto error;
1012 if (wpipe->pipe_state & PIPE_EOF) {
1013 error = EPIPE;
1014 goto error;
1015 }
1016 goto retry;
1017 }
1018
1019 /*
  1020 	 * Handle first iovec, first PIPE_DIRECT_CHUNK bytes.  Expect caller
1021 * to deal with short write.
1022 *
1023 * Note: need to deal with buffers not aligned to PAGE_SIZE.
1024 */
1025 bbase = (vaddr_t)uio->uio_iov[0].iov_base;
1026 base = trunc_page(bbase);
1027 bend = round_page(bbase + uio->uio_iov[0].iov_len);
1028 blen = bend - base;
1029 bpos = bbase - base;
1030
1031 if (blen > PIPE_DIRECT_CHUNK) {
1032 blen = PIPE_DIRECT_CHUNK;
1033 bend = base + blen;
1034 bcnt = PIPE_DIRECT_CHUNK - bpos;
1035 } else
1036 bcnt = uio->uio_iov[0].iov_len;
1037
1038 npages = blen / PAGE_SIZE;
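	/*
	 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
	 * blen <= PIPE_DIRECT_CHUNK): for iov_base == 0x20001e00 and
	 * iov_len == 0x2400,
	 *	base = trunc_page(0x20001e00)          == 0x20001000
	 *	bend = round_page(0x20001e00 + 0x2400) == 0x20005000
	 *	blen == 0x4000, bpos == 0xe00, bcnt == 0x2400
	 * so npages == 4 and the loan window covers the whole iovec.
	 */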
1039
1040 wpipe->pipe_map.pos = bpos;
1041 wpipe->pipe_map.cnt = bcnt;
1042
1043 /*
1044 * Free the old kva if we need more pages than we have
1045 * allocated.
1046 */
1047 if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
1048 pipe_loan_free(wpipe);
1049
1050 /* Allocate new kva. */
1051 if (!wpipe->pipe_map.kva
1052 && (error = pipe_loan_alloc(wpipe, npages, blen)))
1053 goto error;
1054
1055 /* Loan the write buffer memory from writer process */
1056 error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
1057 (void **) wpipe->pipe_map.ms, UVM_LOAN_TOPAGE);
1058 if (error)
1059 goto cleanup;
1060 res = wpipe->pipe_map.ms;
1061
  1062 	/* Enter the loaned pages into kva */
  1063 	kva = wpipe->pipe_map.kva;
  1064 	for (j = 0; j < npages; j++, kva += PAGE_SIZE)
1065 pmap_enter(pmap_kernel(), kva, res[j]->phys_addr,
1066 VM_PROT_READ, 0);
1067
1068 wpipe->pipe_state |= PIPE_DIRECTW;
1069 error = 0;
1070 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
1071 if (wpipe->pipe_state & PIPE_EOF) {
1072 error = EPIPE;
1073 break;
1074 }
1075 if (wpipe->pipe_state & PIPE_WANTR) {
1076 wpipe->pipe_state &= ~PIPE_WANTR;
1077 wakeup(wpipe);
1078 }
1079 pipeselwakeup(wpipe, wpipe);
1080 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
1081 }
1082
1083 if (error)
1084 wpipe->pipe_state &= ~PIPE_DIRECTW;
1085
1086 cleanup:
1087 pipelock(wpipe, 0);
1088 if (error || amountpipekva > maxpipekva)
1089 pipe_loan_free(wpipe);
1090 else if (res)
1091 uvm_unloanpage(res, npages);
1092 pipeunlock(wpipe);
1093
1094 if (error == EPIPE) {
1095 pipeselwakeup(wpipe, wpipe);
1096
1097 /*
1098 * If anything was read from what we offered, return success
  1099 		 * and a short write. EPIPE will be returned on the next write(2).
1100 */
1101 if (wpipe->pipe_map.cnt < bcnt) {
1102 bcnt -= wpipe->pipe_map.cnt;
1103 error = 0;
1104 }
1105 }
1106
1107 if (error) {
1108 error:
1109 wakeup(wpipe);
1110 return (error);
1111 }
1112
1113 uio->uio_resid -= bcnt;
1114 /* uio_offset not updated, not set/used for write(2) */
1115
1116 return (0);
1117 }
1118 #endif /* !PIPE_NODIRECT */
1119 #endif /* NetBSD */
1120
1121 #ifdef __FreeBSD__
1122 static int
  1123 pipe_write(fp, uio, cred, flags, p)
  1124 	struct file *fp;
  1126 	struct uio *uio;
  1127 	struct ucred *cred;
  1128 	int flags;
  1129 	struct proc *p;
1130 #elif defined(__NetBSD__)
1131 static int
1132 pipe_write(fp, offset, uio, cred, flags)
1133 struct file *fp;
1134 off_t *offset;
1135 struct uio *uio;
1136 struct ucred *cred;
1137 int flags;
1138 #endif
1139 {
1140 int error = 0;
1141 int orig_resid;
1142 struct pipe *wpipe, *rpipe;
1143
1144 rpipe = (struct pipe *) fp->f_data;
1145 wpipe = rpipe->pipe_peer;
1146
1147 /*
1148 * detect loss of pipe read side, issue SIGPIPE if lost.
1149 */
1150 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF))
1151 return (EPIPE);
1152
1153 ++wpipe->pipe_busy;
1154
1155 /*
1156 * If it is advantageous to resize the pipe buffer, do
1157 * so.
1158 */
1159 if ((uio->uio_resid > PIPE_SIZE) &&
1160 (nbigpipe < maxbigpipes) &&
1161 #ifndef PIPE_NODIRECT
1162 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1163 #endif
1164 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
1165 (wpipe->pipe_buffer.cnt == 0)) {
1166
1167 if ((error = pipelock(wpipe,1)) == 0) {
1168 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
1169 nbigpipe++;
1170 pipeunlock(wpipe);
1171 } else {
1172 /*
1173 * If an error occurred, unbusy and return, waking up
1174 * any waiting readers.
1175 */
1176 --wpipe->pipe_busy;
1177 if (wpipe->pipe_busy == 0
1178 && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1179 wpipe->pipe_state &=
1180 ~(PIPE_WANTCLOSE | PIPE_WANTR);
1181 wakeup(wpipe);
1182 }
1183
1184 return (error);
1185 }
1186 }
1187
1188 #ifdef __FreeBSD__
1189 KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
1190 #endif
1191
1192 orig_resid = uio->uio_resid;
1193 while (uio->uio_resid) {
1194 int space;
1195
1196 #ifndef PIPE_NODIRECT
1197 /*
1198 * If the transfer is large, we can gain performance if
1199 * we do process-to-process copies directly.
1200 * If the write is non-blocking, we don't use the
1201 * direct write mechanism.
1202 *
1203 * The direct write mechanism will detect the reader going
1204 * away on us.
1205 */
1206 if ((uio->uio_iov[0].iov_len >= PIPE_MINDIRECT) &&
1207 (uio->uio_resid == orig_resid) &&
1208 (fp->f_flag & FNONBLOCK) == 0 &&
1209 (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
1210 error = pipe_direct_write(wpipe, uio);
1211
1212 /*
  1213 			 * We either errored, wrote the whole buffer, or
  1214 			 * wrote part of the buffer. If the error is ENOMEM,
  1215 			 * we failed to allocate some resources for the direct
  1216 			 * write and fall back to the ordinary write. Otherwise,
1217 * break out now.
1218 */
1219 if (error != ENOMEM)
1220 break;
1221 }
  1222 #endif /* !PIPE_NODIRECT */
1223
1224 /*
  1225 		 * Pipe buffered writes cannot be coincident with
1226 * direct writes. We wait until the currently executing
1227 * direct write is completed before we start filling the
1228 * pipe buffer. We break out if a signal occurs or the
1229 * reader goes away.
1230 */
1231 retrywrite:
1232 while (wpipe->pipe_state & PIPE_DIRECTW) {
1233 if (wpipe->pipe_state & PIPE_WANTR) {
1234 wpipe->pipe_state &= ~PIPE_WANTR;
1235 wakeup(wpipe);
1236 }
1237 error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
1238 if (wpipe->pipe_state & PIPE_EOF)
1239 break;
1240 if (error)
1241 break;
1242 }
1243 if (wpipe->pipe_state & PIPE_EOF) {
1244 error = EPIPE;
1245 break;
1246 }
1247
1248 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1249
1250 /* Writes of size <= PIPE_BUF must be atomic. */
1251 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1252 space = 0;
1253
1254 if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
1255 int size; /* Transfer size */
1256 int segsize; /* first segment to transfer */
1257
1258 if ((error = pipelock(wpipe,1)) != 0)
1259 break;
1260
1261 /*
1262 * It is possible for a direct write to
1263 * slip in on us... handle it here...
1264 */
1265 if (wpipe->pipe_state & PIPE_DIRECTW) {
1266 pipeunlock(wpipe);
1267 goto retrywrite;
1268 }
1269 /*
1270 * If a process blocked in uiomove, our
1271 * value for space might be bad.
1272 *
1273 * XXX will we be ok if the reader has gone
1274 * away here?
1275 */
1276 if (space > wpipe->pipe_buffer.size -
1277 wpipe->pipe_buffer.cnt) {
1278 pipeunlock(wpipe);
1279 goto retrywrite;
1280 }
1281
1282 /*
1283 * Transfer size is minimum of uio transfer
1284 * and free space in pipe buffer.
1285 */
1286 if (space > uio->uio_resid)
1287 size = uio->uio_resid;
1288 else
1289 size = space;
1290 /*
1291 * First segment to transfer is minimum of
1292 * transfer size and contiguous space in
1293 * pipe buffer. If first segment to transfer
1294 * is less than the transfer size, we've got
1295 * a wraparound in the buffer.
1296 */
1297 segsize = wpipe->pipe_buffer.size -
1298 wpipe->pipe_buffer.in;
1299 if (segsize > size)
1300 segsize = size;
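			/*
			 * Worked example (illustrative): with a 16384-byte
			 * buffer, .in == 16000 and a transfer size of 1000,
			 * segsize is 16384 - 16000 == 384; the remaining 616
			 * bytes are copied to offset 0 and .in ends up at 616.
			 */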
1301
1302 /* Transfer first segment */
1303
1304 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1305 segsize, uio);
1306
1307 if (error == 0 && segsize < size) {
1308 /*
1309 * Transfer remaining part now, to
1310 * support atomic writes. Wraparound
1311 * happened.
1312 */
1313 #ifdef DEBUG
1314 if (wpipe->pipe_buffer.in + segsize !=
1315 wpipe->pipe_buffer.size)
1316 panic("Expected pipe buffer wraparound disappeared");
1317 #endif
1318
1319 error = uiomove(&wpipe->pipe_buffer.buffer[0],
1320 size - segsize, uio);
1321 }
1322 if (error == 0) {
1323 wpipe->pipe_buffer.in += size;
1324 if (wpipe->pipe_buffer.in >=
1325 wpipe->pipe_buffer.size) {
1326 #ifdef DEBUG
1327 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
1328 panic("Expected wraparound bad");
1329 #endif
1330 wpipe->pipe_buffer.in = size - segsize;
1331 }
1332
1333 wpipe->pipe_buffer.cnt += size;
1334 #ifdef DEBUG
1335 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
1336 panic("Pipe buffer overflow");
1337 #endif
1338
1339 }
1340 pipeunlock(wpipe);
1341 if (error)
1342 break;
1343
1344 } else {
1345 /*
1346 * If the "read-side" has been blocked, wake it up now.
1347 */
1348 if (wpipe->pipe_state & PIPE_WANTR) {
1349 wpipe->pipe_state &= ~PIPE_WANTR;
1350 wakeup(wpipe);
1351 }
1352
1353 /*
1354 * don't block on non-blocking I/O
1355 */
1356 if (fp->f_flag & FNONBLOCK) {
1357 error = EAGAIN;
1358 break;
1359 }
1360
1361 /*
1362 * We have no more space and have something to offer,
1363 * wake up select/poll.
1364 */
1365 pipeselwakeup(wpipe, wpipe);
1366
1367 wpipe->pipe_state |= PIPE_WANTW;
1368 error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
1369 if (error != 0)
1370 break;
1371 /*
1372 * If read side wants to go away, we just issue a signal
1373 * to ourselves.
1374 */
1375 if (wpipe->pipe_state & PIPE_EOF) {
1376 error = EPIPE;
1377 break;
1378 }
1379 }
1380 }
1381
1382 --wpipe->pipe_busy;
1383 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1384 wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
1385 wakeup(wpipe);
1386 } else if (wpipe->pipe_buffer.cnt > 0) {
1387 /*
1388 * If we have put any characters in the buffer, we wake up
1389 * the reader.
1390 */
1391 if (wpipe->pipe_state & PIPE_WANTR) {
1392 wpipe->pipe_state &= ~PIPE_WANTR;
1393 wakeup(wpipe);
1394 }
1395 }
1396
1397 /*
1398 * Don't return EPIPE if I/O was successful
1399 */
1400 if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
1401 && (uio->uio_resid == 0))
1402 error = 0;
1403
1404 if (error == 0)
1405 vfs_timestamp(&wpipe->pipe_mtime);
1406
1407 /*
1408 * We have something to offer, wake up select/poll.
  1409 	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
  1410 	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
1411 */
1412 if (wpipe->pipe_buffer.cnt)
1413 pipeselwakeup(wpipe, wpipe);
1414
1415 /*
1416 * Arrange for next read(2) to do a signal.
1417 */
1418 wpipe->pipe_state |= PIPE_SIGNALR;
1419
1420 return (error);
1421 }
1422
1423 /*
1424 * we implement a very minimal set of ioctls for compatibility with sockets.
1425 */
1426 int
1427 pipe_ioctl(fp, cmd, data, p)
1428 struct file *fp;
1429 u_long cmd;
1430 caddr_t data;
1431 struct proc *p;
1432 {
1433 struct pipe *mpipe = (struct pipe *)fp->f_data;
1434
1435 switch (cmd) {
1436
1437 case FIONBIO:
1438 return (0);
1439
1440 case FIOASYNC:
1441 if (*(int *)data) {
1442 mpipe->pipe_state |= PIPE_ASYNC;
1443 } else {
1444 mpipe->pipe_state &= ~PIPE_ASYNC;
1445 }
1446 return (0);
1447
1448 case FIONREAD:
1449 #ifndef PIPE_NODIRECT
1450 if (mpipe->pipe_state & PIPE_DIRECTW)
1451 *(int *)data = mpipe->pipe_map.cnt;
1452 else
1453 #endif
1454 *(int *)data = mpipe->pipe_buffer.cnt;
1455 return (0);
1456
1457 #ifdef __FreeBSD__
1458 case FIOSETOWN:
1459 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1460
1461 case FIOGETOWN:
1462 *(int *)data = fgetown(mpipe->pipe_sigio);
1463 return (0);
1464
1465 /* This is deprecated, FIOSETOWN should be used instead. */
1466 case TIOCSPGRP:
1467 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1468
1469 /* This is deprecated, FIOGETOWN should be used instead. */
1470 case TIOCGPGRP:
1471 *(int *)data = -fgetown(mpipe->pipe_sigio);
1472 return (0);
1473 #endif /* FreeBSD */
1474 #ifdef __NetBSD__
1475 case TIOCSPGRP:
1476 mpipe->pipe_pgid = *(int *)data;
1477 return (0);
1478
1479 case TIOCGPGRP:
1480 *(int *)data = mpipe->pipe_pgid;
1481 return (0);
1482 #endif /* NetBSD */
1483
1484 }
1485 return (ENOTTY);
1486 }
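/*
 * Userland sketch (illustrative; 'fd' is hypothetical): how the ioctls
 * above are typically driven on the NetBSD side.
 *
 *	int on = 1, pgrp = getpid(), nread;
 *
 *	ioctl(fd, FIOASYNC, &on);	-- sets PIPE_ASYNC
 *	ioctl(fd, TIOCSPGRP, &pgrp);	-- registers for SIGIO delivery
 *	ioctl(fd, FIONREAD, &nread);	-- bytes currently readable
 */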
1487
1488 int
1489 pipe_poll(fp, events, p)
1490 struct file *fp;
1491 int events;
1492 struct proc *p;
1493 {
1494 struct pipe *rpipe = (struct pipe *)fp->f_data;
1495 struct pipe *wpipe;
1496 int revents = 0;
1497
1498 wpipe = rpipe->pipe_peer;
1499 if (events & (POLLIN | POLLRDNORM))
1500 if ((rpipe->pipe_buffer.cnt > 0) ||
1501 #ifndef PIPE_NODIRECT
1502 (rpipe->pipe_state & PIPE_DIRECTW) ||
1503 #endif
1504 (rpipe->pipe_state & PIPE_EOF))
1505 revents |= events & (POLLIN | POLLRDNORM);
1506
1507 if (events & (POLLOUT | POLLWRNORM))
1508 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
1509 || (
1510 #ifndef PIPE_NODIRECT
1511 ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1512 #endif
1513 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1514 revents |= events & (POLLOUT | POLLWRNORM);
1515
1516 if ((rpipe->pipe_state & PIPE_EOF) ||
1517 (wpipe == NULL) ||
1518 (wpipe->pipe_state & PIPE_EOF))
1519 revents |= POLLHUP;
1520
1521 if (revents == 0) {
1522 if (events & (POLLIN | POLLRDNORM)) {
1523 selrecord(p, &rpipe->pipe_sel);
1524 #ifdef __FreeBSD__
1525 rpipe->pipe_state |= PIPE_SEL;
1526 #endif
1527 }
1528
1529 if (events & (POLLOUT | POLLWRNORM)) {
1530 selrecord(p, &wpipe->pipe_sel);
1531 #ifdef __FreeBSD__
1532 wpipe->pipe_state |= PIPE_SEL;
1533 #endif
1534 }
1535 }
1536
1537 return (revents);
1538 }
1539
1540 static int
1541 pipe_stat(fp, ub, p)
1542 struct file *fp;
1543 struct stat *ub;
1544 struct proc *p;
1545 {
1546 struct pipe *pipe = (struct pipe *)fp->f_data;
1547
1548 memset((caddr_t)ub, 0, sizeof(*ub));
1549 ub->st_mode = S_IFIFO;
1550 ub->st_blksize = pipe->pipe_buffer.size;
1551 ub->st_size = pipe->pipe_buffer.cnt;
1552 ub->st_blocks = (ub->st_size) ? 1 : 0;
1553 #ifdef __FreeBSD__
1554 ub->st_atimespec = pipe->pipe_atime;
1555 ub->st_mtimespec = pipe->pipe_mtime;
1556 ub->st_ctimespec = pipe->pipe_ctime;
1557 #endif /* FreeBSD */
1558 #ifdef __NetBSD__
  1559 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
1560 TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1561 TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1562 #endif /* NetBSD */
1563 ub->st_uid = fp->f_cred->cr_uid;
1564 ub->st_gid = fp->f_cred->cr_gid;
1565 /*
1566 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1567 * XXX (st_dev, st_ino) should be unique.
1568 */
1569 return (0);
1570 }
1571
1572 /* ARGSUSED */
1573 static int
1574 pipe_close(fp, p)
1575 struct file *fp;
1576 struct proc *p;
1577 {
1578 struct pipe *cpipe = (struct pipe *)fp->f_data;
1579
1580 #ifdef __FreeBSD__
1581 fp->f_ops = &badfileops;
1582 funsetown(cpipe->pipe_sigio);
1583 #endif
1584 fp->f_data = NULL;
1585 pipeclose(cpipe);
1586 return (0);
1587 }
1588
1589 static void
1590 pipe_free_kmem(cpipe)
1591 struct pipe *cpipe;
1592 {
1593
1594 #ifdef __FreeBSD__
1595 mtx_assert(&vm_mtx, MA_OWNED);
1596 #endif
1597 if (cpipe->pipe_buffer.buffer != NULL) {
1598 if (cpipe->pipe_buffer.size > PIPE_SIZE)
1599 --nbigpipe;
1600 amountpipekva -= cpipe->pipe_buffer.size;
1601 #ifdef __FreeBSD__
1602 kmem_free(kernel_map,
1603 (vm_offset_t)cpipe->pipe_buffer.buffer,
1604 cpipe->pipe_buffer.size);
1605 #elif defined(__NetBSD__)
1606 uvm_km_free(kernel_map,
1607 (vaddr_t)cpipe->pipe_buffer.buffer,
1608 cpipe->pipe_buffer.size);
1609 #endif /* NetBSD */
1610
1611 cpipe->pipe_buffer.buffer = NULL;
1612 }
1613 #ifndef PIPE_NODIRECT
1614 if (cpipe->pipe_map.kva != NULL) {
1615 #ifdef __FreeBSD__
1616 amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
1617 kmem_free(kernel_map,
1618 cpipe->pipe_map.kva,
1619 cpipe->pipe_buffer.size + PAGE_SIZE);
1620 #elif defined(__NetBSD__)
1621 pipe_loan_free(cpipe);
1622 #endif /* NetBSD */
1623 cpipe->pipe_map.cnt = 0;
1624 cpipe->pipe_map.kva = NULL;
1625 cpipe->pipe_map.pos = 0;
1626 cpipe->pipe_map.npages = 0;
1627 }
1628 #endif /* !PIPE_NODIRECT */
1629 }
1630
1631 /*
1632 * shutdown the pipe
1633 */
1634 static void
1635 pipeclose(cpipe)
1636 struct pipe *cpipe;
1637 {
1638 struct pipe *ppipe;
1639
1640 if (!cpipe)
1641 return;
1642
1643 pipeselwakeup(cpipe, cpipe);
1644
1645 /*
1646 * If the other side is blocked, wake it up saying that
1647 * we want to close it down.
1648 */
1649 while (cpipe->pipe_busy) {
1650 wakeup(cpipe);
1651 cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
1652 tsleep(cpipe, PRIBIO, "pipecl", 0);
1653 }
1654
1655 /*
1656 * Disconnect from peer
1657 */
1658 if ((ppipe = cpipe->pipe_peer) != NULL) {
1659 pipeselwakeup(ppipe, ppipe);
1660
1661 ppipe->pipe_state |= PIPE_EOF;
1662 wakeup(ppipe);
1663 ppipe->pipe_peer = NULL;
1664 }
1665
1666 /*
1667 * free resources
1668 */
  1669 #ifdef __FreeBSD__
1670 mtx_lock(&vm_mtx);
1671 pipe_free_kmem(cpipe);
1672 /* XXX: erm, doesn't zalloc already have its own locks and
1673 * not need the giant vm lock?
1674 */
1675 zfree(pipe_zone, cpipe);
1676 mtx_unlock(&vm_mtx);
1677 #endif /* FreeBSD */
1678
1679 #ifdef __NetBSD__
1680 pipe_free_kmem(cpipe);
1681 (void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
1682 pool_put(&pipe_pool, cpipe);
1683 #endif
1684 }
1685
1686 /*ARGSUSED*/
1687 static int
1688 pipe_kqfilter(struct file *fp, struct knote *kn)
1689 {
1690 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1691
1692 switch (kn->kn_filter) {
1693 case EVFILT_READ:
1694 kn->kn_fop = &pipe_rfiltops;
1695 break;
1696 case EVFILT_WRITE:
1697 kn->kn_fop = &pipe_wfiltops;
1698 cpipe = cpipe->pipe_peer;
1699 break;
1700 default:
1701 return (1);
1702 }
1703 kn->kn_hook = (caddr_t)cpipe;
1704
1705 #ifdef __FreeBSD__
1706 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
1707 #else
1708 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_klist, kn, kn_selnext);
1709 #endif /* __FreeBSD__ */
1710 return (0);
1711 }
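/*
 * Userland sketch (illustrative; 'fd' is hypothetical): the filters
 * above are attached and polled through kqueue(2)/kevent(2).
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	-- attach: pipe_kqfilter()
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	-- wait; kev.data is set by
 *						   filt_piperead()
 */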
1712
1713 static void
1714 filt_pipedetach(struct knote *kn)
1715 {
1716 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1717
1718 #ifdef __FreeBSD__
1719 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
1720 #else
1721 SLIST_REMOVE(&cpipe->pipe_sel.si_klist, kn, knote, kn_selnext);
1722 #endif /* __FreeBSD__ */
1723 }
1724
1725 /*ARGSUSED*/
1726 static int
1727 filt_piperead(struct knote *kn, long hint)
1728 {
1729 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1730 struct pipe *wpipe = rpipe->pipe_peer;
1731
1732 kn->kn_data = rpipe->pipe_buffer.cnt;
1733 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1734 kn->kn_data = rpipe->pipe_map.cnt;
1735
1736 if ((rpipe->pipe_state & PIPE_EOF) ||
1737 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1738 kn->kn_flags |= EV_EOF;
1739 return (1);
1740 }
1741 return (kn->kn_data > 0);
1742 }
1743
1744 /*ARGSUSED*/
1745 static int
1746 filt_pipewrite(struct knote *kn, long hint)
1747 {
1748 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1749 struct pipe *wpipe = rpipe->pipe_peer;
1750
1751 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1752 kn->kn_data = 0;
1753 kn->kn_flags |= EV_EOF;
1754 return (1);
1755 }
1756 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1757 if (wpipe->pipe_state & PIPE_DIRECTW)
1758 kn->kn_data = 0;
1759
1760 return (kn->kn_data >= PIPE_BUF);
1761 }
1762
1763 #ifdef __NetBSD__
1764 static int
1765 pipe_fcntl(fp, cmd, data, p)
1766 struct file *fp;
1767 u_int cmd;
1768 caddr_t data;
1769 struct proc *p;
1770 {
1771 if (cmd == F_SETFL)
1772 return (0);
1773 else
1774 return (EOPNOTSUPP);
1775 }
1776
1777 /*
1778 * Handle pipe sysctls.
1779 */
1780 int
1781 sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
1782 int *name;
1783 u_int namelen;
1784 void *oldp;
1785 size_t *oldlenp;
1786 void *newp;
1787 size_t newlen;
1788 {
1789 /* All sysctl names at this level are terminal. */
1790 if (namelen != 1)
1791 return (ENOTDIR); /* overloaded */
1792
1793 switch (name[0]) {
1794 case KERN_PIPE_MAXKVASZ:
1795 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
1796 case KERN_PIPE_LIMITKVA:
1797 return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
1798 case KERN_PIPE_MAXBIGPIPES:
1799 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
1800 case KERN_PIPE_NBIGPIPES:
1801 return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
1802 case KERN_PIPE_KVASIZE:
1803 return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
1804 default:
1805 return (EOPNOTSUPP);
1806 }
1807 /* NOTREACHED */
1808 }
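/*
 * Userland sketch (illustrative; assumes the KERN_PIPE node under
 * CTL_KERN, per <sys/sysctl.h>): reading one of the values handled
 * above with sysctl(3).
 *
 *	int mib[3] = { CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE };
 *	int kva;
 *	size_t len = sizeof(kva);
 *
 *	sysctl(mib, 3, &kva, &len, NULL, 0);
 */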
1809
1810 /*
1811 * Initialize pipe structs.
1812 */
1813 void
1814 pipe_init(void)
1815 {
1816 pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
1817 0, NULL, NULL, M_PIPE);
1818 }
1819
  1820 #endif /* __NetBSD__ */
1821