/*	$NetBSD: sys_pipe.c,v 1.29 2002/11/01 21:46:51 kristerw Exp $	*/

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 *
 * Adaptation for NetBSD UVM, including the uvm_loan() based direct write,
 * was written by Jaromir Dolecek.
 */
/*
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
 * those pages are also wired), and the receiving process can copy it
 * directly from the pages in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code on
 * FreeBSD arranges to copy the buffer supplied by the user process to a
 * pageable kernel buffer, and the receiving process will grab the data
 * from the pageable kernel buffer.  Since signals don't happen all that
 * often, the copy operation is normally eliminated.
 * On NetBSD, the pages are mapped read-only, COW for the kernel by
 * uvm_loan(), so no explicit handling needs to be done; everything is
 * handled by the standard VM facilities.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
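
/*
 * For example, from userland (a sketch; PIPE_MINDIRECT is defined in
 * <sys/pipe.h>):
 *
 *	int pfd[2];
 *	char small[512], big[64 * 1024];
 *
 *	pipe(pfd);
 *	write(pfd[1], small, sizeof(small));
 *		=> copied through the kernel buffer (small write mode)
 *	write(pfd[1], big, sizeof(big));
 *		=> iov_len >= PIPE_MINDIRECT, so the direct (page loan)
 *		   path is attempted; it falls back to buffered writes
 *		   if resources are unavailable
 */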

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.29 2002/11/01 21:46:51 kristerw Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#ifdef __FreeBSD__
#include <sys/mutex.h>
#endif
#ifdef __NetBSD__
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <uvm/uvm.h>
#include <sys/sysctl.h>
#endif /* NetBSD, FreeBSD */

#include <sys/pipe.h>

#ifdef __NetBSD__
/*
 * Avoid microtime(9), it's slow.  We don't guard the read from time(9)
 * with splclock(9) since we don't actually need to be THAT sure the access
 * is atomic.
 */
#define vfs_timestamp(tv)	(*(tv) = time)

/* we call it si_klist */
#define si_note			si_klist

#endif

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
#ifdef __FreeBSD__
static int pipe_read(struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int pipe_write(struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int pipe_close(struct file *fp, struct thread *td);
static int pipe_poll(struct file *fp, int events, struct ucred *cred,
		struct thread *td);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb, struct thread *td);
static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
		struct thread *td);

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

#define PIPE_GET_GIANT(pipe)						\
	do {								\
		PIPE_UNLOCK(pipe);					\
		mtx_lock(&Giant);					\
	} while (0)

#define PIPE_DROP_GIANT(pipe)						\
	do {								\
		mtx_unlock(&Giant);					\
		PIPE_LOCK(pipe);					\
	} while (0)

#endif /* FreeBSD */

#ifdef __NetBSD__
static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close(struct file *fp, struct proc *p);
static int pipe_poll(struct file *fp, int events, struct proc *p);
static int pipe_fcntl(struct file *fp, u_int com, caddr_t data,
		struct proc *p);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb, struct proc *p);
static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
		struct proc *p);

static struct fileops pipeops =
    { pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
      pipe_stat, pipe_close, pipe_kqfilter };

/* XXXSMP perhaps use spinlocks & KERNEL_PROC_(UN)LOCK() ? just clear now */
#define PIPE_GET_GIANT(pipe)
#define PIPE_DROP_GIANT(pipe)
#define GIANT_REQUIRED

#endif /* NetBSD */

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
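
/*
 * E.g. with the default 16 kB PIPE_SIZE, a writer blocked on a full pipe
 * is woken up again once readers drain the buffer below MINPIPESIZE
 * (about 5.3 kB); see the hysteresis handling in pipe_read() below.
 */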

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)
static int maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)
static int limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes.
 */
#define LIMITBIGPIPES	32
static int maxbigpipes = LIMITBIGPIPES;
static int nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static int amountpipekva = 0;

static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe **cpipep, int allockva);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe, struct pipe *sigp);
#ifndef PIPE_NODIRECT
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
#endif
static int pipespace(struct pipe *cpipe, int size);

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
static int pipe_loan_alloc(struct pipe *, int);
static void pipe_loan_free(struct pipe *);
#endif /* !PIPE_NODIRECT */

static struct pool pipe_pool;
#endif /* NetBSD */

#ifdef __FreeBSD__
static vm_zone_t pipe_zone;

static void pipeinit(void *dummy __unused);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
}
#endif /* FreeBSD */

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.
 */

/* ARGSUSED */
#ifdef __FreeBSD__
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
#elif defined(__NetBSD__)
int
sys_pipe(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
#endif
{
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;
#ifdef __FreeBSD__
	struct mtx *pmtx;

	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));

	pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (ENFILE);
	}

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	fhold(rf);
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	error = falloc(td, &wf, &fd);
	if (error) {
		struct filedesc *fdp = td->td_proc->p_fd;
		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else
			FILEDESC_UNLOCK(fdp);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	td->td_retval[1] = fd;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	mtx_init(pmtx, "pipe mutex", MTX_DEF);
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
	fdrop(rf, td);
#endif /* FreeBSD */

#ifdef __NetBSD__
	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	/*
	 * Note: the file structure returned from falloc() is marked
	 * as 'larval' initially.  Unless we mark it as 'mature' by
	 * FILE_SET_MATURE(), any attempt to do anything with it would
	 * return EBADF, including e.g. dup(2) or close(2).  This avoids
	 * file descriptor races if we block in the second falloc().
	 */

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf);
	FILE_SET_MATURE(wf);
	FILE_UNUSE(rf, p);
	FILE_UNUSE(wf, p);
	return (0);
free3:
	FILE_UNUSE(rf, p);
	ffree(rf);
	fdremove(p->p_fd, retval[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);
#endif /* NetBSD */

	return (error);
}

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it
 * fails, it retains the old buffer and returns ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
#ifdef __FreeBSD__
	struct vm_object *object;
	int npages, error;

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, object, 0,
	    (vm_offset_t *) &buffer, size, 1,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Allocate pageable virtual address space.  Physical memory is
	 * allocated on demand.
	 */
	buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
	if (buffer == NULL)
		return (ENOMEM);
#endif /* NetBSD */

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	cpipe->pipe_buffer.object = object;
#endif
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe.
 */
static int
pipe_create(cpipep, allockva)
	struct pipe **cpipep;
	int allockva;
{
	struct pipe *cpipe;
	int error;

#ifdef __FreeBSD__
	*cpipep = zalloc(pipe_zone);
#endif
#ifdef __NetBSD__
	*cpipep = pool_get(&pipe_pool, PR_WAITOK);
#endif
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* Initialize */
	memset(cpipe, 0, sizeof(*cpipe));
	cpipe->pipe_state = PIPE_SIGNALR;

#ifdef __FreeBSD__
	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
#endif
	if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
#ifdef __NetBSD__
	cpipe->pipe_pgid = NO_PID;
	lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
#endif

	return (0);
}

/*
 * Lock a pipe for I/O, blocking other access.
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
#endif

#ifdef __NetBSD__
	do {
		error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
	} while (!catch && (error == EINTR || error == ERESTART));
	return (error);
#endif
}

/*
 * Unlock a pipe I/O lock.
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
#endif

#ifdef __NetBSD__
	lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
#endif
}

/*
 * Select/poll wakeup.  This also sends SIGIO to the process connected to
 * the 'sigp' side of the pipe.
 */
static __inline void
pipeselwakeup(selp, sigp)
	struct pipe *selp, *sigp;
{

#ifdef __FreeBSD__
	if (selp->pipe_state & PIPE_SEL) {
		selp->pipe_state &= ~PIPE_SEL;
		selwakeup(&selp->pipe_sel);
	}
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
		pgsigio(sigp->pipe_sigio, SIGIO, 0);
	KNOTE(&selp->pipe_sel.si_note, 0);
#endif

#ifdef __NetBSD__
	selnotify(&selp->pipe_sel, 0);
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) &&
	    sigp->pipe_pgid != NO_PID) {
		struct proc *p;

		if (sigp->pipe_pgid < 0)
			gsignal(-sigp->pipe_pgid, SIGIO);
		else if (sigp->pipe_pgid > 0 &&
		    (p = pfind(sigp->pipe_pgid)) != NULL)
			psignal(p, SIGIO);
	}
#endif /* NetBSD */
}

/* ARGSUSED */
#ifdef __FreeBSD__
static int
pipe_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
#elif defined(__NetBSD__)
static int
pipe_read(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	ocnt = rpipe->pipe_buffer.cnt;

	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
		    (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t va;

			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * We want to read more, wake up select/poll.
			 */
			pipeselwakeup(rpipe, rpipe->pipe_peer);

			rpipe->pipe_state |= PIPE_WANTR;
#ifdef __FreeBSD__
			error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "piperd", 0);
#else
			error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
#endif
			if (error != 0 || (error = pipelock(rpipe, 1)))
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
		rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/*
	 * If anything was read off the buffer, signal to the writer that it's
	 * possible to write more data.  Also send a signal if we are here for
	 * the first time after the last write.
	 */
	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
	    && (ocnt != rpipe->pipe_buffer.cnt ||
	    (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	size_t size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	size = uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
	    wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > maxpipekva) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	memcpy((caddr_t) wpipe->pipe_buffer.buffer,
	    (caddr_t) wpipe->pipe_map.kva + pos, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe, wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* FreeBSD */

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static int
pipe_loan_alloc(wpipe, npages)
	struct pipe *wpipe;
	int npages;
{
	vsize_t len;

	len = (vsize_t)npages << PAGE_SHIFT;
	wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, len);
	if (wpipe->pipe_map.kva == 0)
		return (ENOMEM);

	amountpipekva += len;
	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
	    M_WAITOK);
	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(wpipe)
	struct pipe *wpipe;
{
	vsize_t len;

	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len);
	wpipe->pipe_map.kva = 0;
	amountpipekva -= len;
	free(wpipe->pipe_map.pgs, M_PIPE);
	wpipe->pipe_map.pgs = NULL;
}

/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, npages, j;
	struct vm_page **pgs;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
		goto retry;
	}

	/*
	 * Handle first PIPE_DIRECT_CHUNK bytes of buffer.  Deal with buffers
	 * not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;
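
	/*
	 * Example of the arithmetic above: with 4 kB pages, iov_base ==
	 * 0x201800 and iov_len == 4096 give base = 0x201000 and
	 * bend = 0x203000, so blen = 0x2000 (npages = 2, since the buffer
	 * straddles a page boundary), bpos = 0x800 and bcnt = 4096.
	 */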

	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error) {
			goto error;
		}
	}

	/* Loan the write buffer memory from writer process */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
	    pgs, UVM_LOAN_TOPAGE);
	if (error) {
		pgs = NULL;
		goto cleanup;
	}

	/* Enter the loaned pages to kva */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
	}
	pmap_update(pmap_kernel());

	wpipe->pipe_state |= PIPE_DIRECTW;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	if (error)
		wpipe->pipe_state &= ~PIPE_DIRECTW;

cleanup:
	pipelock(wpipe, 0);
	if (pgs != NULL) {
		pmap_kremove(wpipe->pipe_map.kva, blen);
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);
	pipeunlock(wpipe);

	if (error) {
		pipeselwakeup(wpipe, wpipe);

		/*
		 * If nothing was read from what we offered, return the error
		 * straight away.  Otherwise update the uio residual first.
		 * The caller will deal with the error condition, returning
		 * a short write, the error, or restarting the write(2)
		 * as appropriate.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
error:
			wakeup(wpipe);
			return (error);
		}

		bcnt -= wpipe->pipe_map.cnt;
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* NetBSD */

#ifdef __FreeBSD__
static int
pipe_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
#elif defined(__NetBSD__)
static int
pipe_write(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	int error = 0;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}

	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			PIPE_GET_GIANT(rpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			PIPE_DROP_GIANT(rpipe);
			pipeunlock(wpipe);
		} else {
			/*
			 * If an error occurred, unbusy and return, waking up
			 * any waiting readers.
			 */
			--wpipe->pipe_busy;
			if (wpipe->pipe_busy == 0
			    && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
				wpipe->pipe_state &=
				    ~(PIPE_WANTCLOSE | PIPE_WANTR);
				wakeup(wpipe);
			}
			PIPE_UNLOCK(rpipe);
			return (error);
		}
	}

#ifdef __FreeBSD__
	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return (error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
#endif

	while (uio->uio_resid) {
		size_t space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(wpipe, uio);

			/*
			 * Break out if an error occurred, unless it's ENOMEM.
			 * ENOMEM means we failed to allocate some resources
			 * for the direct write, so we just fall back to
			 * the ordinary write.  If the direct write was
			 * successful, process the rest of the data via
			 * the ordinary write.
			 */
			if (!error)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* !PIPE_NODIRECT */

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
#ifdef __FreeBSD__
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
#else
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
#endif
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;
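
		/*
		 * E.g. if only 100 bytes are free and the caller writes
		 * 512 bytes, we sleep for more space rather than doing a
		 * partial copy: POSIX guarantees that writes of up to
		 * PIPE_BUF bytes (at least 512) are not interleaved.
		 */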

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			if ((error = pipelock(wpipe, 1)) != 0)
				break;

			/*
			 * It is possible for a direct write to
			 * slip in on us... handle it here...
			 */
			if (wpipe->pipe_state & PIPE_DIRECTW) {
				pipeunlock(wpipe);
				goto retrywrite;
			}
			/*
			 * If a process blocked in uiomove, our
			 * value for space might be bad.
			 *
			 * XXX will we be ok if the reader has gone
			 * away here?
			 */
			if (space > wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.cnt) {
				pipeunlock(wpipe);
				goto retrywrite;
			}

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;
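
			/*
			 * E.g. for a 16384-byte buffer with in == 16000 and
			 * a 1000-byte transfer: segsize = 384 bytes go to
			 * the end of the buffer, and the remaining 616
			 * bytes wrap around to offset 0 below.
			 */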

			/* Transfer first segment */

			PIPE_UNLOCK(rpipe);
			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
			    segsize, uio);
			PIPE_LOCK(rpipe);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
#ifdef DEBUG
				if (wpipe->pipe_buffer.in + segsize !=
				    wpipe->pipe_buffer.size)
					panic("Expected pipe buffer wraparound disappeared");
#endif

				PIPE_UNLOCK(rpipe);
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
				    size - segsize, uio);
				PIPE_LOCK(rpipe);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
#ifdef DEBUG
					if (wpipe->pipe_buffer.in !=
					    size - segsize +
					    wpipe->pipe_buffer.size)
						panic("Expected wraparound bad");
#endif
					wpipe->pipe_buffer.in =
					    size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
#ifdef DEBUG
				if (wpipe->pipe_buffer.cnt >
				    wpipe->pipe_buffer.size)
					panic("Pipe buffer overflow");
#endif
			}
			pipeunlock(wpipe);
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe, wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
#ifdef __FreeBSD__
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
#else
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
#endif
			if (error != 0)
				break;
			/*
			 * If the read side wants to go away, we just issue
			 * a signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
		wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful.
	 */
	if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
	    && (uio->uio_resid == 0))
		error = 0;

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe, wpipe);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	PIPE_UNLOCK(rpipe);
	return (error);
}

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
#ifdef __FreeBSD__
pipe_ioctl(fp, cmd, data, td)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct thread *td;
#else
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
#endif
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		PIPE_LOCK(mpipe);
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIONREAD:
		PIPE_LOCK(mpipe);
#ifndef PIPE_NODIRECT
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
#endif
			*(int *)data = mpipe->pipe_buffer.cnt;
		PIPE_UNLOCK(mpipe);
		return (0);

#ifdef __FreeBSD__
	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);
#endif /* FreeBSD */
#ifdef __NetBSD__
	case TIOCSPGRP:
		mpipe->pipe_pgid = *(int *)data;
		return (0);

	case TIOCGPGRP:
		*(int *)data = mpipe->pipe_pgid;
		return (0);
#endif /* NetBSD */

	}
	return (EPASSTHROUGH);
}

int
#ifdef __FreeBSD__
pipe_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
#elif defined(__NetBSD__)
pipe_poll(fp, events, td)
	struct file *fp;
	int events;
	struct proc *td;
#endif
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTW) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
		    || (
#ifndef PIPE_NODIRECT
		    ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
#endif
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	PIPE_UNLOCK(rpipe);

	return (revents);
}

static int
#ifdef __FreeBSD__
pipe_stat(fp, ub, td)
	struct file *fp;
	struct stat *ub;
	struct thread *td;
#else
pipe_stat(fp, ub, p)
	struct file *fp;
	struct stat *ub;
	struct proc *p;
#endif
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	memset((caddr_t)ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
#ifdef __FreeBSD__
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
#endif /* FreeBSD */
#ifdef __NetBSD__
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
#endif /* NetBSD */
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
#ifdef __FreeBSD__
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
#else
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
#endif
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

#ifdef __FreeBSD__
	fp->f_ops = &badfileops;
	funsetown(cpipe->pipe_sigio);
#endif
	fp->f_data = NULL;
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));
#endif

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
#ifdef __FreeBSD__
		kmem_free(kernel_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
#elif defined(__NetBSD__)
		uvm_km_free(kernel_map,
		    (vaddr_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
#endif /* NetBSD */
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != 0) {
#ifdef __FreeBSD__
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
		    cpipe->pipe_map.kva,
		    cpipe->pipe_buffer.size + PAGE_SIZE);
#elif defined(__NetBSD__)
		pipe_loan_free(cpipe);
#endif /* NetBSD */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * Shut down the pipe.
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
#ifdef __FreeBSD__
	int hadpeer = 0;
#endif

	if (cpipe == NULL)
		return;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	pipeselwakeup(cpipe, cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
#ifdef __FreeBSD__
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
#else
		tsleep(cpipe, PRIBIO, "pipecl", 0);
#endif
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
#ifdef __FreeBSD__
		hadpeer++;
#endif
		pipeselwakeup(ppipe, ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
#ifdef __FreeBSD__
		KNOTE(&ppipe->pipe_sel.si_note, 0);
#endif
		ppipe->pipe_peer = NULL;
	}

	/*
	 * Free resources.
	 */
#ifdef __FreeBSD__
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
#endif

#ifdef __NetBSD__
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_UNLOCK(cpipe);

	pipe_free_kmem(cpipe);
	(void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
	pool_put(&pipe_pool, cpipe);
#endif
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_WRITE:
		/* need the peer structure, not our own */
		cpipe = cpipe->pipe_peer;

		/* if reader end already closed, just return */
		if (cpipe == NULL)
			return;

		break;
	default:
		/* nothing to do */
		break;
	}

#ifdef DIAGNOSTIC
	if (kn->kn_hook != cpipe)
		panic("filt_pipedetach: inconsistent knote");
#endif

	PIPE_LOCK(cpipe);
	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}

static const struct filterops pipe_rfiltops =
    { 1, NULL, filt_pipedetach, filt_piperead };
static const struct filterops pipe_wfiltops =
    { 1, NULL, filt_pipedetach, filt_pipewrite };

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			return (EBADF);
		}
		break;
	default:
		return (1);
	}
	kn->kn_hook = cpipe;

	PIPE_LOCK(cpipe);
	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	PIPE_UNLOCK(cpipe);
	return (0);
}

#ifdef __NetBSD__
static int
pipe_fcntl(fp, cmd, data, p)
	struct file *fp;
	u_int cmd;
	caddr_t data;
	struct proc *p;
{

	if (cmd == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*
 * Handle pipe sysctls.
 */
int
sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_PIPE_MAXKVASZ:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
	case KERN_PIPE_LIMITKVA:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
	case KERN_PIPE_MAXBIGPIPES:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
	case KERN_PIPE_NBIGPIPES:
		return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
	case KERN_PIPE_KVASIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
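
/*
 * These knobs are reachable from userland via sysctl(3)/sysctl(8);
 * assuming the standard kern.pipe.* node names, e.g.:
 *
 *	sysctl -w kern.pipe.maxbigpipes=64
 *	sysctl kern.pipe.kvasize
 */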

/*
 * Initialize pipe structs.
 */
void
pipe_init(void)
{

	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl", NULL);
}

#endif /* __NetBSD__ */