/*	$NetBSD: sys_pipe.c,v 1.17 2001/10/28 20:47:15 jdolecek Exp $	*/

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.82 2001/06/15 20:45:01 jlemon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 *
 * Adaptation for NetBSD UVM, including uvm_loan() based direct write, was
 * written by Jaromir Dolecek.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode. The small write mode acts like conventional pipes with
 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
 * those pages are also wired), and the receiving process can copy it directly
 * from the pages in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side. In that case, the pipe code
 * arranges to copy the buffer supplied by the user process on FreeBSD, to
 * a pageable kernel buffer, and the receiving process will grab the data
 * from the pageable kernel buffer. Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 * On NetBSD, the pages are mapped read-only, copy-on-write for the kernel,
 * by uvm_loan(), so no explicit handling needs to be done; everything is
 * handled by the standard VM facilities.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching. PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
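
/*
 * Illustrative sketch (not part of this file): from userland, the choice
 * of write path described above is invisible; both of the writes below
 * have ordinary pipe semantics, but the second is large enough that it
 * may be handled by the direct (page-loan) mechanism rather than the
 * kernel buffer. The buffer sizes here are hypothetical examples.
 *
 *	char small[128], big[128 * 1024];
 *	int fds[2];
 *
 *	pipe(fds);
 *	write(fds[1], small, sizeof(small));	(buffered path)
 *	write(fds[1], big, sizeof(big));	(may use the direct path)
 */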

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/lock.h>
#ifdef __FreeBSD__
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/sysproto.h>
#elif defined(__NetBSD__)
#include <sys/select.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <uvm/uvm.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#endif /* NetBSD, FreeBSD */

#include <sys/pipe.h>

#ifdef __NetBSD__
/*
 * Avoid microtime(9), it's slow. We don't guard the read from time(9)
 * with splclock(9) since we don't actually need to be THAT sure the access
 * is atomic.
 */
#define vfs_timestamp(tv)	(*(tv) = time)
#endif

/*
 * Use this define if you want to disable *fancy* VM things. Expect an
 * approx 30% decrease in transfer rate. This could be useful for
 * OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
#ifdef __FreeBSD__
static int pipe_read __P((struct file *fp, struct uio *uio,
	struct ucred *cred, int flags, struct proc *p));
static int pipe_write __P((struct file *fp, struct uio *uio,
	struct ucred *cred, int flags, struct proc *p));
static int pipe_close __P((struct file *fp, struct proc *p));
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
	struct proc *p));
static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data,
	struct proc *p));

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
#endif /* FreeBSD */

#ifdef __NetBSD__
static int pipe_read __P((struct file *fp, off_t *offset, struct uio *uio,
	struct ucred *cred, int flags));
static int pipe_write __P((struct file *fp, off_t *offset, struct uio *uio,
	struct ucred *cred, int flags));
static int pipe_close __P((struct file *fp, struct proc *p));
static int pipe_poll __P((struct file *fp, int events, struct proc *p));
static int pipe_fcntl __P((struct file *fp, u_int com, caddr_t data,
	struct proc *p));
static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data,
	struct proc *p));

static struct fileops pipeops =
	{ pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
	  pipe_stat, pipe_close };
#endif /* NetBSD */

/*
 * Default pipe buffer size(s); this can be fairly large now because pipe
 * space is pageable. The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)
static int maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general, though.
 */
#define LIMITPIPEKVA (16*1024*1024)
static int limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int maxbigpipes = LIMITBIGPIPES;
static int nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static int amountpipekva = 0;

static void pipeclose __P((struct pipe *cpipe));
static void pipe_free_kmem __P((struct pipe *cpipe));
static int pipe_create __P((struct pipe **cpipep, int allockva));
static __inline int pipelock __P((struct pipe *cpipe, int catch));
static __inline void pipeunlock __P((struct pipe *cpipe));
static __inline void pipeselwakeup __P((struct pipe *selp,
	struct pipe *sigp));
static int pipespace __P((struct pipe *cpipe, int size));

#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
static void pipe_clone_write_buffer __P((struct pipe *wpipe));
#endif

static vm_zone_t pipe_zone;
#endif /* FreeBSD */

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
static __inline int pipe_direct_write __P((struct pipe *wpipe,
	struct uio *uio));
static __inline int pipe_loan_alloc __P((struct pipe *wpipe, int npages,
	vsize_t blen));
static void pipe_loan_free __P((struct pipe *wpipe));
#endif /* PIPE_NODIRECT */

static struct pool pipe_pool;
#endif /* NetBSD */

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
#ifdef __FreeBSD__
int
pipe(p, uap)
	struct proc *p;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
#elif defined(__NetBSD__)
int
sys_pipe(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
#endif
{
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

#ifdef __FreeBSD__
	if (pipe_zone == NULL)
		pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	fhold(rf);
	p->p_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	error = falloc(p, &wf, &fd);
	if (error) {
		struct filedesc *fdp = p->p_fd;

		if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
			fdp->fd_ofiles[p->p_retval[0]] = NULL;
			fdrop(rf, p);
		}
		fdrop(rf, p);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	p->p_retval[1] = fd;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	fdrop(rf, p);
#endif /* FreeBSD */

#ifdef __NetBSD__
	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	/*
	 * Note: the file structure returned from falloc() is marked
	 * as 'larval' initially. Unless we mark it as 'mature' by
	 * FILE_SET_MATURE(), any attempt to do anything with it would
	 * return EBADF, including e.g. dup(2) or close(2). This avoids
	 * file descriptor races if we block in the second falloc().
	 */

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	retval[1] = fd;
	wf->f_flag = FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf);
	FILE_SET_MATURE(wf);
	FILE_UNUSE(rf, p);
	FILE_UNUSE(wf, p);
	return (0);
free3:
	FILE_UNUSE(rf, p);
	ffree(rf);
	fdremove(p->p_fd, retval[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);
#endif /* NetBSD */

	return (error);
}

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it
 * fails, it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
#ifdef __FreeBSD__
	struct vm_object *object;
	int npages, error;

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 */
	mtx_lock(&vm_mtx);
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		mtx_unlock(&vm_mtx);
		return (ENOMEM);
	}
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Allocate pageable virtual address space. Physical memory is
	 * allocated on demand.
	 */
	buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
	if (buffer == NULL)
		return (ENOMEM);
#endif /* NetBSD */

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	mtx_unlock(&vm_mtx);
	cpipe->pipe_buffer.object = object;
#endif
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep, allockva)
	struct pipe **cpipep;
	int allockva;
{
	struct pipe *cpipe;
	int error;

#ifdef __FreeBSD__
	*cpipep = zalloc(pipe_zone);
#endif
#ifdef __NetBSD__
	*cpipep = pool_get(&pipe_pool, M_WAITOK);
#endif
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* Initialize */
	memset(cpipe, 0, sizeof(*cpipe));
	cpipe->pipe_state = PIPE_SIGNALR;

	if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
#ifdef __NetBSD__
	cpipe->pipe_pgid = NO_PID;
	lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
#endif

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

#ifdef __FreeBSD__
	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO,
			"pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
#endif

#ifdef __NetBSD__
	do {
		error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
	} while (!catch && (error == EINTR || error == ERESTART));
	return (error);
#endif
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{
#ifdef __FreeBSD__
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
#endif

#ifdef __NetBSD__
	lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
#endif
}

/*
 * Select/poll wakeup. This also sends SIGIO to the peer connected to
 * the 'sigpipe' side of the pipe.
 */
static __inline void
pipeselwakeup(selp, sigp)
	struct pipe *selp, *sigp;
{
	if (selp->pipe_state & PIPE_SEL) {
		selp->pipe_state &= ~PIPE_SEL;
		selwakeup(&selp->pipe_sel);
	}
#ifdef __FreeBSD__
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
		pgsigio(sigp->pipe_sigio, SIGIO, 0);
	KNOTE(&selp->pipe_sel.si_note, 0);
#endif

#ifdef __NetBSD__
	if (sigp && (sigp->pipe_state & PIPE_ASYNC)
	    && sigp->pipe_pgid != NO_PID) {
		struct proc *p;

		if (sigp->pipe_pgid < 0)
			gsignal(-sigp->pipe_pgid, SIGIO);
		else if (sigp->pipe_pgid > 0
		    && (p = pfind(sigp->pipe_pgid)) != 0)
			psignal(p, SIGIO);
	}
#endif /* NetBSD */
}

/* ARGSUSED */
#ifdef __FreeBSD__
static int
pipe_read(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct proc *p;
#elif defined(__NetBSD__)
static int
pipe_read(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;

	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	ocnt = rpipe->pipe_buffer.cnt;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size -
			    rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning. This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t va;

			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up
			 * now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * Unlock the pipe buffer for our remaining
			 * processing. We will either break out with an
			 * error or we will sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * We want to read more, wake up select/poll.
			 */
			pipeselwakeup(rpipe, rpipe->pipe_peer);

			rpipe->pipe_state |= PIPE_WANTR;
			error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
			if (error != 0 || (error = pipelock(rpipe, 1)))
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
		rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/*
	 * If anything was read off the buffer, signal to the writer it's
	 * possible to write more data. Also send signal if we are here for
	 * the first time after last write.
	 */
	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
	    && (ocnt != rpipe->pipe_buffer.cnt
		|| (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	return (error);
}

#ifdef __FreeBSD__
#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	size_t size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	size = uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	mtx_lock(&vm_mtx);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			mtx_unlock(&vm_mtx);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	mtx_unlock(&vm_mtx);
	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	mtx_lock(&vm_mtx);
	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > maxpipekva) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	mtx_unlock(&vm_mtx);
}

/*
 * In the case of a signal, the writing process might go away. This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	memcpy((caddr_t) wpipe->pipe_buffer.buffer,
	    (caddr_t) wpipe->pipe_map.kva + pos, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism. Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer. Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			pipe_destroy_write_buffer(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe, wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif /* !PIPE_NODIRECT */
#endif /* FreeBSD */

#ifdef __NetBSD__
#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static __inline int
pipe_loan_alloc(wpipe, npages, blen)
	struct pipe *wpipe;
	int npages;
	vsize_t blen;
{
	wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, blen);
	if (wpipe->pipe_map.kva == NULL)
		return (ENOMEM);

	amountpipekva += blen;
	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.ms = (struct vm_page **) malloc(
		npages * sizeof(struct vm_page *), M_PIPE, M_WAITOK);

	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(wpipe)
	struct pipe *wpipe;
{
	uvm_km_free(kernel_map, wpipe->pipe_map.kva,
		wpipe->pipe_map.npages * PAGE_SIZE);
	wpipe->pipe_map.kva = NULL;
	amountpipekva -= wpipe->pipe_map.npages * PAGE_SIZE;
	free(wpipe->pipe_map.ms, M_PIPE);
	wpipe->pipe_map.ms = NULL;
}

/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism. Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer. Then the direct mapping write is set-up.
 */
static __inline int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, npages, j;
	struct vm_page **res = NULL;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
		goto retry;
	}

	/*
	 * Handle first PIPE_CHUNK_SIZE bytes of buffer. Deal with buffers
	 * not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else
		bcnt = uio->uio_iov->iov_len;

	npages = blen / PAGE_SIZE;

	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (!wpipe->pipe_map.kva
	    && (error = pipe_loan_alloc(wpipe, npages, blen)))
		goto error;

	/* Loan the write buffer memory from writer process */
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
		(void **) wpipe->pipe_map.ms, UVM_LOAN_TOPAGE);
	if (error)
		goto cleanup;
	res = wpipe->pipe_map.ms;

	/* Enter the loaned pages to kva */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE)
		pmap_enter(pmap_kernel(), kva, res[j]->phys_addr,
			VM_PROT_READ, 0);
	pmap_update(pmap_kernel());

	wpipe->pipe_state |= PIPE_DIRECTW;
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	if (error)
		wpipe->pipe_state &= ~PIPE_DIRECTW;

cleanup:
	pipelock(wpipe, 0);
	if (res)
		uvm_unloan((void **) res, npages, UVM_LOAN_TOPAGE);
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);
	pipeunlock(wpipe);

	if (error) {
		pipeselwakeup(wpipe, wpipe);

		/*
		 * If nothing was read from what we offered, return the
		 * error straight away. Otherwise update uio resid first.
		 * The caller will deal with the error condition, returning
		 * a short write, an error, or restarting the write(2) as
		 * appropriate.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
error:
			wakeup(wpipe);
			return (error);
		}

		bcnt -= wpipe->pipe_map.cnt;
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	(char *) uio->uio_iov->iov_base += bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (error);
}
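
/*
 * Worked example of the alignment arithmetic above (illustrative only,
 * assuming 4 KB pages): for a write of iov_len = 10000 bytes at user
 * address bbase = 0x2000f00,
 *
 *	base = trunc_page(0x2000f00)         = 0x2000000
 *	bend = round_page(0x2000f00 + 10000) = 0x2004000
 *	blen = bend - base                   = 0x4000 (4 pages)
 *	bpos = bbase - base                  = 0xf00
 *
 * so four pages are loaned and the transfer starts 0xf00 bytes into the
 * first loaned page. Had blen exceeded PIPE_DIRECT_CHUNK, only the first
 * chunk would be transferred by this pass.
 */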
#endif /* !PIPE_NODIRECT */
#endif /* NetBSD */

#ifdef __FreeBSD__
static int
pipe_write(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct proc *p;
#elif defined(__NetBSD__)
static int
pipe_write(fp, offset, uio, cred, flags)
	struct file *fp;
	off_t *offset;
	struct uio *uio;
	struct ucred *cred;
	int flags;
#endif
{
	int error = 0;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF))
		return (EPIPE);

	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		} else {
			/*
			 * If an error occurred, unbusy and return, waking up
			 * any waiting readers.
			 */
			--wpipe->pipe_busy;
			if (wpipe->pipe_busy == 0
			    && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
				wpipe->pipe_state &=
				    ~(PIPE_WANTCLOSE | PIPE_WANTR);
				wakeup(wpipe);
			}

			return (error);
		}
	}

#ifdef __FreeBSD__
	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
#endif

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva ||
		     (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(wpipe, uio);

			/*
			 * Break out if an error occurred, unless it's
			 * ENOMEM. ENOMEM means we failed to allocate
			 * some resources for direct write, so we just
			 * fall back to ordinary write. If the direct
			 * write was successful, process the rest of
			 * the data via ordinary write.
			 */
			if (!error)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* PIPE_NODIRECT */

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes. We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer. We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			if ((error = pipelock(wpipe, 1)) != 0)
				break;

			/*
			 * It is possible for a direct write to
			 * slip in on us... handle it here...
			 */
			if (wpipe->pipe_state & PIPE_DIRECTW) {
				pipeunlock(wpipe);
				goto retrywrite;
			}
			/*
			 * If a process blocked in uiomove, our
			 * value for space might be bad.
			 *
			 * XXX will we be ok if the reader has gone
			 * away here?
			 */
			if (space > wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.cnt) {
				pipeunlock(wpipe);
				goto retrywrite;
			}

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer. If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
				wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */

			error = uiomove(
			    &wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
			    segsize, uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes. Wraparound
				 * happened.
				 */
#ifdef DEBUG
				if (wpipe->pipe_buffer.in + segsize !=
				    wpipe->pipe_buffer.size)
					panic("Expected pipe buffer wraparound disappeared");
#endif

				error = uiomove(&wpipe->pipe_buffer.buffer[0],
					size - segsize, uio);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
#ifdef DEBUG
					if (wpipe->pipe_buffer.in !=
					    size - segsize +
					    wpipe->pipe_buffer.size)
						panic("Expected wraparound bad");
#endif
					wpipe->pipe_buffer.in =
					    size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
#ifdef DEBUG
				if (wpipe->pipe_buffer.cnt >
				    wpipe->pipe_buffer.size)
					panic("Pipe buffer overflow");
#endif

			}
			pipeunlock(wpipe);
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up
			 * now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe, wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
		wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
	    && (uio->uio_resid == 0))
		error = 0;

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe, wpipe);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	return (error);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
#ifndef PIPE_NODIRECT
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
#endif
			*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

#ifdef __FreeBSD__
	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);
#endif /* FreeBSD */
#ifdef __NetBSD__
	case TIOCSPGRP:
		mpipe->pipe_pgid = *(int *)data;
		return (0);

	case TIOCGPGRP:
		*(int *)data = mpipe->pipe_pgid;
		return (0);
#endif /* NetBSD */

	}
	return (ENOTTY);
}
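
/*
 * Illustrative userland sketch (not part of this file): these ioctls
 * mirror the socket ones. FIONREAD reports how much data is buffered
 * (or in flight via a direct write), and FIOASYNC enables SIGIO
 * delivery to the process/group set with TIOCSPGRP:
 *
 *	int on = 1, pgid = getpid(), nbytes;
 *
 *	ioctl(fds[0], FIOASYNC, &on);		(enable SIGIO on the reader)
 *	ioctl(fds[0], TIOCSPGRP, &pgid);	(who receives the signal)
 *	ioctl(fds[0], FIONREAD, &nbytes);	(bytes available to read)
 */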

int
pipe_poll(fp, events, p)
	struct file *fp;
	int events;
	struct proc *p;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTW) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
		    || (
#ifndef PIPE_NODIRECT
		    ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
#endif
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt)
		    >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(p, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}

static int
pipe_stat(fp, ub, p)
	struct file *fp;
	struct stat *ub;
	struct proc *p;
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	memset((caddr_t)ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
#ifdef __FreeBSD__
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
#endif /* FreeBSD */
#ifdef __NetBSD__
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
#endif /* NetBSD */
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

#ifdef __FreeBSD__
	fp->f_ops = &badfileops;
	funsetown(cpipe->pipe_sigio);
#endif
	fp->f_data = NULL;
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	mtx_assert(&vm_mtx, MA_OWNED);
#endif
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
#ifdef __FreeBSD__
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#elif defined(__NetBSD__)
		uvm_km_free(kernel_map,
			(vaddr_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
#endif /* NetBSD */

		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != NULL) {
#ifdef __FreeBSD__
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
#elif defined(__NetBSD__)
		pipe_loan_free(cpipe);
#endif /* NetBSD */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = NULL;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (!cpipe)
		return;

	pipeselwakeup(cpipe, cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
		tsleep(cpipe, PRIBIO, "pipecl", 0);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe, ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		ppipe->pipe_peer = NULL;
	}

	/*
	 * free resources
	 */
#ifdef __FreeBSD__
	mtx_lock(&vm_mtx);
	pipe_free_kmem(cpipe);
	/* XXX: erm, doesn't zalloc already have its own locks and
	 * not need the giant vm lock?
	 */
	zfree(pipe_zone, cpipe);
	mtx_unlock(&vm_mtx);
#endif /* FreeBSD */

#ifdef __NetBSD__
	pipe_free_kmem(cpipe);
	(void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
	pool_put(&pipe_pool, cpipe);
#endif
}

#ifdef __FreeBSD__
/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}
#endif /* FreeBSD */

#ifdef __NetBSD__
static int
pipe_fcntl(fp, cmd, data, p)
	struct file *fp;
	u_int cmd;
	caddr_t data;
	struct proc *p;
{
	if (cmd == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*
 * Handle pipe sysctls.
 */
int
sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_PIPE_MAXKVASZ:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &maxpipekva));
	case KERN_PIPE_LIMITKVA:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &limitpipekva));
	case KERN_PIPE_MAXBIGPIPES:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &maxbigpipes));
	case KERN_PIPE_NBIGPIPES:
		return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
	case KERN_PIPE_KVASIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
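
/*
 * Illustrative sketch (not part of this file): assuming these nodes are
 * attached under CTL_KERN as a KERN_PIPE subtree, as the KERN_PIPE_*
 * names suggest, the limits above could be read or tuned from userland
 * along these lines:
 *
 *	int mib[3] = { CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ };
 *	int val;
 *	size_t len = sizeof(val);
 *
 *	sysctl(mib, 3, &val, &len, NULL, 0);	(read the current limit)
 */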

/*
 * Initialize pipe structs.
 */
void
pipe_init(void)
{
	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
		0, NULL, NULL, M_PIPE);
}

#endif /* __NetBSD__ */