sys_pipe.c revision 1.33 1 /* $NetBSD: sys_pipe.c,v 1.33 2003/01/18 10:06:34 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1996 John S. Dyson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Absolutely no warranty of function or purpose is made by the author
17 * John S. Dyson.
18 * 4. Modifications may be freely made to this file if the above conditions
19 * are met.
20 *
21 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
22 */
23
24 /*
25 * This file contains a high-performance replacement for the socket-based
26 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
27 * all features of sockets, but does do everything that pipes normally
28 * do.
29 *
30 * Adaption for NetBSD UVM, including uvm_loan() based direct write, was
31 * written by Jaromir Dolecek.
32 */
33
34 /*
35 * This code has two modes of operation, a small write mode and a large
36 * write mode. The small write mode acts like conventional pipes with
37 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
38 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
39 * and PIPE_SIZE in size, it is fully mapped into the kernel (on FreeBSD,
40 * those pages are also wired), and the receiving process can copy it directly
41 * from the pages in the sending process.
42 *
43 * If the sending process receives a signal, it is possible that it will
44 * go away, and certainly its address space can change, because control
45 * is returned back to the user-mode side. In that case, the pipe code
46 * arranges to copy the buffer supplied by the user process on FreeBSD, to
47 * a pageable kernel buffer, and the receiving process will grab the data
48 * from the pageable kernel buffer. Since signals don't happen all that often,
49 * the copy operation is normally eliminated.
50 * For NetBSD, the pages are mapped read-only, COW for kernel by uvm_loan(),
51 * so no explicit handling need to be done, all is handled by standard VM
52 * facilities.
53 *
54 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
55 * happen for small transfers so that the system will not spend all of
56 * its time context switching. PIPE_SIZE is constrained by the
57 * amount of kernel virtual memory.
58 */
59
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.33 2003/01/18 10:06:34 thorpej Exp $");
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc.h>
66 #include <sys/fcntl.h>
67 #include <sys/file.h>
68 #include <sys/filedesc.h>
69 #include <sys/filio.h>
70 #include <sys/kernel.h>
71 #include <sys/lock.h>
72 #include <sys/ttycom.h>
73 #include <sys/stat.h>
74 #include <sys/malloc.h>
75 #include <sys/poll.h>
76 #include <sys/signalvar.h>
77 #include <sys/vnode.h>
78 #include <sys/uio.h>
79 #include <sys/lock.h>
80 #ifdef __FreeBSD__
81 #include <sys/mutex.h>
82 #endif
83 #ifdef __NetBSD__
84 #include <sys/select.h>
85 #include <sys/mount.h>
86 #include <sys/sa.h>
87 #include <sys/syscallargs.h>
88 #include <uvm/uvm.h>
89 #include <sys/sysctl.h>
90 #include <sys/kernel.h>
91 #endif /* NetBSD, FreeBSD */
92
93 #include <sys/pipe.h>
94
95 #ifdef __NetBSD__
96 /*
97 * Avoid microtime(9), it's slow. We don't guard the read from time(9)
98 * with splclock(9) since we don't actually need to be THAT sure the access
99 * is atomic.
100 */
101 #define vfs_timestamp(tv) (*(tv) = time)
102
103 /* we call it sel_klist */
104 #define sel_note sel_klist
105
106 #endif
107
108 /*
109 * Use this define if you want to disable *fancy* VM things. Expect an
110 * approx 30% decrease in transfer rate. This could be useful for
111 * OpenBSD.
112 */
113 /* #define PIPE_NODIRECT */
114
115 /*
116 * interfaces to the outside world
117 */
118 #ifdef __FreeBSD__
119 static int pipe_read(struct file *fp, struct uio *uio,
120 struct ucred *cred, int flags, struct thread *td);
121 static int pipe_write(struct file *fp, struct uio *uio,
122 struct ucred *cred, int flags, struct thread *td);
123 static int pipe_close(struct file *fp, struct thread *td);
124 static int pipe_poll(struct file *fp, int events, struct ucred *cred,
125 struct thread *td);
126 static int pipe_kqfilter(struct file *fp, struct knote *kn);
127 static int pipe_stat(struct file *fp, struct stat *sb, struct thread *td);
128 static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct thread *td);
129
130 static struct fileops pipeops = {
131 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
132 pipe_stat, pipe_close
133 };
134
135 #define PIPE_GET_GIANT(pipe) \
136 do { \
137 PIPE_UNLOCK(wpipe); \
138 mtx_lock(&Giant); \
139 } while (/*CONSTCOND*/ 0)
140
141 #define PIPE_DROP_GIANT(pipe) \
142 do { \
143 mtx_unlock(&Giant); \
144 PIPE_LOCK(wpipe); \
145 } while (/*CONSTCOND*/ 0)
146
147 #endif /* FreeBSD */
148
149 #ifdef __NetBSD__
150 static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
151 struct ucred *cred, int flags);
152 static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
153 struct ucred *cred, int flags);
154 static int pipe_close(struct file *fp, struct proc *p);
155 static int pipe_poll(struct file *fp, int events, struct proc *p);
156 static int pipe_fcntl(struct file *fp, u_int com, caddr_t data,
157 struct proc *p);
158 static int pipe_kqfilter(struct file *fp, struct knote *kn);
159 static int pipe_stat(struct file *fp, struct stat *sb, struct proc *p);
160 static int pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct proc *p);
161
162 static struct fileops pipeops =
163 { pipe_read, pipe_write, pipe_ioctl, pipe_fcntl, pipe_poll,
164 pipe_stat, pipe_close, pipe_kqfilter };
165
166 /* XXXSMP perhaps use spinlocks & KERNEL_PROC_(UN)LOCK() ? just clear now */
167 #define PIPE_GET_GIANT(pipe)
168 #define PIPE_DROP_GIANT(pipe)
169 #define GIANT_REQUIRED
170
171 #endif /* NetBSD */
172
173 /*
174 * Default pipe buffer size(s), this can be kind-of large now because pipe
175 * space is pageable. The pipe code will try to maintain locality of
176 * reference for performance reasons, so small amounts of outstanding I/O
177 * will not wipe the cache.
178 */
179 #define MINPIPESIZE (PIPE_SIZE/3)
180 #define MAXPIPESIZE (2*PIPE_SIZE/3)
181
182 /*
183 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
184 * is there so that on large systems, we don't exhaust it.
185 */
186 #define MAXPIPEKVA (8*1024*1024)
187 static int maxpipekva = MAXPIPEKVA;
188
189 /*
190 * Limit for direct transfers, we cannot, of course limit
191 * the amount of kva for pipes in general though.
192 */
193 #define LIMITPIPEKVA (16*1024*1024)
194 static int limitpipekva = LIMITPIPEKVA;
195
196 /*
197 * Limit the number of "big" pipes
198 */
199 #define LIMITBIGPIPES 32
200 static int maxbigpipes = LIMITBIGPIPES;
201 static int nbigpipe = 0;
202
203 /*
204 * Amount of KVA consumed by pipe buffers.
205 */
206 static int amountpipekva = 0;
207
208 static void pipeclose(struct pipe *cpipe);
209 static void pipe_free_kmem(struct pipe *cpipe);
210 static int pipe_create(struct pipe **cpipep, int allockva);
211 static __inline int pipelock(struct pipe *cpipe, int catch);
212 static __inline void pipeunlock(struct pipe *cpipe);
213 static __inline void pipeselwakeup(struct pipe *cpipe, struct pipe *sigp);
214 #ifndef PIPE_NODIRECT
215 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
216 #endif
217 static int pipespace(struct pipe *cpipe, int size);
218
219 #ifdef __NetBSD__
220 #ifndef PIPE_NODIRECT
221 static int pipe_loan_alloc(struct pipe *, int);
222 static void pipe_loan_free(struct pipe *);
223 #endif /* PIPE_NODIRECT */
224
225 static struct pool pipe_pool;
226 #endif /* NetBSD */
227
228 #ifdef __FreeBSD__
229 static vm_zone_t pipe_zone;
230
231 static void pipeinit(void *dummy __unused);
232 #ifndef PIPE_NODIRECT
233 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
234 static void pipe_destroy_write_buffer(struct pipe *wpipe);
235 static void pipe_clone_write_buffer(struct pipe *wpipe);
236 #endif
237
238 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
239
/*
 * SYSINIT hook (registered above): create the vm_zone from which
 * struct pipe allocations are drawn on FreeBSD.
 */
static void
pipeinit(void *dummy __unused)
{

	pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
}
246 #endif /* FreeBSD */
247
248 /*
249 * The pipe system call for the DTYPE_PIPE type of pipes
250 */
251
252 /* ARGSUSED */
253 #ifdef __FreeBSD__
254 int
255 pipe(td, uap)
256 struct thread *td;
257 struct pipe_args /* {
258 int dummy;
259 } */ *uap;
260 #elif defined(__NetBSD__)
261 int
262 sys_pipe(l, v, retval)
263 struct lwp *l;
264 void *v;
265 register_t *retval;
266 #endif
267 {
268 struct file *rf, *wf;
269 struct pipe *rpipe, *wpipe;
270 int fd, error;
271 struct proc *p;
272 #ifdef __FreeBSD__
273 struct mtx *pmtx;
274
275 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
276
277 pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);
278
279 rpipe = wpipe = NULL;
280 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 1)) {
281 pipeclose(rpipe);
282 pipeclose(wpipe);
283 free(pmtx, M_TEMP);
284 return (ENFILE);
285 }
286
287 error = falloc(td, &rf, &fd);
288 if (error) {
289 pipeclose(rpipe);
290 pipeclose(wpipe);
291 free(pmtx, M_TEMP);
292 return (error);
293 }
294 fhold(rf);
295 td->td_retval[0] = fd;
296
297 /*
298 * Warning: once we've gotten past allocation of the fd for the
299 * read-side, we can only drop the read side via fdrop() in order
300 * to avoid races against processes which manage to dup() the read
301 * side while we are blocked trying to allocate the write side.
302 */
303 FILE_LOCK(rf);
304 rf->f_flag = FREAD | FWRITE;
305 rf->f_type = DTYPE_PIPE;
306 rf->f_data = (caddr_t)rpipe;
307 rf->f_ops = &pipeops;
308 FILE_UNLOCK(rf);
309 error = falloc(td, &wf, &fd);
310 if (error) {
311 struct filedesc *fdp = td->td_proc->p_fd;
312 FILEDESC_LOCK(fdp);
313 if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
314 fdp->fd_ofiles[td->td_retval[0]] = NULL;
315 FILEDESC_UNLOCK(fdp);
316 fdrop(rf, td);
317 } else
318 FILEDESC_UNLOCK(fdp);
319 fdrop(rf, td);
320 /* rpipe has been closed by fdrop(). */
321 pipeclose(wpipe);
322 free(pmtx, M_TEMP);
323 return (error);
324 }
325 FILE_LOCK(wf);
326 wf->f_flag = FREAD | FWRITE;
327 wf->f_type = DTYPE_PIPE;
328 wf->f_data = (caddr_t)wpipe;
329 wf->f_ops = &pipeops;
330 p->p_retval[1] = fd;
331 rpipe->pipe_peer = wpipe;
332 wpipe->pipe_peer = rpipe;
333 mtx_init(pmtx, "pipe mutex", MTX_DEF);
334 rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
335 fdrop(rf, td);
336 #endif /* FreeBSD */
337
338 #ifdef __NetBSD__
339 p = l->l_proc;
340 rpipe = wpipe = NULL;
341 if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
342 pipeclose(rpipe);
343 pipeclose(wpipe);
344 return (ENFILE);
345 }
346
347 /*
348 * Note: the file structure returned from falloc() is marked
349 * as 'larval' initially. Unless we mark it as 'mature' by
350 * FILE_SET_MATURE(), any attempt to do anything with it would
351 * return EBADF, including e.g. dup(2) or close(2). This avoids
352 * file descriptor races if we block in the second falloc().
353 */
354
355 error = falloc(p, &rf, &fd);
356 if (error)
357 goto free2;
358 retval[0] = fd;
359 rf->f_flag = FREAD;
360 rf->f_type = DTYPE_PIPE;
361 rf->f_data = (caddr_t)rpipe;
362 rf->f_ops = &pipeops;
363
364 error = falloc(p, &wf, &fd);
365 if (error)
366 goto free3;
367 retval[1] = fd;
368 wf->f_flag = FWRITE;
369 wf->f_type = DTYPE_PIPE;
370 wf->f_data = (caddr_t)wpipe;
371 wf->f_ops = &pipeops;
372
373 rpipe->pipe_peer = wpipe;
374 wpipe->pipe_peer = rpipe;
375
376 FILE_SET_MATURE(rf);
377 FILE_SET_MATURE(wf);
378 FILE_UNUSE(rf, p);
379 FILE_UNUSE(wf, p);
380 return (0);
381 free3:
382 FILE_UNUSE(rf, p);
383 ffree(rf);
384 fdremove(p->p_fd, retval[0]);
385 free2:
386 pipeclose(wpipe);
387 pipeclose(rpipe);
388 #endif /* NetBSD */
389
390 return (error);
391 }
392
/*
 * Allocate kva for pipe circular buffer, the space is pageable
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 *
 * The global amountpipekva accounting is updated here (increment) and
 * in pipe_free_kmem() (decrement), so a resize nets out correctly.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
#ifdef __FreeBSD__
	struct vm_object *object;
	int npages, error;

	GIANT_REQUIRED;
	/* Must not hold the pipe mutex: vm_map_find() can sleep. */
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	       ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}
#endif /* FreeBSD */

#ifdef __NetBSD__
	/*
	 * Allocate pageable virtual address space. Physical memory is
	 * allocated on demand.
	 */
	buffer = (caddr_t) uvm_km_valloc(kernel_map, round_page(size));
	if (buffer == NULL)
		return (ENOMEM);
#endif /* NetBSD */

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
#ifdef __FreeBSD__
	cpipe->pipe_buffer.object = object;
#endif
	/* Install the new (empty) circular buffer. */
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}
458
/*
 * initialize and allocate VM and memory for pipe
 *
 * Allocates a struct pipe into *cpipep and zero-fills it; when
 * 'allockva' is nonzero the PIPE_SIZE kernel buffer is allocated too.
 * Returns 0, ENOMEM, or a pipespace() error.  On pipespace() failure
 * *cpipep is still set, so the caller can clean up with pipeclose().
 */
static int
pipe_create(cpipep, allockva)
	struct pipe **cpipep;
	int allockva;
{
	struct pipe *cpipe;
	int error;

#ifdef __FreeBSD__
	*cpipep = zalloc(pipe_zone);
#endif
#ifdef __NetBSD__
	/* NOTE(review): pool_get(9) takes PR_* flags; M_WAITOK is passed
	 * here -- verify the flag values are interchangeable. */
	*cpipep = pool_get(&pipe_pool, M_WAITOK);
#endif
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* Initialize */
	memset(cpipe, 0, sizeof(*cpipe));
	cpipe->pipe_state = PIPE_SIGNALR;	/* see SIGNALR handling in pipe_read() */

#ifdef __FreeBSD__
	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
#endif
	if (allockva && (error = pipespace(cpipe, PIPE_SIZE)))
		return (error);

	/* Stamp creation time; atime/mtime start out identical. */
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
#ifdef __NetBSD__
	cpipe->pipe_pgid = NO_PID;	/* no async-I/O (SIGIO) target yet */
	lockinit(&cpipe->pipe_lock, PRIBIO | PCATCH, "pipelk", 0, 0);
#endif

	return (0);
}
501
502
/*
 * lock a pipe for I/O, blocking other access
 *
 * With 'catch' set, returns as soon as a signal interrupts the wait;
 * otherwise the wait is retried until the lock is acquired.
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
#endif

#ifdef __NetBSD__
	/*
	 * pipe_lock was created with PCATCH (see pipe_create()), so
	 * lockmgr() can fail with EINTR/ERESTART; when the caller did
	 * not ask for signal catching, simply retry until we win.
	 */
	do {
		error = lockmgr(&cpipe->pipe_lock, LK_EXCLUSIVE, NULL);
	} while (!catch && (error == EINTR || error == ERESTART));
	return (error);
#endif
}
534
/*
 * unlock a pipe I/O lock, waking anyone blocked in pipelock()
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

#ifdef __FreeBSD__
	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
#endif

#ifdef __NetBSD__
	/* lockmgr() handles wakeup of waiters itself. */
	lockmgr(&cpipe->pipe_lock, LK_RELEASE, NULL);
#endif
}
556
/*
 * Select/poll wakup. This also sends SIGIO to peer connected to
 * 'sigpipe' side of pipe.
 *
 * 'selp' is the pipe whose selectors/kevent listeners get notified;
 * 'sigp', when non-NULL and in async (PIPE_ASYNC) mode, identifies
 * the side whose registered pid/process-group receives SIGIO.
 */
static __inline void
pipeselwakeup(selp, sigp)
	struct pipe *selp, *sigp;
{

#ifdef __FreeBSD__
	if (selp->pipe_state & PIPE_SEL) {
		selp->pipe_state &= ~PIPE_SEL;
		selwakeup(&selp->pipe_sel);
	}
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) && sigp->pipe_sigio)
		pgsigio(sigp->pipe_sigio, SIGIO, 0);
	KNOTE(&selp->pipe_sel.sel_note, 0);
#endif

#ifdef __NetBSD__
	selnotify(&selp->pipe_sel, 0);
	/* pipe_pgid < 0 names a process group; > 0 names a single pid. */
	if (sigp && (sigp->pipe_state & PIPE_ASYNC) &&
	    sigp->pipe_pgid != NO_PID) {
		struct proc *p;

		if (sigp->pipe_pgid < 0)
			gsignal(-sigp->pipe_pgid, SIGIO);
		else if (sigp->pipe_pgid > 0 && (p = pfind(sigp->pipe_pgid)) != 0)
			psignal(p, SIGIO);
	}
#endif /* NetBSD */
}
589
590 /* ARGSUSED */
591 #ifdef __FreeBSD__
592 static int
593 pipe_read(fp, uio, cred, flags, td)
594 struct file *fp;
595 struct uio *uio;
596 struct ucred *cred;
597 struct thread *td;
598 int flags;
599 struct proc *p;
600 #elif defined(__NetBSD__)
601 static int
602 pipe_read(fp, offset, uio, cred, flags)
603 struct file *fp;
604 off_t *offset;
605 struct uio *uio;
606 struct ucred *cred;
607 int flags;
608 #endif
609 {
610 struct pipe *rpipe = (struct pipe *) fp->f_data;
611 int error;
612 size_t nread = 0;
613 size_t size;
614 size_t ocnt;
615
616 PIPE_LOCK(rpipe);
617 ++rpipe->pipe_busy;
618 ocnt = rpipe->pipe_buffer.cnt;
619
620 error = pipelock(rpipe, 1);
621 if (error)
622 goto unlocked_error;
623
624 while (uio->uio_resid) {
625 /*
626 * normal pipe buffer receive
627 */
628 if (rpipe->pipe_buffer.cnt > 0) {
629 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
630 if (size > rpipe->pipe_buffer.cnt)
631 size = rpipe->pipe_buffer.cnt;
632 if (size > uio->uio_resid)
633 size = uio->uio_resid;
634
635 PIPE_UNLOCK(rpipe);
636 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
637 size, uio);
638 PIPE_LOCK(rpipe);
639 if (error)
640 break;
641
642 rpipe->pipe_buffer.out += size;
643 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
644 rpipe->pipe_buffer.out = 0;
645
646 rpipe->pipe_buffer.cnt -= size;
647
648 /*
649 * If there is no more to read in the pipe, reset
650 * its pointers to the beginning. This improves
651 * cache hit stats.
652 */
653 if (rpipe->pipe_buffer.cnt == 0) {
654 rpipe->pipe_buffer.in = 0;
655 rpipe->pipe_buffer.out = 0;
656 }
657 nread += size;
658 #ifndef PIPE_NODIRECT
659 /*
660 * Direct copy, bypassing a kernel buffer.
661 */
662 } else if ((size = rpipe->pipe_map.cnt) &&
663 (rpipe->pipe_state & PIPE_DIRECTW)) {
664 caddr_t va;
665 if (size > uio->uio_resid)
666 size = uio->uio_resid;
667
668 va = (caddr_t) rpipe->pipe_map.kva +
669 rpipe->pipe_map.pos;
670 PIPE_UNLOCK(rpipe);
671 error = uiomove(va, size, uio);
672 PIPE_LOCK(rpipe);
673 if (error)
674 break;
675 nread += size;
676 rpipe->pipe_map.pos += size;
677 rpipe->pipe_map.cnt -= size;
678 if (rpipe->pipe_map.cnt == 0) {
679 rpipe->pipe_state &= ~PIPE_DIRECTW;
680 wakeup(rpipe);
681 }
682 #endif
683 } else {
684 /*
685 * detect EOF condition
686 * read returns 0 on EOF, no need to set error
687 */
688 if (rpipe->pipe_state & PIPE_EOF)
689 break;
690
691 /*
692 * If the "write-side" has been blocked, wake it up now.
693 */
694 if (rpipe->pipe_state & PIPE_WANTW) {
695 rpipe->pipe_state &= ~PIPE_WANTW;
696 wakeup(rpipe);
697 }
698
699 /*
700 * Break if some data was read.
701 */
702 if (nread > 0)
703 break;
704
705 /*
706 * don't block on non-blocking I/O
707 */
708 if (fp->f_flag & FNONBLOCK) {
709 error = EAGAIN;
710 break;
711 }
712
713 /*
714 * Unlock the pipe buffer for our remaining processing.
715 * We will either break out with an error or we will
716 * sleep and relock to loop.
717 */
718 pipeunlock(rpipe);
719
720 /*
721 * We want to read more, wake up select/poll.
722 */
723 pipeselwakeup(rpipe, rpipe->pipe_peer);
724
725 rpipe->pipe_state |= PIPE_WANTR;
726 #ifdef __FreeBSD__
727 error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
728 "piperd", 0);
729 #else
730 error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0);
731 #endif
732 if (error != 0 || (error = pipelock(rpipe, 1)))
733 goto unlocked_error;
734 }
735 }
736 pipeunlock(rpipe);
737
738 /* XXX: should probably do this before getting any locks. */
739 if (error == 0)
740 vfs_timestamp(&rpipe->pipe_atime);
741 unlocked_error:
742 --rpipe->pipe_busy;
743
744 /*
745 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
746 */
747 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
748 rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
749 wakeup(rpipe);
750 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
751 /*
752 * Handle write blocking hysteresis.
753 */
754 if (rpipe->pipe_state & PIPE_WANTW) {
755 rpipe->pipe_state &= ~PIPE_WANTW;
756 wakeup(rpipe);
757 }
758 }
759
760 /*
761 * If anything was read off the buffer, signal to the writer it's
762 * possible to write more data. Also send signal if we are here for the
763 * first time after last write.
764 */
765 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF
766 && (ocnt != rpipe->pipe_buffer.cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
767 pipeselwakeup(rpipe, rpipe->pipe_peer);
768 rpipe->pipe_state &= ~PIPE_SIGNALR;
769 }
770
771 PIPE_UNLOCK(rpipe);
772 return (error);
773 }
774
775 #ifdef __FreeBSD__
776 #ifndef PIPE_NODIRECT
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 *
 * On success the wired pages are recorded in pipe_map.ms[], mapped at
 * pipe_map.kva, and the uio is advanced past the mapped bytes.
 * Returns 0 or EFAULT (after unwiring any pages wired so far).
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	size_t size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	/* Clamp the transfer to the pipe's buffer size. */
	size = uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		/* Fault the page in (read access) and wire it. */
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			/* Back out: unwire everything wired so far. */
			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}
851
/*
 * unmap and unwire the process buffer
 *
 * The kva itself is kept cached for the next direct write unless the
 * system-wide amountpipekva exceeds maxpipekva, in which case it is
 * returned to the kernel map.
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > maxpipekva) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	wpipe->pipe_map.npages = 0;
}
879
/*
 * In the case of a signal, the writing process might go away. This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * Called with the pipe locked; clears PIPE_DIRECTW so the reader
 * switches to the normal buffered path.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	memcpy((caddr_t) wpipe->pipe_buffer.buffer,
	    (caddr_t) wpipe->pipe_map.kva + pos, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	/* Teardown needs Giant (VM calls); drops/retakes the pipe lock. */
	PIPE_GET_GIANT(wpipe);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}
907
/*
 * This implements the pipe buffer write mechanism. Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer. Then the direct mapping write is set-up.
 *
 * Called with the pipe mutex held; sleeps at several points, so the
 * state is revalidated (EOF, DIRECTW) after every sleep.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	/* Wait for any previous direct write to be fully consumed. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/* Defer until the reader has drained the buffered data. */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	/* Wire and map the user pages; needs Giant. */
	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	/* Wait until the reader consumes the mapped data. */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe, wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
1008 #endif /* !PIPE_NODIRECT */
1009 #endif /* FreeBSD */
1010
1011 #ifdef __NetBSD__
1012 #ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 *
 * Reserves 'npages' worth of kernel virtual address space for mapping
 * loaned pages, plus the page-pointer array, and charges the kva to
 * amountpipekva.  Returns 0 or ENOMEM.
 */
static int
pipe_loan_alloc(wpipe, npages)
	struct pipe *wpipe;
	int npages;
{
	vsize_t len;

	len = (vsize_t)npages << PAGE_SHIFT;
	wpipe->pipe_map.kva = uvm_km_valloc_wait(kernel_map, len);
	if (wpipe->pipe_map.kva == 0)
		return (ENOMEM);

	amountpipekva += len;
	wpipe->pipe_map.npages = npages;
	wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
	    M_WAITOK);
	return (0);
}
1034
/*
 * Free resources allocated for loan transfer.
 *
 * Inverse of pipe_loan_alloc(): returns the kva, uncharges
 * amountpipekva, and frees the page-pointer array.
 */
static void
pipe_loan_free(wpipe)
	struct pipe *wpipe;
{
	vsize_t len;

	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len);
	wpipe->pipe_map.kva = 0;	/* mark as unallocated */
	amountpipekva -= len;
	free(wpipe->pipe_map.pgs, M_PIPE);
	wpipe->pipe_map.pgs = NULL;
}
1051
/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the pipe buffer write mechanism. Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer. Then the direct mapping write is set-up.
 *
 * On success the uio is advanced by the number of bytes the reader
 * actually consumed; on error after partial consumption the uio is
 * still advanced so the caller can return a short write.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error, npages, j;
	struct vm_page **pgs;
	vaddr_t bbase, kva, base, bend;
	vsize_t blen, bcnt;
	voff_t bpos;

retry:
	/* Wait for any previous direct write to be fully consumed. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/* Defer until the reader has drained the buffered data. */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error;
		}
		goto retry;
	}

	/*
	 * Handle first PIPE_CHUNK_SIZE bytes of buffer. Deal with buffers
	 * not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;		/* page-aligned span to loan */
	bpos = bbase - base;		/* offset of data within first page */

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = blen >> PAGE_SHIFT;

	wpipe->pipe_map.pos = bpos;
	wpipe->pipe_map.cnt = bcnt;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wpipe->pipe_map.kva && npages > wpipe->pipe_map.npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wpipe->pipe_map.kva == 0) {
		error = pipe_loan_alloc(wpipe, npages);
		if (error) {
			goto error;
		}
	}

	/* Loan the write buffer memory from writer process */
	pgs = wpipe->pipe_map.pgs;
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, base, blen,
	    pgs, UVM_LOAN_TOPAGE);
	if (error) {
		/* pgs == NULL tells the cleanup path nothing was loaned */
		pgs = NULL;
		goto cleanup;
	}

	/* Enter the loaned pages to kva */
	kva = wpipe->pipe_map.kva;
	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
	}
	pmap_update(pmap_kernel());

	/* Hand the mapping to the reader and wait for it to drain. */
	wpipe->pipe_state |= PIPE_DIRECTW;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe, wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	if (error)
		wpipe->pipe_state &= ~PIPE_DIRECTW;

cleanup:
	pipelock(wpipe, 0);
	if (pgs != NULL) {
		/* Unmap and return the loaned pages. */
		pmap_kremove(wpipe->pipe_map.kva, blen);
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);
	pipeunlock(wpipe);

	if (error) {
		pipeselwakeup(wpipe, wpipe);

		/*
		 * If nothing was read from what we offered, return error
		 * straight on. Otherwise update uio resid first. Caller
		 * will deal with the error condition, returning short
		 * write, error, or restarting the write(2) as appropriate.
		 *
		 * Note: the early "goto error"s above jump to the label
		 * inside this if-block; at those points pipe_map.cnt is
		 * either 0 or bcnt has not been consumed, so the test is
		 * effectively a no-op for them.
		 */
		if (wpipe->pipe_map.cnt == bcnt) {
error:
			wakeup(wpipe);
			return (error);
		}

		bcnt -= wpipe->pipe_map.cnt;	/* bytes actually consumed */
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (error);
}
1213 #endif /* !PIPE_NODIRECT */
1214 #endif /* NetBSD */
1215
1216 #ifdef __FreeBSD__
1217 static int
1218 pipe_write(fp, uio, cred, flags, td)
1219 struct file *fp;
1220 off_t *offset;
1221 struct uio *uio;
1222 struct ucred *cred;
1223 int flags;
1224 struct thread *td;
1225 #elif defined(__NetBSD__)
1226 static int
1227 pipe_write(fp, offset, uio, cred, flags)
1228 struct file *fp;
1229 off_t *offset;
1230 struct uio *uio;
1231 struct ucred *cred;
1232 int flags;
1233 #endif
1234 {
1235 int error = 0;
1236 struct pipe *wpipe, *rpipe;
1237
1238 rpipe = (struct pipe *) fp->f_data;
1239 wpipe = rpipe->pipe_peer;
1240
1241 PIPE_LOCK(rpipe);
1242 /*
1243 * detect loss of pipe read side, issue SIGPIPE if lost.
1244 */
1245 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1246 PIPE_UNLOCK(rpipe);
1247 return (EPIPE);
1248 }
1249
1250 ++wpipe->pipe_busy;
1251
1252 /*
1253 * If it is advantageous to resize the pipe buffer, do
1254 * so.
1255 */
1256 if ((uio->uio_resid > PIPE_SIZE) &&
1257 (nbigpipe < maxbigpipes) &&
1258 #ifndef PIPE_NODIRECT
1259 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1260 #endif
1261 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
1262 (wpipe->pipe_buffer.cnt == 0)) {
1263
1264 if ((error = pipelock(wpipe,1)) == 0) {
1265 PIPE_GET_GIANT(rpipe);
1266 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
1267 nbigpipe++;
1268 PIPE_DROP_GIANT(rpipe);
1269 pipeunlock(wpipe);
1270 } else {
1271 /*
1272 * If an error occurred, unbusy and return, waking up
1273 * any waiting readers.
1274 */
1275 --wpipe->pipe_busy;
1276 if (wpipe->pipe_busy == 0
1277 && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1278 wpipe->pipe_state &=
1279 ~(PIPE_WANTCLOSE | PIPE_WANTR);
1280 wakeup(wpipe);
1281 }
1282
1283 return (error);
1284 }
1285 }
1286
1287 #ifdef __FreeBSD__
1288 /*
1289 * If an early error occured unbusy and return, waking up any pending
1290 * readers.
1291 */
1292 if (error) {
1293 --wpipe->pipe_busy;
1294 if ((wpipe->pipe_busy == 0) &&
1295 (wpipe->pipe_state & PIPE_WANT)) {
1296 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1297 wakeup(wpipe);
1298 }
1299 PIPE_UNLOCK(rpipe);
1300 return(error);
1301 }
1302
1303 KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
1304 #endif
1305
1306 while (uio->uio_resid) {
1307 size_t space;
1308
1309 #ifndef PIPE_NODIRECT
1310 /*
1311 * If the transfer is large, we can gain performance if
1312 * we do process-to-process copies directly.
1313 * If the write is non-blocking, we don't use the
1314 * direct write mechanism.
1315 *
1316 * The direct write mechanism will detect the reader going
1317 * away on us.
1318 */
1319 if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
1320 (fp->f_flag & FNONBLOCK) == 0 &&
1321 (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
1322 error = pipe_direct_write(wpipe, uio);
1323
1324 /*
1325 * Break out if error occured, unless it's ENOMEM.
1326 * ENOMEM means we failed to allocate some resources
1327 * for direct write, so we just fallback to ordinary
1328 * write. If the direct write was successful,
1329 * process rest of data via ordinary write.
1330 */
1331 if (!error)
1332 continue;
1333
1334 if (error != ENOMEM)
1335 break;
1336 }
1337 #endif /* PIPE_NODIRECT */
1338
1339 /*
1340 * Pipe buffered writes cannot be coincidental with
1341 * direct writes. We wait until the currently executing
1342 * direct write is completed before we start filling the
1343 * pipe buffer. We break out if a signal occurs or the
1344 * reader goes away.
1345 */
1346 retrywrite:
1347 while (wpipe->pipe_state & PIPE_DIRECTW) {
1348 if (wpipe->pipe_state & PIPE_WANTR) {
1349 wpipe->pipe_state &= ~PIPE_WANTR;
1350 wakeup(wpipe);
1351 }
1352 #ifdef __FreeBSD__
1353 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1354 "pipbww", 0);
1355 #else
1356 error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
1357 #endif
1358 if (wpipe->pipe_state & PIPE_EOF)
1359 break;
1360 if (error)
1361 break;
1362 }
1363 if (wpipe->pipe_state & PIPE_EOF) {
1364 error = EPIPE;
1365 break;
1366 }
1367
1368 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1369
1370 /* Writes of size <= PIPE_BUF must be atomic. */
1371 if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
1372 space = 0;
1373
1374 if (space > 0) {
1375 int size; /* Transfer size */
1376 int segsize; /* first segment to transfer */
1377
1378 if ((error = pipelock(wpipe,1)) != 0)
1379 break;
1380
1381 /*
1382 * It is possible for a direct write to
1383 * slip in on us... handle it here...
1384 */
1385 if (wpipe->pipe_state & PIPE_DIRECTW) {
1386 pipeunlock(wpipe);
1387 goto retrywrite;
1388 }
1389 /*
1390 * If a process blocked in uiomove, our
1391 * value for space might be bad.
1392 *
1393 * XXX will we be ok if the reader has gone
1394 * away here?
1395 */
1396 if (space > wpipe->pipe_buffer.size -
1397 wpipe->pipe_buffer.cnt) {
1398 pipeunlock(wpipe);
1399 goto retrywrite;
1400 }
1401
1402 /*
1403 * Transfer size is minimum of uio transfer
1404 * and free space in pipe buffer.
1405 */
1406 if (space > uio->uio_resid)
1407 size = uio->uio_resid;
1408 else
1409 size = space;
1410 /*
1411 * First segment to transfer is minimum of
1412 * transfer size and contiguous space in
1413 * pipe buffer. If first segment to transfer
1414 * is less than the transfer size, we've got
1415 * a wraparound in the buffer.
1416 */
1417 segsize = wpipe->pipe_buffer.size -
1418 wpipe->pipe_buffer.in;
1419 if (segsize > size)
1420 segsize = size;
1421
1422 /* Transfer first segment */
1423
1424 PIPE_UNLOCK(rpipe);
1425 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1426 segsize, uio);
1427 PIPE_LOCK(rpipe);
1428
1429 if (error == 0 && segsize < size) {
1430 /*
1431 * Transfer remaining part now, to
1432 * support atomic writes. Wraparound
1433 * happened.
1434 */
1435 #ifdef DEBUG
1436 if (wpipe->pipe_buffer.in + segsize !=
1437 wpipe->pipe_buffer.size)
1438 panic("Expected pipe buffer wraparound disappeared");
1439 #endif
1440
1441 PIPE_UNLOCK(rpipe);
1442 error = uiomove(&wpipe->pipe_buffer.buffer[0],
1443 size - segsize, uio);
1444 PIPE_LOCK(rpipe);
1445 }
1446 if (error == 0) {
1447 wpipe->pipe_buffer.in += size;
1448 if (wpipe->pipe_buffer.in >=
1449 wpipe->pipe_buffer.size) {
1450 #ifdef DEBUG
1451 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
1452 panic("Expected wraparound bad");
1453 #endif
1454 wpipe->pipe_buffer.in = size - segsize;
1455 }
1456
1457 wpipe->pipe_buffer.cnt += size;
1458 #ifdef DEBUG
1459 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
1460 panic("Pipe buffer overflow");
1461 #endif
1462 }
1463 pipeunlock(wpipe);
1464 if (error)
1465 break;
1466 } else {
1467 /*
1468 * If the "read-side" has been blocked, wake it up now.
1469 */
1470 if (wpipe->pipe_state & PIPE_WANTR) {
1471 wpipe->pipe_state &= ~PIPE_WANTR;
1472 wakeup(wpipe);
1473 }
1474
1475 /*
1476 * don't block on non-blocking I/O
1477 */
1478 if (fp->f_flag & FNONBLOCK) {
1479 error = EAGAIN;
1480 break;
1481 }
1482
1483 /*
1484 * We have no more space and have something to offer,
1485 * wake up select/poll.
1486 */
1487 pipeselwakeup(wpipe, wpipe);
1488
1489 wpipe->pipe_state |= PIPE_WANTW;
1490 #ifdef __FreeBSD__
1491 error = msleep(wpipe, PIPE_MTX(rpipe),
1492 PRIBIO | PCATCH, "pipewr", 0);
1493 #else
1494 error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
1495 #endif
1496 if (error != 0)
1497 break;
1498 /*
1499 * If read side wants to go away, we just issue a signal
1500 * to ourselves.
1501 */
1502 if (wpipe->pipe_state & PIPE_EOF) {
1503 error = EPIPE;
1504 break;
1505 }
1506 }
1507 }
1508
1509 --wpipe->pipe_busy;
1510 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1511 wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
1512 wakeup(wpipe);
1513 } else if (wpipe->pipe_buffer.cnt > 0) {
1514 /*
1515 * If we have put any characters in the buffer, we wake up
1516 * the reader.
1517 */
1518 if (wpipe->pipe_state & PIPE_WANTR) {
1519 wpipe->pipe_state &= ~PIPE_WANTR;
1520 wakeup(wpipe);
1521 }
1522 }
1523
1524 /*
1525 * Don't return EPIPE if I/O was successful
1526 */
1527 if ((error == EPIPE) && (wpipe->pipe_buffer.cnt == 0)
1528 && (uio->uio_resid == 0))
1529 error = 0;
1530
1531 if (error == 0)
1532 vfs_timestamp(&wpipe->pipe_mtime);
1533
1534 /*
1535 * We have something to offer, wake up select/poll.
1536 * wpipe->pipe_map.cnt is always 0 in this point (direct write
1537 * is only done synchronously), so check only wpipe->pipe_buffer.cnt
1538 */
1539 if (wpipe->pipe_buffer.cnt)
1540 pipeselwakeup(wpipe, wpipe);
1541
1542 /*
1543 * Arrange for next read(2) to do a signal.
1544 */
1545 wpipe->pipe_state |= PIPE_SIGNALR;
1546
1547 PIPE_UNLOCK(rpipe);
1548 return (error);
1549 }
1550
1551 /*
1552 * we implement a very minimal set of ioctls for compatibility with sockets.
1553 */
1554 int
1555 #ifdef __FreeBSD__
1556 pipe_ioctl(fp, cmd, data, td)
1557 struct file *fp;
1558 u_long cmd;
1559 caddr_t data;
1560 struct thread *td;
1561 #else
1562 pipe_ioctl(fp, cmd, data, p)
1563 struct file *fp;
1564 u_long cmd;
1565 caddr_t data;
1566 struct proc *p;
1567 #endif
1568 {
1569 struct pipe *mpipe = (struct pipe *)fp->f_data;
1570
1571 switch (cmd) {
1572
1573 case FIONBIO:
1574 return (0);
1575
1576 case FIOASYNC:
1577 PIPE_LOCK(mpipe);
1578 if (*(int *)data) {
1579 mpipe->pipe_state |= PIPE_ASYNC;
1580 } else {
1581 mpipe->pipe_state &= ~PIPE_ASYNC;
1582 }
1583 PIPE_UNLOCK(mpipe);
1584 return (0);
1585
1586 case FIONREAD:
1587 PIPE_LOCK(mpipe);
1588 #ifndef PIPE_NODIRECT
1589 if (mpipe->pipe_state & PIPE_DIRECTW)
1590 *(int *)data = mpipe->pipe_map.cnt;
1591 else
1592 #endif
1593 *(int *)data = mpipe->pipe_buffer.cnt;
1594 PIPE_UNLOCK(mpipe);
1595 return (0);
1596
1597 #ifdef __FreeBSD__
1598 case FIOSETOWN:
1599 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1600
1601 case FIOGETOWN:
1602 *(int *)data = fgetown(mpipe->pipe_sigio);
1603 return (0);
1604
1605 /* This is deprecated, FIOSETOWN should be used instead. */
1606 case TIOCSPGRP:
1607 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1608
1609 /* This is deprecated, FIOGETOWN should be used instead. */
1610 case TIOCGPGRP:
1611 *(int *)data = -fgetown(mpipe->pipe_sigio);
1612 return (0);
1613 #endif /* FreeBSD */
1614 #ifdef __NetBSD__
1615 case TIOCSPGRP:
1616 mpipe->pipe_pgid = *(int *)data;
1617 return (0);
1618
1619 case TIOCGPGRP:
1620 *(int *)data = mpipe->pipe_pgid;
1621 return (0);
1622 #endif /* NetBSD */
1623
1624 }
1625 return (EPASSTHROUGH);
1626 }
1627
1628 int
1629 #ifdef __FreeBSD__
1630 pipe_poll(fp, events, cred, td)
1631 struct file *fp;
1632 int events;
1633 struct ucred *cred;
1634 struct thread *td;
1635 #elif defined(__NetBSD__)
1636 pipe_poll(fp, events, td)
1637 struct file *fp;
1638 int events;
1639 struct proc *td;
1640 #endif
1641 {
1642 struct pipe *rpipe = (struct pipe *)fp->f_data;
1643 struct pipe *wpipe;
1644 int revents = 0;
1645
1646 wpipe = rpipe->pipe_peer;
1647 PIPE_LOCK(rpipe);
1648 if (events & (POLLIN | POLLRDNORM))
1649 if ((rpipe->pipe_buffer.cnt > 0) ||
1650 #ifndef PIPE_NODIRECT
1651 (rpipe->pipe_state & PIPE_DIRECTW) ||
1652 #endif
1653 (rpipe->pipe_state & PIPE_EOF))
1654 revents |= events & (POLLIN | POLLRDNORM);
1655
1656 if (events & (POLLOUT | POLLWRNORM))
1657 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF)
1658 || (
1659 #ifndef PIPE_NODIRECT
1660 ((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1661 #endif
1662 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1663 revents |= events & (POLLOUT | POLLWRNORM);
1664
1665 if ((rpipe->pipe_state & PIPE_EOF) ||
1666 (wpipe == NULL) ||
1667 (wpipe->pipe_state & PIPE_EOF))
1668 revents |= POLLHUP;
1669
1670 if (revents == 0) {
1671 if (events & (POLLIN | POLLRDNORM)) {
1672 selrecord(td, &rpipe->pipe_sel);
1673 rpipe->pipe_state |= PIPE_SEL;
1674 }
1675
1676 if (events & (POLLOUT | POLLWRNORM)) {
1677 selrecord(td, &wpipe->pipe_sel);
1678 wpipe->pipe_state |= PIPE_SEL;
1679 }
1680 }
1681 PIPE_UNLOCK(rpipe);
1682
1683 return (revents);
1684 }
1685
1686 static int
1687 #ifdef __FreeBSD__
1688 pipe_stat(fp, ub, td)
1689 struct file *fp;
1690 struct stat *ub;
1691 struct thread *td;
1692 #else
1693 pipe_stat(fp, ub, td)
1694 struct file *fp;
1695 struct stat *ub;
1696 struct proc *td;
1697 #endif
1698 {
1699 struct pipe *pipe = (struct pipe *)fp->f_data;
1700
1701 memset((caddr_t)ub, 0, sizeof(*ub));
1702 ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
1703 ub->st_blksize = pipe->pipe_buffer.size;
1704 ub->st_size = pipe->pipe_buffer.cnt;
1705 ub->st_blocks = (ub->st_size) ? 1 : 0;
1706 #ifdef __FreeBSD__
1707 ub->st_atimespec = pipe->pipe_atime;
1708 ub->st_mtimespec = pipe->pipe_mtime;
1709 ub->st_ctimespec = pipe->pipe_ctime;
1710 #endif /* FreeBSD */
1711 #ifdef __NetBSD__
1712 TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec)
1713 TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1714 TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1715 #endif /* NetBSD */
1716 ub->st_uid = fp->f_cred->cr_uid;
1717 ub->st_gid = fp->f_cred->cr_gid;
1718 /*
1719 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1720 * XXX (st_dev, st_ino) should be unique.
1721 */
1722 return (0);
1723 }
1724
1725 /* ARGSUSED */
1726 static int
1727 #ifdef __FreeBSD__
1728 pipe_close(fp, td)
1729 struct file *fp;
1730 struct thread *td;
1731 #else
1732 pipe_close(fp, td)
1733 struct file *fp;
1734 struct proc *td;
1735 #endif
1736 {
1737 struct pipe *cpipe = (struct pipe *)fp->f_data;
1738
1739 #ifdef __FreeBSD__
1740 fp->f_ops = &badfileops;
1741 funsetown(cpipe->pipe_sigio);
1742 #endif
1743 fp->f_data = NULL;
1744 pipeclose(cpipe);
1745 return (0);
1746 }
1747
/*
 * Release the kernel memory owned by a pipe: the buffered-write KVA and
 * any direct-write mapping, keeping the global accounting (nbigpipe,
 * amountpipekva) in sync.  Must be called without the pipe mutex held
 * (asserted below on FreeBSD).
 */
1748 static void
1749 pipe_free_kmem(cpipe)
1750 	struct pipe *cpipe;
1751 {
1752
1753 #ifdef __FreeBSD__
1754
1755 	GIANT_REQUIRED;
1756 	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
1757 	    ("pipespace: pipe mutex locked"));
1758 #endif
1759
	/* Free the ordinary (buffered) pipe buffer and undo its accounting. */
1760 	if (cpipe->pipe_buffer.buffer != NULL) {
1761 		if (cpipe->pipe_buffer.size > PIPE_SIZE)
1762 			--nbigpipe;
1763 		amountpipekva -= cpipe->pipe_buffer.size;
1764 #ifdef __FreeBSD__
1765 		kmem_free(kernel_map,
1766 			(vm_offset_t)cpipe->pipe_buffer.buffer,
1767 			cpipe->pipe_buffer.size);
1768 #elif defined(__NetBSD__)
1769 		uvm_km_free(kernel_map,
1770 			(vaddr_t)cpipe->pipe_buffer.buffer,
1771 			cpipe->pipe_buffer.size);
1772 #endif /* NetBSD */
1773 		cpipe->pipe_buffer.buffer = NULL;
1774 	}
1775 #ifndef PIPE_NODIRECT
	/*
	 * Tear down direct-write state: the mapping KVA (NetBSD frees it
	 * via pipe_loan_free()) and the map bookkeeping fields.
	 */
1776 	if (cpipe->pipe_map.kva != 0) {
1777 #ifdef __FreeBSD__
1778 		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
1779 		kmem_free(kernel_map,
1780 			cpipe->pipe_map.kva,
1781 			cpipe->pipe_buffer.size + PAGE_SIZE);
1782 #elif defined(__NetBSD__)
1783 		pipe_loan_free(cpipe);
1784 #endif /* NetBSD */
1785 		cpipe->pipe_map.cnt = 0;
1786 		cpipe->pipe_map.kva = 0;
1787 		cpipe->pipe_map.pos = 0;
1788 		cpipe->pipe_map.npages = 0;
1789 	}
1790 #endif /* !PIPE_NODIRECT */
1791 }
1792
1793 /*
1794  * shutdown the pipe
1795  */
1796 static void
1797 pipeclose(cpipe)
1798 	struct pipe *cpipe;
1799 {
1800 	struct pipe *ppipe;
1801 #ifdef __FreeBSD__
1802 	int hadpeer = 0;
1803 #endif
1804
1805 	if (cpipe == NULL)
1806 		return;
1807
1808 	/* partially created pipes won't have a valid mutex. */
1809 	if (PIPE_MTX(cpipe) != NULL)
1810 		PIPE_LOCK(cpipe);
1811
	/* Notify select/poll/SIGIO waiters on our own side first. */
1812 	pipeselwakeup(cpipe, cpipe);
1813
1814 	/*
1815 	 * If the other side is blocked, wake it up saying that
1816 	 * we want to close it down.
1817 	 */
	/*
	 * pipe_busy is held non-zero by sleeping readers/writers; they
	 * observe PIPE_WANTCLOSE | PIPE_EOF, finish up, and wake us.
	 */
1818 	while (cpipe->pipe_busy) {
1819 		wakeup(cpipe);
1820 		cpipe->pipe_state |= PIPE_WANTCLOSE | PIPE_EOF;
1821 #ifdef __FreeBSD__
1822 		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1823 #else
1824 		tsleep(cpipe, PRIBIO, "pipecl", 0);
1825 #endif
1826 	}
1827
1828 	/*
1829 	 * Disconnect from peer
1830 	 */
	/* Mark the peer EOF and wake anything sleeping or polling on it. */
1831 	if ((ppipe = cpipe->pipe_peer) != NULL) {
1832 #ifdef __FreeBSD__
1833 		hadpeer++;
1834 #endif
1835 		pipeselwakeup(ppipe, ppipe);
1836
1837 		ppipe->pipe_state |= PIPE_EOF;
1838 		wakeup(ppipe);
1839 #ifdef __FreeBSD__
1840 		KNOTE(&ppipe->pipe_sel.sel_note, 0);
1841 #endif
1842 		ppipe->pipe_peer = NULL;
1843 	}
1844 	/*
1845 	 * free resources
1846 	 */
1847 #ifdef __FreeBSD__
	/*
	 * The pipe mutex is shared by both ends; only destroy it when the
	 * peer is already gone (hadpeer == 0 means we are the last side).
	 */
1848 	if (PIPE_MTX(cpipe) != NULL) {
1849 		PIPE_UNLOCK(cpipe);
1850 		if (!hadpeer) {
1851 			mtx_destroy(PIPE_MTX(cpipe));
1852 			free(PIPE_MTX(cpipe), M_TEMP);
1853 		}
1854 	}
1855 	mtx_lock(&Giant);
1856 	pipe_free_kmem(cpipe);
1857 	zfree(pipe_zone, cpipe);
1858 	mtx_unlock(&Giant);
1859 #endif
1860
1861 #ifdef __NetBSD__
1862 	if (PIPE_MTX(cpipe) != NULL)
1863 		PIPE_UNLOCK(cpipe);
1864
	/* Drain the long-term pipe lock before returning cpipe to the pool. */
1865 	pipe_free_kmem(cpipe);
1866 	(void) lockmgr(&cpipe->pipe_lock, LK_DRAIN, NULL);
1867 	pool_put(&pipe_pool, cpipe);
1868 #endif
1869 }
1870
1871 static void
1872 filt_pipedetach(struct knote *kn)
1873 {
1874 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1875
1876 switch(kn->kn_filter) {
1877 case EVFILT_WRITE:
1878 /* need the peer structure, not our own */
1879 cpipe = cpipe->pipe_peer;
1880
1881 /* if reader end already closed, just return */
1882 if (!cpipe)
1883 return;
1884
1885 break;
1886 default:
1887 /* nothing to do */
1888 break;
1889 }
1890
1891 #ifdef DIAGNOSTIC
1892 if (kn->kn_hook != cpipe)
1893 panic("filt_pipedetach: inconsistent knote");
1894 #endif
1895
1896 PIPE_LOCK(cpipe);
1897 SLIST_REMOVE(&cpipe->pipe_sel.sel_note, kn, knote, kn_selnext);
1898 PIPE_UNLOCK(cpipe);
1899 }
1900
1901 /*ARGSUSED*/
1902 static int
1903 filt_piperead(struct knote *kn, long hint)
1904 {
1905 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1906 struct pipe *wpipe = rpipe->pipe_peer;
1907
1908 PIPE_LOCK(rpipe);
1909 kn->kn_data = rpipe->pipe_buffer.cnt;
1910 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1911 kn->kn_data = rpipe->pipe_map.cnt;
1912
1913 if ((rpipe->pipe_state & PIPE_EOF) ||
1914 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1915 kn->kn_flags |= EV_EOF;
1916 PIPE_UNLOCK(rpipe);
1917 return (1);
1918 }
1919 PIPE_UNLOCK(rpipe);
1920 return (kn->kn_data > 0);
1921 }
1922
1923 /*ARGSUSED*/
1924 static int
1925 filt_pipewrite(struct knote *kn, long hint)
1926 {
1927 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1928 struct pipe *wpipe = rpipe->pipe_peer;
1929
1930 PIPE_LOCK(rpipe);
1931 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1932 kn->kn_data = 0;
1933 kn->kn_flags |= EV_EOF;
1934 PIPE_UNLOCK(rpipe);
1935 return (1);
1936 }
1937 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1938 if (wpipe->pipe_state & PIPE_DIRECTW)
1939 kn->kn_data = 0;
1940
1941 PIPE_UNLOCK(rpipe);
1942 return (kn->kn_data >= PIPE_BUF);
1943 }
1944
/*
 * Filter tables wiring kqueue EVFILT_READ / EVFILT_WRITE to the
 * pipe-specific detach and event routines above.
 */
1945 static const struct filterops pipe_rfiltops =
1946 	{ 1, NULL, filt_pipedetach, filt_piperead };
1947 static const struct filterops pipe_wfiltops =
1948 	{ 1, NULL, filt_pipedetach, filt_pipewrite };
1949
1950 /*ARGSUSED*/
1951 static int
1952 pipe_kqfilter(struct file *fp, struct knote *kn)
1953 {
1954 struct pipe *cpipe;
1955
1956 cpipe = (struct pipe *)kn->kn_fp->f_data;
1957 switch (kn->kn_filter) {
1958 case EVFILT_READ:
1959 kn->kn_fop = &pipe_rfiltops;
1960 break;
1961 case EVFILT_WRITE:
1962 kn->kn_fop = &pipe_wfiltops;
1963 cpipe = cpipe->pipe_peer;
1964 if (cpipe == NULL) {
1965 /* other end of pipe has been closed */
1966 return (EBADF);
1967 }
1968 break;
1969 default:
1970 return (1);
1971 }
1972 kn->kn_hook = cpipe;
1973
1974 PIPE_LOCK(cpipe);
1975 SLIST_INSERT_HEAD(&cpipe->pipe_sel.sel_note, kn, kn_selnext);
1976 PIPE_UNLOCK(cpipe);
1977 return (0);
1978 }
1979
1980 #ifdef __NetBSD__
1981 static int
1982 pipe_fcntl(fp, cmd, data, p)
1983 struct file *fp;
1984 u_int cmd;
1985 caddr_t data;
1986 struct proc *p;
1987 {
1988 if (cmd == F_SETFL)
1989 return (0);
1990 else
1991 return (EOPNOTSUPP);
1992 }
1993
1994 /*
1995 * Handle pipe sysctls.
1996 */
1997 int
1998 sysctl_dopipe(name, namelen, oldp, oldlenp, newp, newlen)
1999 int *name;
2000 u_int namelen;
2001 void *oldp;
2002 size_t *oldlenp;
2003 void *newp;
2004 size_t newlen;
2005 {
2006 /* All sysctl names at this level are terminal. */
2007 if (namelen != 1)
2008 return (ENOTDIR); /* overloaded */
2009
2010 switch (name[0]) {
2011 case KERN_PIPE_MAXKVASZ:
2012 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxpipekva));
2013 case KERN_PIPE_LIMITKVA:
2014 return (sysctl_int(oldp, oldlenp, newp, newlen, &limitpipekva));
2015 case KERN_PIPE_MAXBIGPIPES:
2016 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxbigpipes));
2017 case KERN_PIPE_NBIGPIPES:
2018 return (sysctl_rdint(oldp, oldlenp, newp, nbigpipe));
2019 case KERN_PIPE_KVASIZE:
2020 return (sysctl_rdint(oldp, oldlenp, newp, amountpipekva));
2021 default:
2022 return (EOPNOTSUPP);
2023 }
2024 /* NOTREACHED */
2025 }
2026
2027 /*
2028  * Initialize pipe structs.
2029  */
2030 void
2031 pipe_init(void)
2032 {
	/* Set up the pool that pipeclose()/pipe allocation draw from. */
2033 	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl", NULL);
2034 }
2035
2036 #endif /* __NetBSD __ */
2037