/*	$NetBSD: netbsd32_fs.c,v 1.6 2001/06/14 20:32:44 thorpej Exp $	*/

/*
 * Copyright (c) 1998, 2001 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(_KERNEL_OPT)
#include "opt_ktrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>
#include <compat/netbsd32/netbsd32_conv.h>


static int dofilereadv32 __P((struct proc *, int, struct file *,
    struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int dofilewritev32 __P((struct proc *, int, struct file *,
    struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int change_utimes32 __P((struct vnode *, netbsd32_timevalp_t,
    struct proc *));

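/*
 * Get statistics on all mounted file systems, converting each
 * struct statfs to a 32-bit struct netbsd32_statfs on copyout.
 */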
int
netbsd32_getfsstat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getfsstat_args /* {
		syscallarg(netbsd32_statfsp_t) buf;
		syscallarg(netbsd32_long) bufsize;
		syscallarg(int) flags;
	} */ *uap = v;
	struct mount *mp, *nmp;
	struct statfs *sp;
	struct netbsd32_statfs sb32;
	caddr_t sfsp;
	long count, maxcount, error;

	maxcount = SCARG(uap, bufsize) / sizeof(struct netbsd32_statfs);
	sfsp = (caddr_t)(u_long)SCARG(uap, buf);
	simple_lock(&mountlist_slock);
	count = 0;
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (sfsp && count < maxcount) {
			sp = &mp->mnt_stat;
			/*
			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
			 * refresh the fsstat cache.  MNT_WAIT or MNT_LAZY
			 * overrides MNT_NOWAIT.
			 */
			if (SCARG(uap, flags) != MNT_NOWAIT &&
			    SCARG(uap, flags) != MNT_LAZY &&
			    (SCARG(uap, flags) == MNT_WAIT ||
			     SCARG(uap, flags) == 0) &&
			    (error = VFS_STATFS(mp, sp, p)) != 0) {
				simple_lock(&mountlist_slock);
				nmp = mp->mnt_list.cqe_next;
				vfs_unbusy(mp);
				continue;
			}
			sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sp->f_oflags = sp->f_flags & 0xffff;
			netbsd32_from_statfs(sp, &sb32);
			error = copyout(&sb32, sfsp, sizeof(sb32));
			if (error) {
				vfs_unbusy(mp);
				return (error);
			}
			sfsp += sizeof(sb32);
		}
		count++;
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
	if (sfsp && count > maxcount)
		*retval = maxcount;
	else
		*retval = count;
	return (0);
}

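/*
 * 32-bit readv(2): look up the descriptor and hand off to
 * dofilereadv32(), which understands 32-bit iovecs.
 */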
int
netbsd32_readv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_readv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	return (dofilereadv32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

/*
 * Like dofilereadv(), but copies in the 32-bit iovec array itself
 * and converts it to native iovecs.
 */
int
dofilereadv32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX)
			return (EINVAL);
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else
		return (EINVAL);

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		/* free the traced iovec copy even if the read failed */
		if (error == 0)
			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
		FREE(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

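/*
 * 32-bit writev(2): look up the descriptor and hand off to
 * dofilewritev32(), which understands 32-bit iovecs.
 */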
int
netbsd32_writev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_writev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	return (dofilewritev32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

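/*
 * Like dofilewritev(), but copies in the 32-bit iovec array itself
 * and converts it to native iovecs.
 */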
int
dofilewritev32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX)
			return (EINVAL);
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else
		return (EINVAL);

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		/* free the traced iovec copy even if the write failed */
		if (error == 0)
			ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
		FREE(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

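/*
 * 32-bit utimes(2): look up the path, then let change_utimes32()
 * convert the 32-bit timevals and set the times.
 */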
int
netbsd32_utimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_utimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
	    (char *)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

/*
 * Common routine to set access and modification times given a vnode.
 */
static int
change_utimes32(vp, tptr, p)
	struct vnode *vp;
	netbsd32_timevalp_t tptr;
	struct proc *p;
{
	struct netbsd32_timeval tv32[2];
	struct timeval tv[2];
	struct vattr vattr;
	int error;

	VATTR_NULL(&vattr);
	if (tptr == NULL) {
		microtime(&tv[0]);
		tv[1] = tv[0];
		vattr.va_vaflags |= VA_UTIMES_NULL;
	} else {
		error = copyin((caddr_t)(u_long)tptr, tv32, sizeof(tv32));
		if (error)
			return (error);
		netbsd32_to_timeval(&tv32[0], &tv[0]);
		netbsd32_to_timeval(&tv32[1], &tv[1]);
	}
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_atime.tv_sec = tv[0].tv_sec;
	vattr.va_atime.tv_nsec = tv[0].tv_usec * 1000;
	vattr.va_mtime.tv_sec = tv[1].tv_sec;
	vattr.va_mtime.tv_nsec = tv[1].tv_usec * 1000;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	VOP_UNLOCK(vp, 0);
	return (error);
}

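/*
 * Get file system statistics by path, converting the result to a
 * 32-bit struct netbsd32_statfs on copyout.
 */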
int
netbsd32_statfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_statfs_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
	    (char *)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	mp = nd.ni_vp->v_mount;
	sp = &mp->mnt_stat;
	vrele(nd.ni_vp);
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	return (copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32)));
}

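/*
 * Get file system statistics by descriptor, converting the result to a
 * 32-bit struct netbsd32_statfs on copyout.
 */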
int
netbsd32_fstatfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_fstatfs_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct file *fp;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	mp = ((struct vnode *)fp->f_data)->v_mount;
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		goto out;
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	error = copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32));
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

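/*
 * 32-bit futimes(2): set access/modification times on an open descriptor.
 */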
int
netbsd32_futimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_futimes_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct file *fp;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);

	error = change_utimes32((struct vnode *)fp->f_data,
	    SCARG(uap, tptr), p);
	FILE_UNUSE(fp, p);
	return (error);
}

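/*
 * Read directory entries from a descriptor into a 32-bit sized buffer.
 */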
int
netbsd32_getdents(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getdents_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_charp) buf;
		syscallarg(netbsd32_size_t) count;
	} */ *uap = v;
	struct file *fp;
	int error, done;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	error = vn_readdir(fp, (caddr_t)(u_long)SCARG(uap, buf), UIO_USERSPACE,
	    SCARG(uap, count), &done, p, 0, 0);
	*retval = done;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

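/*
 * 32-bit lutimes(2): like utimes(2), but does not follow symlinks.
 */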
int
netbsd32_lutimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_lutimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE,
	    (caddr_t)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

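/*
 * Get file status by path, converting struct stat to the 32-bit
 * struct netbsd32_stat before copyout.
 */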
int
netbsd32___stat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___stat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)(u_long)SCARG(uap, path);
	sg = stackgap_init(p->p_emul);
	CHECK_ALT_EXIST(p, &sg, path);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
	return (error);
}

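/*
 * Get file status by descriptor, converting struct stat to the 32-bit
 * struct netbsd32_stat before copyout.
 */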
int
netbsd32___fstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___fstat13_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statp_t) sb;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct netbsd32_stat sb32;
	struct stat ub;
	int error = 0;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	FILE_USE(fp);
	error = (*fp->f_ops->fo_stat)(fp, &ub, p);
	FILE_UNUSE(fp, p);

	if (error == 0) {
		netbsd32_from___stat13(&ub, &sb32);
		error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, sb),
		    sizeof(sb32));
	}
	return (error);
}

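/*
 * Get file status by path without following symlinks, converting
 * struct stat to the 32-bit struct netbsd32_stat before copyout.
 */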
int
netbsd32___lstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___lstat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)(u_long)SCARG(uap, path);
	sg = stackgap_init(p->p_emul);
	CHECK_ALT_EXIST(p, &sg, path);

	/* lstat() must not follow a final symbolic link */
	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
	return (error);
}

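/*
 * 32-bit preadv(2): positioned scatter read; the supplied offset is
 * used instead of, and without updating, the descriptor's offset.
 */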
int
netbsd32_preadv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_preadv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO)
		return (ESPIPE);

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		return (error);

	return (dofilereadv32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &offset, 0, retval));
}

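/*
 * 32-bit pwritev(2): positioned gather write; the supplied offset is
 * used instead of, and without updating, the descriptor's offset.
 */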
int
netbsd32_pwritev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_pwritev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO)
		return (ESPIPE);

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		return (error);

	return (dofilewritev32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &offset, 0, retval));
}

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */
int getcwd_common __P((struct vnode *, struct vnode *,
    char **, char *, int, int, struct proc *));

int
netbsd32___getcwd(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int error;
	char *path;
	char *bp, *bend;
	int len = (int)SCARG(uap, length);
	int lenused;

	if (len > MAXPATHLEN * 4)
		len = MAXPATHLEN * 4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
#define GETCWD_CHECK_ACCESS 0x0001
	error = getcwd_common(p->p_cwdi->cwdi_cdir, NULL, &bp, path, len / 2,
	    GETCWD_CHECK_ACCESS, p);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, (caddr_t)(u_long)SCARG(uap, bufp), lenused);

 out:
	free(path, M_TEMP);
	return error;
}