netbsd32_fs.c revision 1.8

/*	$NetBSD: netbsd32_fs.c,v 1.8 2002/03/16 20:43:54 christos Exp $	*/

/*
 * Copyright (c) 1998, 2001 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: netbsd32_fs.c,v 1.8 2002/03/16 20:43:54 christos Exp $");

#if defined(_KERNEL_OPT)
#include "opt_ktrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>
#include <compat/netbsd32/netbsd32_conv.h>


static int dofilereadv32 __P((struct proc *, int, struct file *, struct netbsd32_iovec *,
		int, off_t *, int, register_t *));
static int dofilewritev32 __P((struct proc *, int, struct file *, struct netbsd32_iovec *,
		int, off_t *, int, register_t *));
static int change_utimes32 __P((struct vnode *, netbsd32_timevalp_t, struct proc *));

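/*
 * 32-bit compat getfsstat(2): walk the mount list, optionally refresh
 * each file system's statistics, and copy out netbsd32_statfs records.
 */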
int
netbsd32_getfsstat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getfsstat_args /* {
		syscallarg(netbsd32_statfsp_t) buf;
		syscallarg(netbsd32_long) bufsize;
		syscallarg(int) flags;
	} */ *uap = v;
	struct mount *mp, *nmp;
	struct statfs *sp;
	struct netbsd32_statfs sb32;
	caddr_t sfsp;
	long count, maxcount, error;

	maxcount = SCARG(uap, bufsize) / sizeof(struct netbsd32_statfs);
	sfsp = (caddr_t)(u_long)SCARG(uap, buf);
	simple_lock(&mountlist_slock);
	count = 0;
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (sfsp && count < maxcount) {
			sp = &mp->mnt_stat;
			/*
			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
			 * refresh the fsstat cache.  MNT_WAIT or MNT_LAZY
			 * overrides MNT_NOWAIT.
			 */
			if (SCARG(uap, flags) != MNT_NOWAIT &&
			    SCARG(uap, flags) != MNT_LAZY &&
			    (SCARG(uap, flags) == MNT_WAIT ||
			     SCARG(uap, flags) == 0) &&
			    (error = VFS_STATFS(mp, sp, p)) != 0) {
				simple_lock(&mountlist_slock);
				nmp = mp->mnt_list.cqe_next;
				vfs_unbusy(mp);
				continue;
			}
			sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sp->f_oflags = sp->f_flags & 0xffff;
			netbsd32_from_statfs(sp, &sb32);
			error = copyout(&sb32, sfsp, sizeof(sb32));
			if (error) {
				vfs_unbusy(mp);
				return (error);
			}
			sfsp += sizeof(sb32);
		}
		count++;
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
	if (sfsp && count > maxcount)
		*retval = maxcount;
	else
		*retval = count;
	return (0);
}

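/*
 * 32-bit compat readv(2): validate the descriptor and hand off to
 * dofilereadv32(), which converts the 32-bit iovec array.
 */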
int
netbsd32_readv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_readv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	return (dofilereadv32(p, fd, fp, (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

/* Damn thing copies in the iovec! */
int
dofilereadv32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX)
			return (EINVAL);
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else
		return (EINVAL);

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
		FREE(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

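/*
 * 32-bit compat writev(2): validate the descriptor and hand off to
 * dofilewritev32().
 */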
int
netbsd32_writev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_writev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	return (dofilewritev32(p, fd, fp, (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

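/*
 * Write counterpart of dofilereadv32(): copies in and converts the
 * 32-bit iovec array before calling the file's write routine.
 */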
int
dofilewritev32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX)
			return (EINVAL);
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else
		return (EINVAL);

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
		FREE(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

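/*
 * 32-bit compat utimes(2): look up the path and let change_utimes32()
 * convert and apply the timestamps.
 */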
int
netbsd32_utimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_utimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, (char *)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

/*
 * Common routine to set access and modification times given a vnode.
 */
static int
change_utimes32(vp, tptr, p)
	struct vnode *vp;
	netbsd32_timevalp_t tptr;
	struct proc *p;
{
	struct netbsd32_timeval tv32[2];
	struct timeval tv[2];
	struct vattr vattr;
	int error;

	VATTR_NULL(&vattr);
	if (tptr == NULL) {
		microtime(&tv[0]);
		tv[1] = tv[0];
		vattr.va_vaflags |= VA_UTIMES_NULL;
	} else {
		error = copyin((caddr_t)(u_long)tptr, tv32, sizeof(tv32));
		if (error)
			return (error);
		netbsd32_to_timeval(&tv32[0], &tv[0]);
		netbsd32_to_timeval(&tv32[1], &tv[1]);
	}
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_atime.tv_sec = tv[0].tv_sec;
	vattr.va_atime.tv_nsec = tv[0].tv_usec * 1000;
	vattr.va_mtime.tv_sec = tv[1].tv_sec;
	vattr.va_mtime.tv_nsec = tv[1].tv_usec * 1000;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	VOP_UNLOCK(vp, 0);
	return (error);
}

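/*
 * 32-bit compat statfs(2): stat the file system containing the named
 * path and copy out a netbsd32_statfs.
 */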
int
netbsd32_statfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_statfs_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, (char *)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	mp = nd.ni_vp->v_mount;
	sp = &mp->mnt_stat;
	vrele(nd.ni_vp);
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	return (copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32)));
}

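/*
 * 32-bit compat fstatfs(2): as above, but for the file system backing
 * an open descriptor.
 */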
int
netbsd32_fstatfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_fstatfs_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct file *fp;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	mp = ((struct vnode *)fp->f_data)->v_mount;
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		goto out;
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	error = copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32));
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

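/*
 * 32-bit compat futimes(2): apply timestamps to the vnode behind an
 * open descriptor.
 */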
int
netbsd32_futimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_futimes_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct file *fp;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);

	error = change_utimes32((struct vnode *)fp->f_data,
	    SCARG(uap, tptr), p);
	FILE_UNUSE(fp, p);
	return (error);
}

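/*
 * 32-bit compat getdents(2): read directory entries into the user
 * buffer via vn_readdir().
 */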
int
netbsd32_getdents(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getdents_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_charp) buf;
		syscallarg(netbsd32_size_t) count;
	} */ *uap = v;
	struct file *fp;
	int error, done;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	error = vn_readdir(fp, (caddr_t)(u_long)SCARG(uap, buf), UIO_USERSPACE,
	    SCARG(uap, count), &done, p, 0, 0);
	*retval = done;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

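/*
 * 32-bit compat lutimes(2): like netbsd32_utimes(), but does not
 * follow a final symbolic link.
 */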
int
netbsd32_lutimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_lutimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, (caddr_t)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

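/*
 * 32-bit compat __stat13 variant of stat(2): stat the named file and
 * copy out a netbsd32_stat.
 */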
int
netbsd32___stat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___stat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)(u_long)SCARG(uap, path);
	sg = stackgap_init(p, 0);
	CHECK_ALT_EXIST(p, &sg, path);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
	return (error);
}

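/*
 * 32-bit compat __fstat13 variant of fstat(2).
 */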
int
netbsd32___fstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___fstat13_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statp_t) sb;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct netbsd32_stat sb32;
	struct stat ub;
	int error = 0;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	FILE_USE(fp);
	error = (*fp->f_ops->fo_stat)(fp, &ub, p);
	FILE_UNUSE(fp, p);

	if (error == 0) {
		netbsd32_from___stat13(&ub, &sb32);
		error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, sb), sizeof(sb32));
	}
	return (error);
}

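/*
 * 32-bit compat __lstat13 variant of lstat(2): like netbsd32___stat13(),
 * but does not follow a final symbolic link.
 */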
int
netbsd32___lstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___lstat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)(u_long)SCARG(uap, path);
	sg = stackgap_init(p, 0);
	CHECK_ALT_EXIST(p, &sg, path);

	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
	return (error);
}

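/*
 * 32-bit compat preadv(2): positioned read; the descriptor's file
 * offset is not updated.
 */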
int
netbsd32_preadv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_preadv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE
	    || vp->v_type == VFIFO)
		return (ESPIPE);

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		return (error);

	return (dofilereadv32(p, fd, fp, (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &offset, 0, retval));
}

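/*
 * 32-bit compat pwritev(2): positioned write counterpart of
 * netbsd32_preadv().
 */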
int
netbsd32_pwritev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_pwritev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE
	    || vp->v_type == VFIFO)
		return (ESPIPE);

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		return (error);

	return (dofilewritev32(p, fd, fp, (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &offset, 0, retval));
}

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */
int
getcwd_common __P((struct vnode *, struct vnode *,
    char **, char *, int, int, struct proc *));

int
netbsd32___getcwd(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int error;
	char *path;
	char *bp, *bend;
	int len = (int)SCARG(uap, length);
	int lenused;

	if (len > MAXPATHLEN*4)
		len = MAXPATHLEN*4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
#define GETCWD_CHECK_ACCESS 0x0001
	error = getcwd_common(p->p_cwdi->cwdi_cdir, NULL, &bp, path, len/2,
	    GETCWD_CHECK_ACCESS, p);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, (caddr_t)(u_long)SCARG(uap, bufp), lenused);

 out:
	free(path, M_TEMP);
	return error;
}