/*	$NetBSD: netbsd32_fs.c,v 1.10 2002/10/23 13:16:43 scw Exp $	*/

/*
 * Copyright (c) 1998, 2001 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: netbsd32_fs.c,v 1.10 2002/10/23 13:16:43 scw Exp $");

#if defined(_KERNEL_OPT)
#include "opt_ktrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>
#include <compat/netbsd32/netbsd32_conv.h>


static int dofilereadv32 __P((struct proc *, int, struct file *,
		struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int dofilewritev32 __P((struct proc *, int, struct file *,
		struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int change_utimes32 __P((struct vnode *, netbsd32_timevalp_t,
		struct proc *));

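/*
 * 32-bit getfsstat(2): walk the mount list and copy each statfs record
 * out through an intermediate struct netbsd32_statfs.
 */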
int
netbsd32_getfsstat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getfsstat_args /* {
		syscallarg(netbsd32_statfsp_t) buf;
		syscallarg(netbsd32_long) bufsize;
		syscallarg(int) flags;
	} */ *uap = v;
	struct mount *mp, *nmp;
	struct statfs *sp;
	struct netbsd32_statfs sb32;
	caddr_t sfsp;
	long count, maxcount, error;

	maxcount = SCARG(uap, bufsize) / sizeof(struct netbsd32_statfs);
	sfsp = (caddr_t)NETBSD32PTR64(SCARG(uap, buf));
	simple_lock(&mountlist_slock);
	count = 0;
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (sfsp && count < maxcount) {
			sp = &mp->mnt_stat;
			/*
			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
			 * refresh the fsstat cache; it is refreshed only
			 * for MNT_WAIT (or when no flags are given).
			 */
			if (SCARG(uap, flags) != MNT_NOWAIT &&
			    SCARG(uap, flags) != MNT_LAZY &&
			    (SCARG(uap, flags) == MNT_WAIT ||
			     SCARG(uap, flags) == 0) &&
			    (error = VFS_STATFS(mp, sp, p)) != 0) {
				simple_lock(&mountlist_slock);
				nmp = mp->mnt_list.cqe_next;
				vfs_unbusy(mp);
				continue;
			}
			sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sp->f_oflags = sp->f_flags & 0xffff;
			netbsd32_from_statfs(sp, &sb32);
			error = copyout(&sb32, sfsp, sizeof(sb32));
			if (error) {
				vfs_unbusy(mp);
				return (error);
			}
			sfsp += sizeof(sb32);
		}
		count++;
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
	if (sfsp && count > maxcount)
		*retval = maxcount;
	else
		*retval = count;
	return (0);
}

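/*
 * 32-bit readv(2): look up and validate the descriptor, then hand the
 * 32-bit iovec array to dofilereadv32().
 */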
int
netbsd32_readv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_readv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	FILE_USE(fp);

	return (dofilereadv32(p, fd, fp,
	    (struct netbsd32_iovec *)NETBSD32PTR64(SCARG(uap, iovp)),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

/* Damn thing copies in the iovec! */
int
dofilereadv32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX) {
			error = EINVAL;
			goto out;
		}
		iov = malloc(iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else {
		error = EINVAL;
		goto out;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
		free(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		free(needfree, M_IOV);
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

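/*
 * 32-bit writev(2): the write-side counterpart of netbsd32_readv();
 * validates the descriptor and defers to dofilewritev32().
 */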
int
netbsd32_writev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_writev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	FILE_USE(fp);

	return (dofilewritev32(p, fd, fp,
	    (struct netbsd32_iovec *)NETBSD32PTR64(SCARG(uap, iovp)),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

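/*
 * Write-side counterpart of dofilereadv32(): converts the 32-bit iovec
 * array, performs the write, and posts SIGPIPE on EPIPE.
 */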
int
dofilewritev32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX) {
			error = EINVAL;
			goto out;
		}
		iov = malloc(iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else {
		error = EINVAL;
		goto out;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
		free(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		free(needfree, M_IOV);
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

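/*
 * 32-bit utimes(2): look up the path and let change_utimes32() convert
 * and apply the timestamps.
 */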
int
netbsd32_utimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_utimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
	    (char *)NETBSD32PTR64(SCARG(uap, path)), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

/*
 * Common routine to set access and modification times given a vnode.
 */
static int
change_utimes32(vp, tptr, p)
	struct vnode *vp;
	netbsd32_timevalp_t tptr;
	struct proc *p;
{
	struct netbsd32_timeval tv32[2];
	struct timeval tv[2];
	struct vattr vattr;
	int error;

	VATTR_NULL(&vattr);
	if (tptr == NULL) {
		microtime(&tv[0]);
		tv[1] = tv[0];
		vattr.va_vaflags |= VA_UTIMES_NULL;
	} else {
		error = copyin((caddr_t)NETBSD32PTR64(tptr), tv32,
		    sizeof(tv32));
		if (error)
			return (error);
		netbsd32_to_timeval(&tv32[0], &tv[0]);
		netbsd32_to_timeval(&tv32[1], &tv[1]);
	}
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_atime.tv_sec = tv[0].tv_sec;
	vattr.va_atime.tv_nsec = tv[0].tv_usec * 1000;
	vattr.va_mtime.tv_sec = tv[1].tv_sec;
	vattr.va_mtime.tv_nsec = tv[1].tv_usec * 1000;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	VOP_UNLOCK(vp, 0);
	return (error);
}

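/*
 * 32-bit statfs(2): stat the file system containing the given path.
 */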
int
netbsd32_statfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_statfs_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
	    (char *)NETBSD32PTR64(SCARG(uap, path)), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	mp = nd.ni_vp->v_mount;
	sp = &mp->mnt_stat;
	vrele(nd.ni_vp);
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	return (copyout(&s32, (caddr_t)NETBSD32PTR64(SCARG(uap, buf)),
	    sizeof(s32)));
}

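/*
 * 32-bit fstatfs(2): stat the file system backing an open descriptor.
 */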
int
netbsd32_fstatfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_fstatfs_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct file *fp;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	mp = ((struct vnode *)fp->f_data)->v_mount;
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		goto out;
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	error = copyout(&s32, (caddr_t)NETBSD32PTR64(SCARG(uap, buf)),
	    sizeof(s32));
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

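/*
 * 32-bit futimes(2): like netbsd32_utimes() but operates on an open
 * descriptor.
 */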
int
netbsd32_futimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_futimes_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct file *fp;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);

	error = change_utimes32((struct vnode *)fp->f_data,
	    SCARG(uap, tptr), p);
	FILE_UNUSE(fp, p);
	return (error);
}

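/*
 * 32-bit getdents(2): read directory entries into the caller's buffer
 * via vn_readdir().
 */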
int
netbsd32_getdents(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getdents_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_charp) buf;
		syscallarg(netbsd32_size_t) count;
	} */ *uap = v;
	struct file *fp;
	int error, done;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	error = vn_readdir(fp, (caddr_t)NETBSD32PTR64(SCARG(uap, buf)),
	    UIO_USERSPACE, SCARG(uap, count), &done, p, 0, 0);
	*retval = done;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

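/*
 * 32-bit lutimes(2): like netbsd32_utimes() but does not follow
 * symbolic links.
 */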
int
netbsd32_lutimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_lutimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE,
	    (caddr_t)NETBSD32PTR64(SCARG(uap, path)), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

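/*
 * 32-bit __stat13(2): stat a path and copy out a struct netbsd32_stat.
 */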
int
netbsd32___stat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___stat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)NETBSD32PTR64(SCARG(uap, path));
	sg = stackgap_init(p, 0);
	CHECK_ALT_EXIST(p, &sg, path);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)NETBSD32PTR64(SCARG(uap, ub)),
	    sizeof(sb32));
	return (error);
}

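/*
 * 32-bit __fstat13(2): stat an open descriptor.
 */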
int
netbsd32___fstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___fstat13_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statp_t) sb;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct netbsd32_stat sb32;
	struct stat ub;
	int error = 0;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	FILE_USE(fp);
	error = (*fp->f_ops->fo_stat)(fp, &ub, p);
	FILE_UNUSE(fp, p);

	if (error == 0) {
		netbsd32_from___stat13(&ub, &sb32);
		error = copyout(&sb32, (caddr_t)NETBSD32PTR64(SCARG(uap, sb)),
		    sizeof(sb32));
	}
	return (error);
}

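/*
 * 32-bit __lstat13(2): like netbsd32___stat13() but does not follow
 * symbolic links.
 */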
int
netbsd32___lstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___lstat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)NETBSD32PTR64(SCARG(uap, path));
	sg = stackgap_init(p, 0);
	CHECK_ALT_EXIST(p, &sg, path);

	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)NETBSD32PTR64(SCARG(uap, ub)),
	    sizeof(sb32));
	return (error);
}

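/*
 * 32-bit preadv(2): positional readv; the descriptor must refer to a
 * seekable vnode.
 */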
int
netbsd32_preadv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_preadv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	FILE_USE(fp);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
		error = ESPIPE;
		goto out;
	}

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		goto out;

	return (dofilereadv32(p, fd, fp,
	    (struct netbsd32_iovec *)NETBSD32PTR64(SCARG(uap, iovp)),
	    SCARG(uap, iovcnt), &offset, 0, retval));

 out:
	FILE_UNUSE(fp, p);
	return (error);
}

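/*
 * 32-bit pwritev(2): positional writev; the descriptor must refer to a
 * seekable vnode.
 */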
int
netbsd32_pwritev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_pwritev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	FILE_USE(fp);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
		error = ESPIPE;
		goto out;
	}

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		goto out;

	return (dofilewritev32(p, fd, fp,
	    (struct netbsd32_iovec *)NETBSD32PTR64(SCARG(uap, iovp)),
	    SCARG(uap, iovcnt), &offset, 0, retval));

 out:
	FILE_UNUSE(fp, p);
	return (error);
}

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */
int
getcwd_common __P((struct vnode *, struct vnode *,
		   char **, char *, int, int, struct proc *));

int
netbsd32___getcwd(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int error;
	char *path;
	char *bp, *bend;
	int len = (int)SCARG(uap, length);
	int lenused;

	if (len > MAXPATHLEN*4)
		len = MAXPATHLEN*4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
#define GETCWD_CHECK_ACCESS 0x0001
	error = getcwd_common(p->p_cwdi->cwdi_cdir, NULL, &bp, path, len/2,
	    GETCWD_CHECK_ACCESS, p);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, (caddr_t)NETBSD32PTR64(SCARG(uap, bufp)), lenused);

 out:
	free(path, M_TEMP);
	return error;
}