/*	$NetBSD: netbsd32_fs.c,v 1.1 2001/02/08 13:19:33 mrg Exp $	*/

/*
 * Copyright (c) 1998, 2001 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ktrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>
#include <compat/netbsd32/netbsd32_conv.h>


static int dofilereadv32 __P((struct proc *, int, struct file *,
    struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int dofilewritev32 __P((struct proc *, int, struct file *,
    struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int change_utimes32 __P((struct vnode *, netbsd32_timevalp_t,
    struct proc *));

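/*
 * 32-bit compat version of getfsstat(2): walk the mount list and copy
 * the per-filesystem statistics out as an array of struct
 * netbsd32_statfs.
 */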
int
netbsd32_getfsstat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getfsstat_args /* {
		syscallarg(netbsd32_statfsp_t) buf;
		syscallarg(netbsd32_long) bufsize;
		syscallarg(int) flags;
	} */ *uap = v;
	struct mount *mp, *nmp;
	struct statfs *sp;
	struct netbsd32_statfs sb32;
	caddr_t sfsp;
	long count, maxcount, error;

	maxcount = SCARG(uap, bufsize) / sizeof(struct netbsd32_statfs);
	sfsp = (caddr_t)(u_long)SCARG(uap, buf);
	simple_lock(&mountlist_slock);
	count = 0;
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (sfsp && count < maxcount) {
			sp = &mp->mnt_stat;
			/*
			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
			 * refresh the fsstat cache; only MNT_WAIT (or no
			 * flags at all) causes VFS_STATFS() to be called.
			 */
			if (SCARG(uap, flags) != MNT_NOWAIT &&
			    SCARG(uap, flags) != MNT_LAZY &&
			    (SCARG(uap, flags) == MNT_WAIT ||
			     SCARG(uap, flags) == 0) &&
			    (error = VFS_STATFS(mp, sp, p)) != 0) {
				simple_lock(&mountlist_slock);
				nmp = mp->mnt_list.cqe_next;
				vfs_unbusy(mp);
				continue;
			}
			sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sp->f_oflags = sp->f_flags & 0xffff;
			netbsd32_from_statfs(sp, &sb32);
			error = copyout(&sb32, sfsp, sizeof(sb32));
			if (error) {
				vfs_unbusy(mp);
				return (error);
			}
			sfsp += sizeof(sb32);
		}
		count++;
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
	if (sfsp && count > maxcount)
		*retval = maxcount;
	else
		*retval = count;
	return (0);
}

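/*
 * 32-bit compat version of readv(2).
 */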
int
netbsd32_readv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_readv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((u_int)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL ||
	    (fp->f_flag & FREAD) == 0)
		return (EBADF);

	return (dofilereadv32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

/* Damn thing copies in the iovec! */
int
dofilereadv32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX)
			return (EINVAL);
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else
		return (EINVAL);

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;
#ifdef KTRACE
	/* Free the traced iovec copy even if the read failed. */
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
		FREE(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

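/*
 * 32-bit compat version of writev(2).
 */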
int
netbsd32_writev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_writev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct file *fp;
	struct filedesc *fdp = p->p_fd;

	if ((u_int)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL ||
	    (fp->f_flag & FWRITE) == 0)
		return (EBADF);

	return (dofilewritev32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

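/*
 * Write counterpart of dofilereadv32(); additionally posts SIGPIPE when
 * the write fails with EPIPE.
 */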
int
dofilewritev32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
	struct proc *p;
	int fd;
	struct file *fp;
	struct netbsd32_iovec *iovp;
	int iovcnt;
	off_t *offset;
	int flags;
	register_t *retval;
{
	struct uio auio;
	struct iovec *iov;
	struct iovec *needfree;
	struct iovec aiov[UIO_SMALLIOV];
	long i, cnt, error = 0;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
#endif

	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX)
			return (EINVAL);
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		iov = aiov;
		needfree = NULL;
	} else
		return (EINVAL);

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = netbsd32_to_iovecin(iovp, iov, iovcnt);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	/* Free the traced iovec copy even if the write failed. */
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
		FREE(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

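/*
 * 32-bit compat version of utimes(2).
 */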
int
netbsd32_utimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_utimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
	    (char *)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

/*
 * Common routine to set access and modification times given a vnode.
 */
static int
change_utimes32(vp, tptr, p)
	struct vnode *vp;
	netbsd32_timevalp_t tptr;
	struct proc *p;
{
	struct netbsd32_timeval tv32[2];
	struct timeval tv[2];
	struct vattr vattr;
	int error;

	VATTR_NULL(&vattr);
	if (tptr == NULL) {
		microtime(&tv[0]);
		tv[1] = tv[0];
		vattr.va_vaflags |= VA_UTIMES_NULL;
	} else {
		error = copyin((caddr_t)(u_long)tptr, tv32, sizeof(tv32));
		if (error)
			return (error);
		netbsd32_to_timeval(&tv32[0], &tv[0]);
		netbsd32_to_timeval(&tv32[1], &tv[1]);
	}
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_atime.tv_sec = tv[0].tv_sec;
	vattr.va_atime.tv_nsec = tv[0].tv_usec * 1000;
	vattr.va_mtime.tv_sec = tv[1].tv_sec;
	vattr.va_mtime.tv_nsec = tv[1].tv_usec * 1000;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	VOP_UNLOCK(vp, 0);
	return (error);
}

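/*
 * 32-bit compat version of statfs(2): get statistics for the file
 * system containing the given path.
 */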
int
netbsd32_statfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_statfs_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
	    (char *)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	mp = nd.ni_vp->v_mount;
	sp = &mp->mnt_stat;
	vrele(nd.ni_vp);
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	return (copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32)));
}

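/*
 * 32-bit compat version of fstatfs(2).
 */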
int
netbsd32_fstatfs(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_fstatfs_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statfsp_t) buf;
	} */ *uap = v;
	struct file *fp;
	struct mount *mp;
	struct statfs *sp;
	struct netbsd32_statfs s32;
	int error;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	mp = ((struct vnode *)fp->f_data)->v_mount;
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		goto out;
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	netbsd32_from_statfs(sp, &s32);
	error = copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32));
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

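/*
 * 32-bit compat version of futimes(2).
 */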
int
netbsd32_futimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_futimes_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct file *fp;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);

	error = change_utimes32((struct vnode *)fp->f_data,
	    SCARG(uap, tptr), p);
	FILE_UNUSE(fp, p);
	return (error);
}

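/*
 * 32-bit compat version of getdents(2).  No conversion of the dirent
 * records is done here; the record layout is assumed to match the
 * 32-bit ABI.
 */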
int
netbsd32_getdents(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_getdents_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_charp) buf;
		syscallarg(netbsd32_size_t) count;
	} */ *uap = v;
	struct file *fp;
	int error, done;

	/* getvnode() will use the descriptor for us */
	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}
	error = vn_readdir(fp, (caddr_t)(u_long)SCARG(uap, buf), UIO_USERSPACE,
	    SCARG(uap, count), &done, p, 0, 0);
	*retval = done;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}

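/*
 * 32-bit compat version of lutimes(2): like utimes(2), but does not
 * follow a trailing symbolic link.
 */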
int
netbsd32_lutimes(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_lutimes_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(const netbsd32_timevalp_t) tptr;
	} */ *uap = v;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE,
	    (caddr_t)(u_long)SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);

	error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

	vrele(nd.ni_vp);
	return (error);
}

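/*
 * 32-bit compat version of __stat13(): stat the given path and copy
 * the result out as a struct netbsd32_stat.
 */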
int
netbsd32___stat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___stat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)(u_long)SCARG(uap, path);
	sg = stackgap_init(p->p_emul);
	CHECK_ALT_EXIST(p, &sg, path);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
	return (error);
}

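/*
 * 32-bit compat version of __fstat13().
 */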
int
netbsd32___fstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___fstat13_args /* {
		syscallarg(int) fd;
		syscallarg(netbsd32_statp_t) sb;
	} */ *uap = v;
	int fd = SCARG(uap, fd);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct netbsd32_stat sb32;
	struct stat ub;
	int error = 0;

	if ((u_int)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL)
		return (EBADF);
	switch (fp->f_type) {

	case DTYPE_VNODE:
		error = vn_stat((struct vnode *)fp->f_data, &ub, p);
		break;

	case DTYPE_SOCKET:
		error = soo_stat((struct socket *)fp->f_data, &ub);
		break;

	default:
		panic("fstat");
		/*NOTREACHED*/
	}
	if (error == 0) {
		netbsd32_from___stat13(&ub, &sb32);
		error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, sb),
		    sizeof(sb32));
	}
	return (error);
}

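/*
 * 32-bit compat version of __lstat13().
 */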
int
netbsd32___lstat13(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___lstat13_args /* {
		syscallarg(const netbsd32_charp) path;
		syscallarg(netbsd32_statp_t) ub;
	} */ *uap = v;
	struct netbsd32_stat sb32;
	struct stat sb;
	int error;
	struct nameidata nd;
	caddr_t sg;
	const char *path;

	path = (char *)(u_long)SCARG(uap, path);
	sg = stackgap_init(p->p_emul);
	CHECK_ALT_EXIST(p, &sg, path);

	/* lstat(2) must not follow a trailing symbolic link. */
	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
	return (error);
}

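/*
 * 32-bit compat version of preadv(2): read at the given offset without
 * updating the descriptor's file offset.  Non-vnode descriptors and
 * FIFOs get ESPIPE.
 */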
int
netbsd32_preadv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_preadv_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((u_int)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL ||
	    (fp->f_flag & FREAD) == 0)
		return (EBADF);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO)
		return (ESPIPE);

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		return (error);

	return (dofilereadv32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &offset, 0, retval));
}

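/*
 * 32-bit compat version of pwritev(2): write at the given offset
 * without updating the descriptor's file offset.
 */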
int
netbsd32_pwritev(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32_pwritev_args /* {
		syscallarg(int) fd;
		syscallarg(const netbsd32_iovecp_t) iovp;
		syscallarg(int) iovcnt;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct vnode *vp;
	off_t offset;
	int error, fd = SCARG(uap, fd);

	if ((u_int)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL ||
	    (fp->f_flag & FWRITE) == 0)
		return (EBADF);

	vp = (struct vnode *)fp->f_data;
	if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO)
		return (ESPIPE);

	offset = SCARG(uap, offset);

	/*
	 * XXX This works because no file systems actually
	 * XXX take any action on the seek operation.
	 */
	if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
		return (error);

	return (dofilewritev32(p, fd, fp,
	    (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
	    SCARG(uap, iovcnt), &offset, 0, retval));
}

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */
int getcwd_common __P((struct vnode *, struct vnode *,
    char **, char *, int, int, struct proc *));

int
netbsd32___getcwd(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct netbsd32___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int error;
	char *path;
	char *bp, *bend;
	int len = (int)SCARG(uap, length);
	int lenused;

	if (len > MAXPATHLEN * 4)
		len = MAXPATHLEN * 4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
#define GETCWD_CHECK_ACCESS 0x0001
	error = getcwd_common(p->p_cwdi->cwdi_cdir, NULL, &bp, path, len / 2,
	    GETCWD_CHECK_ACCESS, p);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, (caddr_t)(u_long)SCARG(uap, bufp), lenused);

 out:
	free(path, M_TEMP);
	return error;
}