1 /*	$NetBSD: coda_vnops.c,v 1.63.2.2 2007/12/26 21:38:51 ad Exp $	*/
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University.  Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.63.2.2 2007/12/26 21:38:51 ad Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/user.h>
64 #include <sys/vnode.h>
65 #include <sys/kauth.h>
66
67 #include <miscfs/genfs/genfs.h>
68
69 #include <coda/coda.h>
70 #include <coda/cnode.h>
71 #include <coda/coda_vnops.h>
72 #include <coda/coda_venus.h>
73 #include <coda/coda_opstats.h>
74 #include <coda/coda_subr.h>
75 #include <coda/coda_namecache.h>
76 #include <coda/coda_pioctl.h>
77
78 /*
79 * These flags select various performance enhancements.
80 */
81 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
82 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
83 int coda_access_cache = 1; /* Set to handle some access checks directly */
84
85 /* structure to keep track of vfs calls */
86
87 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
88
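/*
 * Macros to update the statistics entry for a vnode operation: count the
 * call, and record whether it was satisfied internally, failed internally,
 * or was generated internally.
 */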
89 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
90 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
91 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
92 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
93
94 /* What we are delaying for in printf */
95 int coda_printf_delay = 0; /* in microseconds */
96 int coda_vnop_print_entry = 0;
97 static int coda_lockdebug = 0;
98
99 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
100
101 /* Definition of the vnode operation vector */
102
103 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
104 { &vop_default_desc, coda_vop_error },
105 { &vop_lookup_desc, coda_lookup }, /* lookup */
106 { &vop_create_desc, coda_create }, /* create */
107 { &vop_mknod_desc, coda_vop_error }, /* mknod */
108 { &vop_open_desc, coda_open }, /* open */
109 { &vop_close_desc, coda_close }, /* close */
110 { &vop_access_desc, coda_access }, /* access */
111 { &vop_getattr_desc, coda_getattr }, /* getattr */
112 { &vop_setattr_desc, coda_setattr }, /* setattr */
113 { &vop_read_desc, coda_read }, /* read */
114 { &vop_write_desc, coda_write }, /* write */
115 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
116 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
117 { &vop_mmap_desc, genfs_mmap }, /* mmap */
118 { &vop_fsync_desc, coda_fsync }, /* fsync */
119 { &vop_remove_desc, coda_remove }, /* remove */
120 { &vop_link_desc, coda_link }, /* link */
121 { &vop_rename_desc, coda_rename }, /* rename */
122 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
123 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
124 { &vop_symlink_desc, coda_symlink }, /* symlink */
125 { &vop_readdir_desc, coda_readdir }, /* readdir */
126 { &vop_readlink_desc, coda_readlink }, /* readlink */
127 { &vop_abortop_desc, coda_abortop }, /* abortop */
128 { &vop_inactive_desc, coda_inactive }, /* inactive */
129 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
130 { &vop_lock_desc, coda_lock }, /* lock */
131 { &vop_unlock_desc, coda_unlock }, /* unlock */
132 { &vop_bmap_desc, coda_bmap }, /* bmap */
133 { &vop_strategy_desc, coda_strategy }, /* strategy */
134 { &vop_print_desc, coda_vop_error }, /* print */
135 { &vop_islocked_desc, coda_islocked }, /* islocked */
136 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
137 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
138 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
139 { &vop_lease_desc, coda_vop_nop }, /* lease */
140 { &vop_seek_desc, genfs_seek }, /* seek */
141 { &vop_poll_desc, genfs_poll }, /* poll */
142 { &vop_getpages_desc, coda_getpages }, /* getpages */
143 { &vop_putpages_desc, coda_putpages }, /* putpages */
144 { NULL, NULL }
145 };
146
147 const struct vnodeopv_desc coda_vnodeop_opv_desc =
148 { &coda_vnodeop_p, coda_vnodeop_entries };
149
150 /* Definitions of NetBSD vnodeop interfaces */
151
152 /*
153 * A generic error routine. Return EIO without looking at arguments.
154 */
155 int
156 coda_vop_error(void *anon) {
157 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
158
159 if (codadebug) {
160 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
161 (*desc)->vdesc_name));
162 }
163
164 return EIO;
165 }
166
167 /* A generic do-nothing. For lease_check, advlock */
168 int
169 coda_vop_nop(void *anon) {
170 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
171
172 if (codadebug) {
173 myprintf(("Vnode operation %s called, but unsupported\n",
174 (*desc)->vdesc_name));
175 }
176 return (0);
177 }
178
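/* Initialize the per-operation statistics table. */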
179 int
180 coda_vnodeopstats_init(void)
181 {
182 int i;
183
184 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
185 coda_vnodeopstats[i].opcode = i;
186 coda_vnodeopstats[i].entries = 0;
187 coda_vnodeopstats[i].sat_intrn = 0;
188 coda_vnodeopstats[i].unsat_intrn = 0;
189 coda_vnodeopstats[i].gen_intrn = 0;
190 }
191
192 return 0;
193 }
194
195 /*
196 * XXX The entire relationship between VOP_OPEN and having a container
197 * file (via venus_open) needs to be reexamined. In particular, it's
198 * valid to open/mmap/close and then reference. Instead of doing
199 * VOP_OPEN when getpages needs a container, we should do the
200 * venus_open part, and record that the vnode has opened the container
201 * for getpages, and do the matching logical close on coda_inactive.
202 * Further, coda_rdwr needs a container file, and sometimes needs to
203 * do the equivalent of open (core dumps).
204 */
205 /*
206 * coda_open calls Venus to return the device and inode of the
207 * container file, and then obtains a vnode for that file. The
208 * container vnode is stored in the coda vnode, and a reference is
209 * added for each open file.
210 */
211 int
212 coda_open(void *v)
213 {
214 /*
215 * NetBSD can pass the O_EXCL flag in mode, even though the check
216 * has already happened.  Venus defensively assumes that if open
217 * is passed O_EXCL, it must be a bug.  We strip the flag here.
218 */
219 /* true args */
220 struct vop_open_args *ap = v;
221 struct vnode *vp = ap->a_vp;
222 struct cnode *cp = VTOC(vp);
223 int flag = ap->a_mode & (~O_EXCL);
224 kauth_cred_t cred = ap->a_cred;
225 /* locals */
226 int error;
227 dev_t dev; /* container file device, inode, vnode */
228 ino_t inode;
229 struct vnode *container_vp;
230
231 MARK_ENTRY(CODA_OPEN_STATS);
232
233 /* Check for open of control file. */
234 if (IS_CTL_VP(vp)) {
235 /* if (WRITABLE(flag)) */
236 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
237 MARK_INT_FAIL(CODA_OPEN_STATS);
238 return(EACCES);
239 }
240 MARK_INT_SAT(CODA_OPEN_STATS);
241 return(0);
242 }
243
244 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
245 if (error)
246 return (error);
248 CODADEBUG(CODA_OPEN, myprintf(("open: dev %d inode %llu result %d\n",
249 dev, (unsigned long long)inode, error)); )
251
252 /*
253 * Obtain locked and referenced container vnode from container
254 * device/inode.
255 */
256 error = coda_grab_vnode(dev, inode, &container_vp);
257 if (error)
258 return (error);
259
260 /* Save the vnode pointer for the container file. */
261 if (cp->c_ovp == NULL) {
262 cp->c_ovp = container_vp;
263 } else {
264 if (cp->c_ovp != container_vp)
265 /*
266 * Perhaps venus returned a different container, or
267 * something else went wrong.
268 */
269 panic("coda_open: cp->c_ovp != container_vp");
270 }
271 cp->c_ocount++;
272
273 /* Flush the attribute cache if writing the file. */
274 if (flag & FWRITE) {
275 cp->c_owrite++;
276 cp->c_flags &= ~C_VATTR;
277 }
278
279 /*
280 * Save the <device, inode> pair for the container file to speed
281 * up subsequent reads while closed (mmap, program execution).
282 * This is perhaps safe because venus will invalidate the node
283 * before changing the container file mapping.
284 */
285 cp->c_device = dev;
286 cp->c_inode = inode;
287
288 /* Open the container file. */
289 error = VOP_OPEN(container_vp, flag, cred);
290 /*
291 * Drop the lock on the container, after we have done VOP_OPEN
292 * (which requires a locked vnode).
293 */
294 VOP_UNLOCK(container_vp, 0);
295 return(error);
296 }
297
298 /*
299 * Close the cache file used for I/O and notify Venus.
300 */
301 int
302 coda_close(void *v)
303 {
304 /* true args */
305 struct vop_close_args *ap = v;
306 struct vnode *vp = ap->a_vp;
307 struct cnode *cp = VTOC(vp);
308 int flag = ap->a_fflag;
309 kauth_cred_t cred = ap->a_cred;
310 /* locals */
311 int error;
312
313 MARK_ENTRY(CODA_CLOSE_STATS);
314
315 /* Check for close of control file. */
316 if (IS_CTL_VP(vp)) {
317 MARK_INT_SAT(CODA_CLOSE_STATS);
318 return(0);
319 }
320
321 /*
322 * XXX The IS_UNMOUNTING part of this is very suspect.
323 */
324 if (IS_UNMOUNTING(cp)) {
325 if (cp->c_ovp) {
326 #ifdef CODA_VERBOSE
327 printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
328 vp->v_usecount, cp->c_ovp, vp, cp);
329 #endif
330 #ifdef hmm
331 vgone(cp->c_ovp);
332 #else
333 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
334 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
335 vput(cp->c_ovp);
336 #endif
337 } else {
338 #ifdef CODA_VERBOSE
339 printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
340 #endif
341 }
342 return ENODEV;
343 }
344
345 /* Lock the container node, and VOP_CLOSE it. */
346 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
347 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
348 /*
349 * Drop the lock we just obtained, and vrele the container vnode.
350 * Decrement reference counts, and clear container vnode pointer on
351 * last close.
352 */
353 vput(cp->c_ovp);
354 if (flag & FWRITE)
355 --cp->c_owrite;
356 if (--cp->c_ocount == 0)
357 cp->c_ovp = NULL;
358
359 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
360
361 CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
362 return(error);
363 }
364
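/*
 * VOP_READ: hand the request to coda_rdwr(), which redirects the I/O to
 * the container file.
 */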
365 int
366 coda_read(void *v)
367 {
368 struct vop_read_args *ap = v;
369
370 ENTRY;
371 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
372 ap->a_ioflag, ap->a_cred, curlwp));
373 }
374
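/*
 * VOP_WRITE: as with read, hand the request to coda_rdwr().
 */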
375 int
376 coda_write(void *v)
377 {
378 struct vop_write_args *ap = v;
379
380 ENTRY;
381 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
382 ap->a_ioflag, ap->a_cred, curlwp));
383 }
384
385 int
386 coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
387 kauth_cred_t cred, struct lwp *l)
388 {
389 /* upcall decl */
390 /* NOTE: container file operation!!! */
391 /* locals */
392 struct cnode *cp = VTOC(vp);
393 struct vnode *cfvp = cp->c_ovp;
394 struct proc *p = l->l_proc;
395 int opened_internally = 0;
396 int error = 0;
397
398 MARK_ENTRY(CODA_RDWR_STATS);
399
400 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
401 uiop->uio_iov->iov_base,
402 (unsigned long) uiop->uio_resid,
403 (long long) uiop->uio_offset)); )
404
405 /* Check for rdwr of control object. */
406 if (IS_CTL_VP(vp)) {
407 MARK_INT_FAIL(CODA_RDWR_STATS);
408 return(EINVAL);
409 }
410
411 /* Redirect the request to UFS. */
412
413 /*
414 * If file is not already open this must be a page
415 * {read,write} request. Iget the cache file's inode
416 * pointer if we still have its <device, inode> pair.
417 * Otherwise, we must do an internal open to derive the
418 * pair.
419 * XXX Integrate this into a coherent strategy for container
420 * file acquisition.
421 */
422 if (cfvp == NULL) {
423 /*
424 * If we're dumping core, do the internal open. Otherwise
425 * venus won't have the correct size of the core when
426 * it's completely written.
427 */
428 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
429 printf("coda_rdwr: grabbing container vnode, losing reference\n");
430 /* Get locked and refed vnode. */
431 error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
432 if (error) {
433 MARK_INT_FAIL(CODA_RDWR_STATS);
434 return(error);
435 }
436 /*
437 * Drop lock.
438 * XXX Where is the reference released?
439 */
440 VOP_UNLOCK(cfvp, 0);
441 }
442 else {
443 printf("coda_rdwr: internal VOP_OPEN\n");
444 opened_internally = 1;
445 MARK_INT_GEN(CODA_OPEN_STATS);
446 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
447 #ifdef CODA_VERBOSE
448 printf("coda_rdwr: Internally Opening %p\n", vp);
449 #endif
450 if (error) {
451 MARK_INT_FAIL(CODA_RDWR_STATS);
452 return(error);
453 }
454 cfvp = cp->c_ovp;
455 }
456 }
457
458 /* Have UFS handle the call. */
459 CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
460 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
461
462 if (rw == UIO_READ) {
463 error = VOP_READ(cfvp, uiop, ioflag, cred);
464 } else {
465 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
466 }
467
468 if (error)
469 MARK_INT_FAIL(CODA_RDWR_STATS);
470 else
471 MARK_INT_SAT(CODA_RDWR_STATS);
472
473 /* Do an internal close if necessary. */
474 if (opened_internally) {
475 MARK_INT_GEN(CODA_CLOSE_STATS);
476 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
477 }
478
479 /* Invalidate cached attributes if writing. */
480 if (rw == UIO_WRITE)
481 cp->c_flags &= ~C_VATTR;
482 return(error);
483 }
484
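/*
 * The Coda pioctl interface.  Requests are only valid on the control
 * object; the target path is looked up here, checked to be a coda vnode,
 * and the ioctl is passed to Venus along with the target's fid.
 */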
485 int
486 coda_ioctl(void *v)
487 {
488 /* true args */
489 struct vop_ioctl_args *ap = v;
490 struct vnode *vp = ap->a_vp;
491 int com = ap->a_command;
492 void *data = ap->a_data;
493 int flag = ap->a_fflag;
494 kauth_cred_t cred = ap->a_cred;
495 /* locals */
496 int error;
497 struct vnode *tvp;
498 struct nameidata ndp;
499 struct PioctlData *iap = (struct PioctlData *)data;
500
501 MARK_ENTRY(CODA_IOCTL_STATS);
502
503 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
504
505 /* Don't check for an operation on a dying object; for the ctlvp it
506 shouldn't matter. */
507
508 /* Must be control object to succeed. */
509 if (!IS_CTL_VP(vp)) {
510 MARK_INT_FAIL(CODA_IOCTL_STATS);
511 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
512 return (EOPNOTSUPP);
513 }
514 /* Look up the pathname. */
515
516 /* Should we use the name cache here? It would get it from
517 lookupname sooner or later anyway, right? */
518
519 NDINIT(&ndp, LOOKUP, (iap->follow ? FOLLOW : NOFOLLOW), UIO_USERSPACE,
520 iap->path);
521 error = namei(&ndp);
522 tvp = ndp.ni_vp;
523
524 if (error) {
525 MARK_INT_FAIL(CODA_IOCTL_STATS);
526 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
527 error));)
528 return(error);
529 }
530
531 /*
532 * Make sure this is a coda style cnode, but it may be a
533 * different vfsp
534 */
535 /* XXX: this totally violates the comment about vtagtype in vnode.h */
536 if (tvp->v_tag != VT_CODA) {
537 vrele(tvp);
538 MARK_INT_FAIL(CODA_IOCTL_STATS);
539 CODADEBUG(CODA_IOCTL,
540 myprintf(("coda_ioctl error: %s not a coda object\n",
541 iap->path));)
542 return(EINVAL);
543 }
544
545 if (iap->vi.in_size > VC_MAXDATASIZE) {
546 vrele(tvp);
547 return(EINVAL);
548 }
549 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
550 cred, curlwp);
551
552 if (error)
553 MARK_INT_FAIL(CODA_IOCTL_STATS);
554 else
555 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
556
557 vrele(tvp);
558 return(error);
559 }
560
561 /*
562 * To reduce the cost of a user-level venus, we cache attributes in
563 * the kernel. Each cnode has storage allocated for an attribute. If
564 * c_vattr is valid, return a reference to it. Otherwise, get the
565 * attributes from venus and store them in the cnode. There is some
566 * question whether this method is a security leak.  But I think that in
567 * order to make this call, the user must have done a lookup and
568 * opened the file, and therefore should already have access.
569 */
570 int
571 coda_getattr(void *v)
572 {
573 /* true args */
574 struct vop_getattr_args *ap = v;
575 struct vnode *vp = ap->a_vp;
576 struct cnode *cp = VTOC(vp);
577 struct vattr *vap = ap->a_vap;
578 kauth_cred_t cred = ap->a_cred;
579 /* locals */
580 int error;
581
582 MARK_ENTRY(CODA_GETATTR_STATS);
583
584 /* Check for getattr of control object. */
585 if (IS_CTL_VP(vp)) {
586 MARK_INT_FAIL(CODA_GETATTR_STATS);
587 return(ENOENT);
588 }
589
590 /* Check to see if the attributes have already been cached */
591 if (VALID_VATTR(cp)) {
592 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
593 coda_f2s(&cp->c_fid)));});
594 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
595 print_vattr(&cp->c_vattr); );
596
597 *vap = cp->c_vattr;
598 MARK_INT_SAT(CODA_GETATTR_STATS);
599 return(0);
600 }
601
602 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
603
604 if (!error) {
605 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
606 coda_f2s(&cp->c_fid), error)); )
607
608 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
609 print_vattr(vap); );
610
611 /* If not open for write, store attributes in cnode */
612 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
613 cp->c_vattr = *vap;
614 cp->c_flags |= C_VATTR;
615 }
616
617 }
618 return(error);
619 }
620
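/*
 * Pass a setattr request to Venus, and invalidate the cached attributes
 * on success so the next getattr refetches them.
 */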
621 int
622 coda_setattr(void *v)
623 {
624 /* true args */
625 struct vop_setattr_args *ap = v;
626 struct vnode *vp = ap->a_vp;
627 struct cnode *cp = VTOC(vp);
628 struct vattr *vap = ap->a_vap;
629 kauth_cred_t cred = ap->a_cred;
630 /* locals */
631 int error;
632
633 MARK_ENTRY(CODA_SETATTR_STATS);
634
635 /* Check for setattr of control object. */
636 if (IS_CTL_VP(vp)) {
637 MARK_INT_FAIL(CODA_SETATTR_STATS);
638 return(ENOENT);
639 }
640
641 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
642 print_vattr(vap);
643 }
644 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
645
646 if (!error)
647 cp->c_flags &= ~C_VATTR;
648
649 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
650 return(error);
651 }
652
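/*
 * Check access.  Lookup (exec) access to a directory can sometimes be
 * answered from the coda namecache; everything else is decided by Venus.
 */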
653 int
654 coda_access(void *v)
655 {
656 /* true args */
657 struct vop_access_args *ap = v;
658 struct vnode *vp = ap->a_vp;
659 struct cnode *cp = VTOC(vp);
660 int mode = ap->a_mode;
661 kauth_cred_t cred = ap->a_cred;
662 /* locals */
663 int error;
664
665 MARK_ENTRY(CODA_ACCESS_STATS);
666
667 /* Check for access of control object. Only read access is
668 allowed on it. */
669 if (IS_CTL_VP(vp)) {
670 /* bogus hack - all will be marked as successes */
671 MARK_INT_SAT(CODA_ACCESS_STATS);
672 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
673 ? 0 : EACCES);
674 }
675
676 /*
677 * If the file is a directory and we are checking exec (e.g. lookup)
678 * access, and the file is in the namecache, then the user must have
679 * lookup access to it.
680 */
681 if (coda_access_cache) {
682 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
683 if (coda_nc_lookup(cp, ".", 1, cred)) {
684 MARK_INT_SAT(CODA_ACCESS_STATS);
685 return(0); /* it was in the cache */
686 }
687 }
688 }
689
690 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
691
692 return(error);
693 }
694
695 /*
696 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
697 * done. If a buffer has been saved in anticipation of a coda_create or
698 * a coda_remove, delete it.
699 */
700 /* ARGSUSED */
701 int
702 coda_abortop(void *v)
703 {
704 /* true args */
705 struct vop_abortop_args /* {
706 struct vnode *a_dvp;
707 struct componentname *a_cnp;
708 } */ *ap = v;
709 /* upcall decl */
710 /* locals */
711
712 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
713 PNBUF_PUT(ap->a_cnp->cn_pnbuf);
714 return (0);
715 }
716
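/*
 * Return the contents of a symbolic link, from the cached copy in the
 * cnode if it is valid, otherwise by asking Venus (and caching the
 * result when symlink caching is enabled).
 */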
717 int
718 coda_readlink(void *v)
719 {
720 /* true args */
721 struct vop_readlink_args *ap = v;
722 struct vnode *vp = ap->a_vp;
723 struct cnode *cp = VTOC(vp);
724 struct uio *uiop = ap->a_uio;
725 kauth_cred_t cred = ap->a_cred;
726 /* locals */
727 struct lwp *l = curlwp;
728 int error;
729 char *str;
730 int len;
731
732 MARK_ENTRY(CODA_READLINK_STATS);
733
734 /* Check for readlink of control object. */
735 if (IS_CTL_VP(vp)) {
736 MARK_INT_FAIL(CODA_READLINK_STATS);
737 return(ENOENT);
738 }
739
740 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
741 uiop->uio_rw = UIO_READ;
742 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
743 if (error)
744 MARK_INT_FAIL(CODA_READLINK_STATS);
745 else
746 MARK_INT_SAT(CODA_READLINK_STATS);
747 return(error);
748 }
749
750 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
751
752 if (!error) {
753 uiop->uio_rw = UIO_READ;
754 error = uiomove(str, len, uiop);
755
756 if (coda_symlink_cache) {
757 cp->c_symlink = str;
758 cp->c_symlen = len;
759 cp->c_flags |= C_SYMLINK;
760 } else
761 CODA_FREE(str, len);
762 }
763
764 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
765 return(error);
766 }
767
768 int
769 coda_fsync(void *v)
770 {
771 /* true args */
772 struct vop_fsync_args *ap = v;
773 struct vnode *vp = ap->a_vp;
774 struct cnode *cp = VTOC(vp);
775 kauth_cred_t cred = ap->a_cred;
776 /* locals */
777 struct vnode *convp = cp->c_ovp;
778 int error;
779
780 MARK_ENTRY(CODA_FSYNC_STATS);
781
782 /* Check for fsync on an unmounting object */
783 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
784 * after an unmount has been initiated. This is a Bad Thing,
785 * which we have to avoid. Not a legitimate failure for stats.
786 */
787 if (IS_UNMOUNTING(cp)) {
788 return(ENODEV);
789 }
790
791 /* Check for fsync of control object. */
792 if (IS_CTL_VP(vp)) {
793 MARK_INT_SAT(CODA_FSYNC_STATS);
794 return(0);
795 }
796
797 if (convp)
798 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
799
800 /*
801 * We can expect fsync on any vnode at all if venus is purging it.
802 * Venus can't very well answer the fsync request, now can it?
803 * Hopefully, it won't have to, because hopefully, venus preserves
804 * the (possibly untrue) invariant that it never purges an open
805 * vnode. Hopefully.
806 */
807 if (cp->c_flags & C_PURGING) {
808 return(0);
809 }
810
811 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
812
813 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
814 return(error);
815 }
816
817 /*
818 * vp is locked on entry, and we must unlock it.
819 * XXX This routine is suspect and probably needs rewriting.
820 */
821 int
822 coda_inactive(void *v)
823 {
824 /* true args */
825 struct vop_inactive_args *ap = v;
826 struct vnode *vp = ap->a_vp;
827 struct cnode *cp = VTOC(vp);
828 kauth_cred_t cred __unused = NULL;
829
830 /* We don't need to send inactive to venus - DCS */
831 MARK_ENTRY(CODA_INACTIVE_STATS);
832
833 if (IS_CTL_VP(vp)) {
834 MARK_INT_SAT(CODA_INACTIVE_STATS);
835 return 0;
836 }
837
838 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
839 coda_f2s(&cp->c_fid), vp->v_mount));)
840
841 /* If an array has been allocated to hold the symlink, deallocate it */
842 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
843 if (cp->c_symlink == NULL)
844 panic("coda_inactive: null symlink pointer in cnode");
845
846 CODA_FREE(cp->c_symlink, cp->c_symlen);
847 cp->c_flags &= ~C_SYMLINK;
848 cp->c_symlen = 0;
849 }
850
851 /* Remove it from the table so it can't be found. */
852 coda_unsave(cp);
853 if (vp->v_mount->mnt_data == NULL) {
854 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
855 panic("badness in coda_inactive");
856 }
857
858 if (IS_UNMOUNTING(cp)) {
859 /* XXX Do we need to VOP_CLOSE container vnodes? */
860 if (vp->v_usecount > 0)
861 printf("coda_inactive: IS_UNMOUNTING %p usecount %d\n",
862 vp, vp->v_usecount);
863 if (cp->c_ovp != NULL)
864 printf("coda_inactive: %p ovp != NULL\n", vp);
865 VOP_UNLOCK(vp, 0);
866 } else {
867 /* Sanity checks that perhaps should be panic. */
868 if (vp->v_usecount) {
869 printf("coda_inactive: %p usecount %d\n", vp, vp->v_usecount);
870 }
871 if (cp->c_ovp != NULL) {
872 printf("coda_inactive: %p ovp != NULL\n", vp);
873 }
874 VOP_UNLOCK(vp, 0);
875 *ap->a_recycle = true;
876 }
877
878 MARK_INT_SAT(CODA_INACTIVE_STATS);
879 return(0);
880 }
881
882 /*
883 * Coda does not use the normal namecache, but a private version.
884 * Consider how to use the standard facility instead.
885 */
886 int
887 coda_lookup(void *v)
888 {
889 /* true args */
890 struct vop_lookup_args *ap = v;
891 /* (locked) vnode of dir in which to do lookup */
892 struct vnode *dvp = ap->a_dvp;
893 struct cnode *dcp = VTOC(dvp);
894 /* output variable for result */
895 struct vnode **vpp = ap->a_vpp;
896 /* name to lookup */
897 struct componentname *cnp = ap->a_cnp;
898 kauth_cred_t cred = cnp->cn_cred;
899 struct lwp *l = curlwp;
900 /* locals */
901 struct cnode *cp;
902 const char *nm = cnp->cn_nameptr;
903 int len = cnp->cn_namelen;
904 int flags = cnp->cn_flags;
905 int isdot;
906 CodaFid VFid;
907 int vtype;
908 int error = 0;
909
910 MARK_ENTRY(CODA_LOOKUP_STATS);
911
912 CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
913 nm, coda_f2s(&dcp->c_fid))););
914
915 /*
916 * XXX componentname flags in MODMASK are not handled at all
917 */
918
919 /*
920 * The overall strategy is to switch on the lookup type and get a
921 * result vnode that is vref'd but not locked. Then, the code at
922 * exit: switches on ., .., and regular lookups and does the right
923 * locking.
924 */
925
926 /* Check for lookup of control object. */
927 if (IS_CTL_NAME(dvp, nm, len)) {
928 *vpp = coda_ctlvp;
929 vref(*vpp);
930 MARK_INT_SAT(CODA_LOOKUP_STATS);
931 goto exit;
932 }
933
934 /* Avoid trying to hand venus an unreasonably long name. */
935 if (len+1 > CODA_MAXNAMLEN) {
936 MARK_INT_FAIL(CODA_LOOKUP_STATS);
937 CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
938 coda_f2s(&dcp->c_fid), nm)););
939 *vpp = (struct vnode *)0;
940 error = EINVAL;
941 goto exit;
942 }
943
944 /*
945 * XXX Check for DOT lookups, and short circuit all the caches,
946 * just doing an extra vref. (venus guarantees that lookup of
947 * . returns self.)
948 */
949 isdot = (len == 1 && nm[0] == '.');
950
951 /*
952 * Try to resolve the lookup in the minicache. If that fails, ask
953 * venus to do the lookup. XXX The interaction between vnode
954 * locking and any locking that coda does is not clear.
955 */
956 cp = coda_nc_lookup(dcp, nm, len, cred);
957 if (cp) {
958 *vpp = CTOV(cp);
959 vref(*vpp);
960 CODADEBUG(CODA_LOOKUP,
961 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
962 } else {
963 /* The name wasn't cached, so ask Venus. */
964 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);
965
966 if (error) {
967 MARK_INT_FAIL(CODA_LOOKUP_STATS);
968 CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
969 coda_f2s(&dcp->c_fid), nm, error));)
970 *vpp = (struct vnode *)0;
971 } else {
972 MARK_INT_SAT(CODA_LOOKUP_STATS);
973 CODADEBUG(CODA_LOOKUP,
974 myprintf(("lookup: %s type %o result %d\n",
975 coda_f2s(&VFid), vtype, error)); )
976
977 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
978 *vpp = CTOV(cp);
979 /* vpp is now vrefed. */
980
981 /*
982 * Unless this vnode is marked CODA_NOCACHE, enter it into
983 * the coda name cache to avoid a future venus round-trip.
984 * XXX Interaction with componentname NOCACHE is unclear.
985 */
986 if (!(vtype & CODA_NOCACHE))
987 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
988 }
989 }
990
991 exit:
992 /*
993 * If we are creating, and this was the last name to be looked up,
994 * and the error was ENOENT, then make the leaf NULL and return
995 * success.
996 * XXX Check against new lookup rules.
997 */
998 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
999 && (cnp->cn_flags & ISLASTCN)
1000 && (error == ENOENT))
1001 {
1002 error = EJUSTRETURN;
1003 cnp->cn_flags |= SAVENAME;
1004 *ap->a_vpp = NULL;
1005 }
1006
1007 /*
1008 * If we are removing, and we are at the last element, and we
1009 * found it, then we need to keep the name around so that the
1010 * removal will go ahead as planned.
1011 * XXX Check against new lookup rules.
1012 */
1013 if ((cnp->cn_nameiop == DELETE)
1014 && (cnp->cn_flags & ISLASTCN)
1015 && !error)
1016 {
1017 cnp->cn_flags |= SAVENAME;
1018 }
1019
1020 /*
1021 * If the lookup succeeded, we must generally lock the returned
1022 * vnode. This could be a ., .., or normal lookup. See
1023 * vnodeops(9) for the details.
1024 */
1025 /*
1026 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1027 * somehow, and remove LK_RETRY.
1028 */
1029 if (!error || (error == EJUSTRETURN)) {
1030 /* Lookup has a value and it isn't "."? */
1031 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1032 if (flags & ISDOTDOT)
1033 /* ..: unlock parent */
1034 VOP_UNLOCK(dvp, 0);
1035 /* all but .: lock child */
1036 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1037 if (flags & ISDOTDOT)
1038 /* ..: relock parent */
1039 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1040 }
1041 /* else .: leave dvp locked */
1042 } else {
1043 /* The lookup failed, so return NULL. Leave dvp locked. */
1044 *ap->a_vpp = NULL;
1045 }
1046 return(error);
1047 }
1048
1049 /*ARGSUSED*/
1050 int
1051 coda_create(void *v)
1052 {
1053 /* true args */
1054 struct vop_create_args *ap = v;
1055 struct vnode *dvp = ap->a_dvp;
1056 struct cnode *dcp = VTOC(dvp);
1057 struct vattr *va = ap->a_vap;
1058 int exclusive = 1;
1059 int mode = ap->a_vap->va_mode;
1060 struct vnode **vpp = ap->a_vpp;
1061 struct componentname *cnp = ap->a_cnp;
1062 kauth_cred_t cred = cnp->cn_cred;
1063 struct lwp *l = curlwp;
1064 /* locals */
1065 int error;
1066 struct cnode *cp;
1067 const char *nm = cnp->cn_nameptr;
1068 int len = cnp->cn_namelen;
1069 CodaFid VFid;
1070 struct vattr attr;
1071
1072 MARK_ENTRY(CODA_CREATE_STATS);
1073
1074 /* All creates are exclusive XXX */
1075 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1076
1077 /* Check for create of control object. */
1078 if (IS_CTL_NAME(dvp, nm, len)) {
1079 *vpp = (struct vnode *)0;
1080 MARK_INT_FAIL(CODA_CREATE_STATS);
1081 return(EACCES);
1082 }
1083
1084 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1085
1086 if (!error) {
1087
1088 /*
1089 * XXX Violation of venus/kernel invariants is a difficult case,
1090 * but venus should not be able to cause a panic.
1091 */
1092 /* If this is an exclusive create, panic if the file already exists. */
1093 /* Venus should have detected the file and reported EEXIST. */
1094
1095 if ((exclusive == 1) &&
1096 (coda_find(&VFid) != NULL))
1097 panic("cnode existed for newly created file!");
1098
1099 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1100 *vpp = CTOV(cp);
1101
1102 /* XXX vnodeops doesn't say this argument can be changed. */
1103 /* Update va to reflect the new attributes. */
1104 (*va) = attr;
1105
1106 /* Update the attribute cache and mark it as valid */
1107 if (coda_attr_cache) {
1108 VTOC(*vpp)->c_vattr = attr;
1109 VTOC(*vpp)->c_flags |= C_VATTR;
1110 }
1111
1112 /* Invalidate parent's attr cache (modification time has changed). */
1113 VTOC(dvp)->c_flags &= ~C_VATTR;
1114
1115 /* enter the new vnode in the Name Cache */
1116 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1117
1118 CODADEBUG(CODA_CREATE,
1119 myprintf(("create: %s, result %d\n",
1120 coda_f2s(&VFid), error)); )
1121 } else {
1122 *vpp = (struct vnode *)0;
1123 CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
1124 }
1125
1126 /*
1127 * vnodeops(9) says that we must unlock the parent and lock the child.
1128 * XXX Should we lock the child first?
1129 */
1130 vput(dvp);
1131 if (!error) {
1132 if ((cnp->cn_flags & LOCKLEAF) == 0) {
1133 /* This should not happen; flags are for lookup only. */
1134 printf("coda_create: LOCKLEAF not set!\n");
1135 }
1136
1137 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1138 /* XXX Perhaps avoid this panic. */
1139 panic("coda_create: couldn't lock child");
1140 }
1141 }
1142
1143 /* Per vnodeops(9), free name except on success and SAVESTART. */
1144 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1145 PNBUF_PUT(cnp->cn_pnbuf);
1146 }
1147 return(error);
1148 }
1149
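/*
 * Remove a file: adjust any cached link count, zap the namecache entry,
 * then ask Venus to do the remove.  Both the parent and the child are
 * released before returning.
 */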
1150 int
1151 coda_remove(void *v)
1152 {
1153 /* true args */
1154 struct vop_remove_args *ap = v;
1155 struct vnode *dvp = ap->a_dvp;
1156 struct cnode *cp = VTOC(dvp);
1157 struct vnode *vp = ap->a_vp;
1158 struct componentname *cnp = ap->a_cnp;
1159 kauth_cred_t cred = cnp->cn_cred;
1160 struct lwp *l = curlwp;
1161 /* locals */
1162 int error;
1163 const char *nm = cnp->cn_nameptr;
1164 int len = cnp->cn_namelen;
1165 struct cnode *tp;
1166
1167 MARK_ENTRY(CODA_REMOVE_STATS);
1168
1169 CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
1170 nm, coda_f2s(&cp->c_fid))););
1171
1172 /* Remove the file's entry from the CODA Name Cache */
1173 /* We're being conservative here, it might be that this person
1174 * doesn't really have sufficient access to delete the file
1175 * but we feel zapping the entry won't really hurt anyone -- dcs
1176 */
1177 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1178 * exist, and one is removed, the link count on the other will be
1179 * off by 1. We could either invalidate the attrs if cached, or
1180 * fix them. I'll try to fix them. DCS 11/8/94
1181 */
1182 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1183 if (tp) {
1184 if (VALID_VATTR(tp)) { /* If attrs are cached */
1185 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1186 tp->c_vattr.va_nlink--;
1187 }
1188 }
1189
1190 coda_nc_zapfile(VTOC(dvp), nm, len);
1191 /* No need to flush it if it doesn't exist! */
1192 }
1193 /* Invalidate the parent's attr cache, the modification time has changed */
1194 VTOC(dvp)->c_flags &= ~C_VATTR;
1195
1196 /* Check for remove of control object. */
1197 if (IS_CTL_NAME(dvp, nm, len)) {
1198 MARK_INT_FAIL(CODA_REMOVE_STATS);
1199 return(ENOENT);
1200 }
1201
1202 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1203
1204 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1205
1206 /*
1207 * Unlock parent and child (avoiding double if ".").
1208 */
1209 if (dvp == vp) {
1210 vrele(vp);
1211 } else {
1212 vput(vp);
1213 }
1214 vput(dvp);
1215
1216 return(error);
1217 }
1218
1219 /*
1220 * dvp is the directory where the link is to go, and is locked.
1221 * vp is the object to be linked to, and is unlocked.
1222 * At exit, we must unlock dvp, and vput dvp.
1223 */
1224 int
1225 coda_link(void *v)
1226 {
1227 /* true args */
1228 struct vop_link_args *ap = v;
1229 struct vnode *vp = ap->a_vp;
1230 struct cnode *cp = VTOC(vp);
1231 struct vnode *dvp = ap->a_dvp;
1232 struct cnode *dcp = VTOC(dvp);
1233 struct componentname *cnp = ap->a_cnp;
1234 kauth_cred_t cred = cnp->cn_cred;
1235 struct lwp *l = curlwp;
1236 /* locals */
1237 int error;
1238 const char *nm = cnp->cn_nameptr;
1239 int len = cnp->cn_namelen;
1240
1241 MARK_ENTRY(CODA_LINK_STATS);
1242
1243 if (codadebug & CODADBGMSK(CODA_LINK)) {
1244
1245 myprintf(("nb_link: vp fid: %s\n",
1246 coda_f2s(&cp->c_fid)));
1247 myprintf(("nb_link: dvp fid: %s)\n",
1248 coda_f2s(&dcp->c_fid)));
1249
1250 }
1251 if (codadebug & CODADBGMSK(CODA_LINK)) {
1252 myprintf(("link: vp fid: %s\n",
1253 coda_f2s(&cp->c_fid)));
1254 myprintf(("link: dvp fid: %s\n",
1255 coda_f2s(&dcp->c_fid)));
1256
1257 }
1258
1259 /* Check for link to/from control object. */
1260 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1261 MARK_INT_FAIL(CODA_LINK_STATS);
1262 return(EACCES);
1263 }
1264
1265 /* If linking . to a name, error out earlier. */
1266 if (vp == dvp) {
1267 printf("coda_link vp==dvp\n");
1268 error = EISDIR;
1269 goto exit;
1270 }
1271
1272 /* XXX Why does venus_link need the vnode to be locked?*/
1273 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1274 printf("coda_link: couldn't lock vnode %p\n", vp);
1275 error = EFAULT; /* XXX better value */
1276 goto exit;
1277 }
1278 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1279 VOP_UNLOCK(vp, 0);
1280
1281 /* Invalidate parent's attr cache (the modification time has changed). */
1282 VTOC(dvp)->c_flags &= ~C_VATTR;
1283 /* Invalidate child's attr cache (XXX why). */
1284 VTOC(vp)->c_flags &= ~C_VATTR;
1285
1286 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1287
1288 exit:
1289 vput(dvp);
1290 return(error);
1291 }
1292
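/*
 * Rename.  Purge the coda namecache entries for both names, invalidate
 * the parents' attribute caches, and let Venus perform the rename; all
 * four vnodes are released on the way out.
 */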
1293 int
1294 coda_rename(void *v)
1295 {
1296 /* true args */
1297 struct vop_rename_args *ap = v;
1298 struct vnode *odvp = ap->a_fdvp;
1299 struct cnode *odcp = VTOC(odvp);
1300 struct componentname *fcnp = ap->a_fcnp;
1301 struct vnode *ndvp = ap->a_tdvp;
1302 struct cnode *ndcp = VTOC(ndvp);
1303 struct componentname *tcnp = ap->a_tcnp;
1304 kauth_cred_t cred = fcnp->cn_cred;
1305 struct lwp *l = curlwp;
1306 /* true args */
1307 int error;
1308 const char *fnm = fcnp->cn_nameptr;
1309 int flen = fcnp->cn_namelen;
1310 const char *tnm = tcnp->cn_nameptr;
1311 int tlen = tcnp->cn_namelen;
1312
1313 MARK_ENTRY(CODA_RENAME_STATS);
1314
1315 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1316 This could be Bad. XXX */
1317 #ifdef OLD_DIAGNOSTIC
1318 if ((fcnp->cn_cred != tcnp->cn_cred)
1319 || (fcnp->cn_lwp != tcnp->cn_lwp))
1320 {
1321 panic("coda_rename: component names don't agree");
1322 }
1323 #endif
1324
1325 /* Check for rename involving control object. */
1326 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1327 MARK_INT_FAIL(CODA_RENAME_STATS);
1328 return(EACCES);
1329 }
1330
1331 /* Problem with moving directories -- need to flush entry for .. */
1332 if (odvp != ndvp) {
1333 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1334 if (ovcp) {
1335 struct vnode *ovp = CTOV(ovcp);
1336 if ((ovp) &&
1337 (ovp->v_type == VDIR)) /* If it's a directory */
1338 coda_nc_zapfile(VTOC(ovp),"..", 2);
1339 }
1340 }
1341
1342 /* Remove the entries for both source and target files */
1343 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1344 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1345
1346 /* Invalidate the parent's attr cache, the modification time has changed */
1347 VTOC(odvp)->c_flags &= ~C_VATTR;
1348 VTOC(ndvp)->c_flags &= ~C_VATTR;
1349
1350 if (flen+1 > CODA_MAXNAMLEN) {
1351 MARK_INT_FAIL(CODA_RENAME_STATS);
1352 error = EINVAL;
1353 goto exit;
1354 }
1355
1356 if (tlen+1 > CODA_MAXNAMLEN) {
1357 MARK_INT_FAIL(CODA_RENAME_STATS);
1358 error = EINVAL;
1359 goto exit;
1360 }
1361
1362 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1363
1364 exit:
1365 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1366 /* XXX - do we need to call cache_purge on the moved vnode? */
1367 cache_purge(ap->a_fvp);
1368
1369 /* It seems to be incumbent on us to drop locks on all four vnodes */
1370 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1371
1372 vrele(ap->a_fvp);
1373 vrele(odvp);
1374
1375 if (ap->a_tvp) {
1376 if (ap->a_tvp == ndvp) {
1377 vrele(ap->a_tvp);
1378 } else {
1379 vput(ap->a_tvp);
1380 }
1381 }
1382
1383 vput(ndvp);
1384 return(error);
1385 }
1386
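/*
 * Make a directory via Venus, create a cnode for the result, and prime
 * the namecache with the new entry as well as "." and "..".
 */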
1387 int
1388 coda_mkdir(void *v)
1389 {
1390 /* true args */
1391 struct vop_mkdir_args *ap = v;
1392 struct vnode *dvp = ap->a_dvp;
1393 struct cnode *dcp = VTOC(dvp);
1394 struct componentname *cnp = ap->a_cnp;
1395 struct vattr *va = ap->a_vap;
1396 struct vnode **vpp = ap->a_vpp;
1397 kauth_cred_t cred = cnp->cn_cred;
1398 struct lwp *l = curlwp;
1399 /* locals */
1400 int error;
1401 const char *nm = cnp->cn_nameptr;
1402 int len = cnp->cn_namelen;
1403 struct cnode *cp;
1404 CodaFid VFid;
1405 struct vattr ova;
1406
1407 MARK_ENTRY(CODA_MKDIR_STATS);
1408
1409 /* Check for mkdir of target object. */
1410 if (IS_CTL_NAME(dvp, nm, len)) {
1411 *vpp = (struct vnode *)0;
1412 MARK_INT_FAIL(CODA_MKDIR_STATS);
1413 return(EACCES);
1414 }
1415
1416 if (len+1 > CODA_MAXNAMLEN) {
1417 *vpp = (struct vnode *)0;
1418 MARK_INT_FAIL(CODA_MKDIR_STATS);
1419 return(EACCES);
1420 }
1421
1422 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1423
1424 if (!error) {
1425 if (coda_find(&VFid) != NULL)
1426 panic("cnode existed for newly created directory!");
1427
1428
1429 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1430 *vpp = CTOV(cp);
1431
1432 /* enter the new vnode in the Name Cache */
1433 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1434
1435 /* as a side effect, enter "." and ".." for the directory */
1436 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1437 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1438
1439 if (coda_attr_cache) {
1440 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1441 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1442 }
1443
1444 /* Invalidate the parent's attr cache, the modification time has changed */
1445 VTOC(dvp)->c_flags &= ~C_VATTR;
1446
1447 CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
1448 coda_f2s(&VFid), error)); )
1449 } else {
1450 *vpp = (struct vnode *)0;
1451 CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
1452 }
1453
1454 /*
1455 * Currently, all mkdirs explicitly vput their dvp's.
1456 * It also appears that we *must* lock the vpp, since
1457 * lockleaf isn't set, but someone down the road is going
1458 * to try to unlock the new directory.
1459 */
1460 vput(dvp);
1461 if (!error) {
1462 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1463 panic("coda_mkdir: couldn't lock child");
1464 }
1465 }
1466
1467 /* Per vnodeops(9), free name except on success and SAVESTART. */
1468 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1469 PNBUF_PUT(cnp->cn_pnbuf);
1470 }
1471 return(error);
1472 }
1473
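/*
 * Remove a directory: purge the namecache entries for the victim and its
 * children, then ask Venus to do the rmdir.
 */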
1474 int
1475 coda_rmdir(void *v)
1476 {
1477 /* true args */
1478 struct vop_rmdir_args *ap = v;
1479 struct vnode *dvp = ap->a_dvp;
1480 struct cnode *dcp = VTOC(dvp);
1481 struct vnode *vp = ap->a_vp;
1482 struct componentname *cnp = ap->a_cnp;
1483 kauth_cred_t cred = cnp->cn_cred;
1484 struct lwp *l = curlwp;
1485 /* true args */
1486 int error;
1487 const char *nm = cnp->cn_nameptr;
1488 int len = cnp->cn_namelen;
1489 struct cnode *cp;
1490
1491 MARK_ENTRY(CODA_RMDIR_STATS);
1492
1493 /* Check for rmdir of control object. */
1494 if (IS_CTL_NAME(dvp, nm, len)) {
1495 MARK_INT_FAIL(CODA_RMDIR_STATS);
1496 return(ENOENT);
1497 }
1498
1499 /* Can't remove . in self. */
1500 if (dvp == vp) {
1501 printf("coda_rmdir: dvp == vp\n");
1502 error = EINVAL;
1503 goto exit;
1504 }
1505
1506 /*
1507 * The caller may not have adequate permissions, and the venus
1508 * operation may fail, but it doesn't hurt from a correctness
1509 * viewpoint to invalidate cache entries.
1510 * XXX Why isn't this done after the venus_rmdir call?
1511 */
1512 /* Look up child in name cache (by name, from parent). */
1513 cp = coda_nc_lookup(dcp, nm, len, cred);
1514 /* If found, remove all children of the child (., ..). */
1515 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1516
1517 /* Remove child's own entry. */
1518 coda_nc_zapfile(dcp, nm, len);
1519
1520 /* Invalidate parent's attr cache (the modification time has changed). */
1521 dcp->c_flags &= ~C_VATTR;
1522
1523 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1524
1525 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1526
1527 exit:
1528 /* vput both vnodes */
1529 vput(dvp);
1530 if (dvp == vp) {
1531 vrele(vp);
1532 } else {
1533 vput(vp);
1534 }
1535
1536 return(error);
1537 }
1538
1539 int
1540 coda_symlink(void *v)
1541 {
1542 /* true args */
1543 struct vop_symlink_args *ap = v;
1544 struct vnode *dvp = ap->a_dvp;
1545 struct cnode *dcp = VTOC(dvp);
1546 /* a_vpp is used in place below */
1547 struct componentname *cnp = ap->a_cnp;
1548 struct vattr *tva = ap->a_vap;
1549 char *path = ap->a_target;
1550 kauth_cred_t cred = cnp->cn_cred;
1551 struct lwp *l = curlwp;
1552 /* locals */
1553 int error;
1554 u_long saved_cn_flags;
1555 const char *nm = cnp->cn_nameptr;
1556 int len = cnp->cn_namelen;
1557 int plen = strlen(path);
1558
1559 /*
1560 * Here's the strategy for the moment: perform the symlink, then
1561 * do a lookup to grab the resulting vnode. I know this requires
1562 * two communications with Venus for a new symbolic link, but
1563 * that's the way the ball bounces. I don't yet want to change
1564 * the way the Mach symlink works. When Mach support is
1565 * deprecated, we should change symlink so that the common case
1566 * returns the resultant vnode in a vpp argument.
1567 */
1568
1569 MARK_ENTRY(CODA_SYMLINK_STATS);
1570
1571 /* Check for symlink of control object. */
1572 if (IS_CTL_NAME(dvp, nm, len)) {
1573 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1574 error = EACCES;
1575 goto exit;
1576 }
1577
1578 if (plen+1 > CODA_MAXPATHLEN) {
1579 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1580 error = EINVAL;
1581 goto exit;
1582 }
1583
1584 if (len+1 > CODA_MAXNAMLEN) {
1585 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1586 error = EINVAL;
1587 goto exit;
1588 }
1589
1590 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1591
1592 /* Invalidate the parent's attr cache (modification time has changed). */
1593 dcp->c_flags &= ~C_VATTR;
1594
1595 if (!error) {
1596 /*
1597 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1598 * these are defined only for VOP_LOOKUP. We desire to reuse
1599 * cnp for a VOP_LOOKUP operation, and must be sure not to pass on
1600 * stray flags that were passed to us.  Such stray flags can occur because
1601 * sys_symlink makes a namei call and then reuses the
1602 * componentname structure.
1603 */
1604 /*
1605 * XXX Arguably we should create our own componentname structure
1606 * and not reuse the one that was passed in.
1607 */
1608 saved_cn_flags = cnp->cn_flags;
1609 cnp->cn_flags &= ~(MODMASK | OPMASK);
1610 cnp->cn_flags |= LOOKUP;
1611 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1612 cnp->cn_flags = saved_cn_flags;
1613 /* Either an error occurs, or ap->a_vpp is locked. */
1614 }
1615
1616 exit:
1617 /* unlock and dereference parent */
1618 vput(dvp);
1619
1620 /* Per vnodeops(9), free name except on success and SAVESTART. */
1621 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1622 PNBUF_PUT(cnp->cn_pnbuf);
1623 }
1624
1625 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1626 return(error);
1627 }
1628
1629 /*
1630 * Read directory entries.
1631 */
1632 int
1633 coda_readdir(void *v)
1634 {
1635 /* true args */
1636 struct vop_readdir_args *ap = v;
1637 struct vnode *vp = ap->a_vp;
1638 struct cnode *cp = VTOC(vp);
1639 struct uio *uiop = ap->a_uio;
1640 kauth_cred_t cred = ap->a_cred;
1641 int *eofflag = ap->a_eofflag;
1642 off_t **cookies = ap->a_cookies;
1643 int *ncookies = ap->a_ncookies;
1644 /* upcall decl */
1645 /* locals */
1646 int error = 0;
1647
1648 MARK_ENTRY(CODA_READDIR_STATS);
1649
1650 CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )
1651
1652 /* Check for readdir of control object. */
1653 if (IS_CTL_VP(vp)) {
1654 MARK_INT_FAIL(CODA_READDIR_STATS);
1655 return(ENOENT);
1656 }
1657
1658 {
1659 /* Redirect the request to UFS. */
1660
1661 /* If directory is not already open do an "internal open" on it. */
1662 int opened_internally = 0;
1663 if (cp->c_ovp == NULL) {
1664 opened_internally = 1;
1665 MARK_INT_GEN(CODA_OPEN_STATS);
1666 error = VOP_OPEN(vp, FREAD, cred);
1667 #ifdef CODA_VERBOSE
1668 printf("coda_readdir: Internally Opening %p\n", vp);
1669 #endif
1670 if (error) return(error);
1671 } else
1672 vp = cp->c_ovp;
1673
1674 /* Have UFS handle the call. */
1675 CODADEBUG(CODA_READDIR, myprintf((
1676 "indirect readdir: fid = %s, refcnt = %d\n",
1677 coda_f2s(&cp->c_fid), vp->v_usecount)); )
1678 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1679 if (error)
1680 MARK_INT_FAIL(CODA_READDIR_STATS);
1681 else
1682 MARK_INT_SAT(CODA_READDIR_STATS);
1683
1684 /* Do an "internal close" if necessary. */
1685 if (opened_internally) {
1686 MARK_INT_GEN(CODA_CLOSE_STATS);
1687 (void)VOP_CLOSE(vp, FREAD, cred);
1688 }
1689 }
1690
1691 return(error);
1692 }
1693
1694 /*
1695 * Convert from file system blocks to device blocks
1696 */
1697 int
1698 coda_bmap(void *v)
1699 {
1700 /* XXX on the global proc */
1701 /* true args */
1702 struct vop_bmap_args *ap = v;
1703 struct vnode *vp __unused = ap->a_vp; /* file's vnode */
1704 daddr_t bn __unused = ap->a_bn; /* fs block number */
1705 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1706 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1707 struct lwp *l __unused = curlwp;
1708 /* upcall decl */
1709 /* locals */
1710
1711 *vpp = (struct vnode *)0;
1712 myprintf(("coda_bmap called!\n"));
1713 return(EINVAL);
1714 }
1715
1716 /*
1717 * I don't think the following two things are used anywhere, so I've
1718 * commented them out
1719 *
1720 * struct buf *async_bufhead;
1721 * int async_daemon_count;
1722 */
1723 int
1724 coda_strategy(void *v)
1725 {
1726 /* true args */
1727 struct vop_strategy_args *ap = v;
1728 struct buf *bp __unused = ap->a_bp;
1729 struct lwp *l __unused = curlwp;
1730 /* upcall decl */
1731 /* locals */
1732
1733 myprintf(("coda_strategy called! "));
1734 return(EINVAL);
1735 }
1736
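/*
 * Reclaim a vnode: purge the namecache and free the cnode attached to it.
 */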
1737 int
1738 coda_reclaim(void *v)
1739 {
1740 /* true args */
1741 struct vop_reclaim_args *ap = v;
1742 struct vnode *vp = ap->a_vp;
1743 struct cnode *cp = VTOC(vp);
1744 /* upcall decl */
1745 /* locals */
1746
1747 /*
1748 * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1749 */
1750 ENTRY;
1751
1752 if (IS_UNMOUNTING(cp)) {
1753 #ifdef DEBUG
1754 if (VTOC(vp)->c_ovp) {
1755 if (IS_UNMOUNTING(cp))
1756 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1757 }
1758 #endif
1759 } else {
1760 #ifdef OLD_DIAGNOSTIC
1761 if (vp->v_usecount != 0)
1762 printf("coda_reclaim: pushing active %p\n", vp);
1763 if (VTOC(vp)->c_ovp) {
1764 panic("coda_reclaim: c_ovp not void");
1765 }
1766 #endif
1767 }
1768 cache_purge(vp);
1769 coda_free(VTOC(vp));
1770 SET_VTOC(vp) = NULL;
1771 return (0);
1772 }
1773
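/*
 * Vnode locking for coda simply uses the lockmgr lock embedded in the
 * vnode (vp->v_lock); see also coda_unlock() and coda_islocked() below.
 */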
1774 int
1775 coda_lock(void *v)
1776 {
1777 /* true args */
1778 struct vop_lock_args *ap = v;
1779 struct vnode *vp = ap->a_vp;
1780 struct cnode *cp = VTOC(vp);
1781 /* upcall decl */
1782 /* locals */
1783
1784 ENTRY;
1785
1786 if (coda_lockdebug) {
1787 myprintf(("Attempting lock on %s\n",
1788 coda_f2s(&cp->c_fid)));
1789 }
1790
1791 return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
1792 }
1793
1794 int
1795 coda_unlock(void *v)
1796 {
1797 /* true args */
1798 struct vop_unlock_args *ap = v;
1799 struct vnode *vp = ap->a_vp;
1800 struct cnode *cp = VTOC(vp);
1801 /* upcall decl */
1802 /* locals */
1803
1804 ENTRY;
1805 if (coda_lockdebug) {
1806 myprintf(("Attempting unlock on %s\n",
1807 coda_f2s(&cp->c_fid)));
1808 }
1809
1810 return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock));
1811 }
1812
1813 int
1814 coda_islocked(void *v)
1815 {
1816 /* true args */
1817 struct vop_islocked_args *ap = v;
1818 ENTRY;
1819
1820 return (lockstatus(&ap->a_vp->v_lock));
1821 }
1822
1823 /*
1824 * Given a device and inode, obtain a locked vnode. One reference is
1825 * obtained and passed back to the caller.
1826 */
1827 int
1828 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1829 {
1830 int error;
1831 struct mount *mp;
1832
1833 /* Obtain mount point structure from device. */
1834 if (!(mp = devtomp(dev))) {
1835 myprintf(("coda_grab_vnode: devtomp(%d) returns NULL\n", dev));
1836 return(ENXIO);
1837 }
1838
1839 /*
1840 * Obtain vnode from mount point and inode.
1841 * XXX VFS_VGET does not clearly define locked/referenced state of
1842 * returned vnode.
1843 */
1844 error = VFS_VGET(mp, ino, vpp);
1845 if (error) {
1846 myprintf(("coda_grab_vnode: iget/vget(%d, %llu) returns %p, err %d\n",
1847 dev, (unsigned long long)ino, *vpp, error));
1848 return(ENOENT);
1849 }
1850 return(0);
1851 }
1852
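/* Pretty-print a vattr, for CODADEBUG output. */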
1853 void
1854 print_vattr(struct vattr *attr)
1855 {
1856 const char *typestr;
1857
1858 switch (attr->va_type) {
1859 case VNON:
1860 typestr = "VNON";
1861 break;
1862 case VREG:
1863 typestr = "VREG";
1864 break;
1865 case VDIR:
1866 typestr = "VDIR";
1867 break;
1868 case VBLK:
1869 typestr = "VBLK";
1870 break;
1871 case VCHR:
1872 typestr = "VCHR";
1873 break;
1874 case VLNK:
1875 typestr = "VLNK";
1876 break;
1877 case VSOCK:
1878 typestr = "VSCK";
1879 break;
1880 case VFIFO:
1881 typestr = "VFFO";
1882 break;
1883 case VBAD:
1884 typestr = "VBAD";
1885 break;
1886 default:
1887 typestr = "????";
1888 break;
1889 }
1890
1891
1892 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1893 typestr, (int)attr->va_mode, (int)attr->va_uid,
1894 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1895
1896 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1897 (int)attr->va_fileid, (int)attr->va_nlink,
1898 (int)attr->va_size,
1899 (int)attr->va_blocksize,(int)attr->va_bytes));
1900 myprintf((" gen %ld flags %ld vaflags %d\n",
1901 attr->va_gen, attr->va_flags, attr->va_vaflags));
1902 myprintf((" atime sec %d nsec %d\n",
1903 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1904 myprintf((" mtime sec %d nsec %d\n",
1905 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1906 myprintf((" ctime sec %d nsec %d\n",
1907 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1908 }
1909
1910 /* How to print a ucred */
1911 void
1912 print_cred(kauth_cred_t cred)
1913 {
1914
1915 uint16_t ngroups;
1916 int i;
1917
1918 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1919 kauth_cred_geteuid(cred)));
1920
1921 ngroups = kauth_cred_ngroups(cred);
1922 for (i=0; i < ngroups; i++)
1923 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1924 myprintf(("\n"));
1925
1926 }
1927
1928 /*
1929 * Return a vnode for the given fid.
1930 * If no cnode exists for this fid create one and put it
1931 * in a table hashed by coda_f2i(). If the cnode for
1932 * this fid is already in the table return it (ref count is
1933 * incremented by coda_find. The cnode will be flushed from the
1934 * table when coda_inactive calls coda_unsave.
1935 */
1936 struct cnode *
1937 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1938 {
1939 struct cnode *cp;
1940 int err;
1941
1942 if ((cp = coda_find(fid)) == NULL) {
1943 struct vnode *vp;
1944
1945 cp = coda_alloc();
1946 cp->c_fid = *fid;
1947
1948 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
1949 if (err) {
1950 panic("coda: getnewvnode returned error %d", err);
1951 }
1952 vp->v_data = cp;
1953 vp->v_type = type;
1954 cp->c_vnode = vp;
1955 uvm_vnp_setsize(vp, 0);
1956 coda_save(cp);
1957
1958 } else {
1959 vref(CTOV(cp));
1960 }
1961
1962 return cp;
1963 }
1964
1965 /*
1966 * coda_getpages may be called on a vnode which has not been opened,
1967 * e.g. to fault in pages to execute a program. In that case, we must
1968 * open the file to get the container. The vnode may or may not be
1969 * locked, and we must leave it in the same state.
1970 * XXX The protocol requires v_uobj.vmobjlock to be
1971 * held by caller, but this isn't documented in vnodeops(9) or vnode_if.src.
1972 */
1973 int
1974 coda_getpages(void *v)
1975 {
1976 struct vop_getpages_args /* {
1977 struct vnode *a_vp;
1978 voff_t a_offset;
1979 struct vm_page **a_m;
1980 int *a_count;
1981 int a_centeridx;
1982 vm_prot_t a_access_type;
1983 int a_advice;
1984 int a_flags;
1985 } */ *ap = v;
1986 struct vnode *vp = ap->a_vp;
1987 struct cnode *cp = VTOC(vp);
1988 struct lwp *l = curlwp;
1989 kauth_cred_t cred = l->l_cred;
1990 int error, cerror;
1991 int waslocked; /* 1 if vnode lock was held on entry */
1992 int didopen = 0; /* 1 if we opened container file */
1993
1994 /*
1995 * Handle a case that uvm_fault doesn't quite use yet.
1996 * See layer_vnops.c for inspiration.
1997 */
1998 if (ap->a_flags & PGO_LOCKED) {
1999 return EBUSY;
2000 }
2001
2002 /* Check for control object. */
2003 if (IS_CTL_VP(vp)) {
2004 printf("coda_getpages: control object %p\n", vp);
2005 mutex_exit(&vp->v_uobj.vmobjlock);
2006 return(EINVAL);
2007 }
2008
2009 /*
2010 * XXX It's really not ok to be releasing the lock we get,
2011 * because we could be overlapping with another call to
2012 * getpages and drop a lock they are relying on. We need to
2013 * figure out whether getpages ever is called holding the
2014 * lock, and if we should serialize getpages calls by some
2015 * mechanism.
2016 */
2017 waslocked = VOP_ISLOCKED(vp);
2018
2019 /* Drop the vmobject lock. */
2020 mutex_exit(&vp->v_uobj.vmobjlock);
2021
2022 /* Get container file if not already present. */
2023 if (cp->c_ovp == NULL) {
2024 /*
2025 * VOP_OPEN requires a locked vnode. We must avoid
2026 * locking the vnode if it is already locked, and
2027 * leave it in the same state on exit.
2028 */
2029 if (waslocked == 0) {
2030 cerror = vn_lock(vp, LK_EXCLUSIVE);
2031 if (cerror) {
2032 printf("coda_getpages: can't lock vnode %p\n",
2033 vp);
2034 return cerror;
2035 }
2036 #if 0
2037 printf("coda_getpages: locked vnode %p\n", vp);
2038 #endif
2039 }
2040
2041 /*
2042 * Open file (causes upcall to venus).
2043 * XXX Perhaps we should not fully open the file, but
2044 * simply obtain a container file.
2045 */
2046 /* XXX Is it ok to do this while holding the simplelock? */
2047 cerror = VOP_OPEN(vp, FREAD, cred);
2048
2049 if (cerror) {
2050 printf("coda_getpages: cannot open vnode %p => %d\n",
2051 vp, cerror);
2052 if (waslocked == 0)
2053 VOP_UNLOCK(vp, 0);
2054 return cerror;
2055 }
2056
2057 #if 0
2058 printf("coda_getpages: opened vnode %p\n", vp);
2059 #endif
2060 didopen = 1;
2061 }
2062 KASSERT(cp->c_ovp != NULL);
2063
2064 /* Munge the arg structure to refer to the container vnode. */
2065 ap->a_vp = cp->c_ovp;
2066
2067 /* Get the lock on the container vnode, and call getpages on it. */
2068 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2069 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2070
2071 /* If we opened the vnode, we must close it. */
2072 if (didopen) {
2073 /*
2074 * VOP_CLOSE requires a locked vnode, but we are still
2075 * holding the lock (or riding a caller's lock).
2076 */
2077 cerror = VOP_CLOSE(vp, FREAD, cred);
2078 if (cerror != 0)
2079 /* XXX How should we handle this? */
2080 printf("coda_getpages: closed vnode %p -> %d\n",
2081 vp, cerror);
2082
2083 /* If we obtained a lock, drop it. */
2084 if (waslocked == 0)
2085 VOP_UNLOCK(vp, 0);
2086 }
2087
2088 return error;
2089 }
2090
2091 /*
2092 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2093 * documented in vnodeops(9). XXX vnode_if.src doesn't say this.
2094 */
2095 int
2096 coda_putpages(void *v)
2097 {
2098 struct vop_putpages_args /* {
2099 struct vnode *a_vp;
2100 voff_t a_offlo;
2101 voff_t a_offhi;
2102 int a_flags;
2103 } */ *ap = v;
2104 struct vnode *vp = ap->a_vp;
2105 struct cnode *cp = VTOC(vp);
2106 int error;
2107
2108 /* Drop the vmobject lock. */
2109 mutex_exit(&vp->v_uobj.vmobjlock);
2110
2111 /* Check for control object. */
2112 if (IS_CTL_VP(vp)) {
2113 printf("coda_putpages: control object %p\n", vp);
2114 return(EINVAL);
2115 }
2116
2117 /*
2118 * If container object is not present, then there are no pages
2119 * to put; just return without error. This happens all the
2120 * time, apparently during discard of a closed vnode (which
2121 * trivially can't have dirty pages).
2122 */
2123 if (cp->c_ovp == NULL)
2124 return 0;
2125
2126 /* Munge the arg structure to refer to the container vnode. */
2127 ap->a_vp = cp->c_ovp;
2128
2129 /* Get the lock on the container vnode, and call putpages on it. */
2130 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2131 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2132
2133 return error;
2134 }
2135