1 /* $NetBSD: coda_vnops.c,v 1.98.2.1 2015/04/06 15:18:05 skrll Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.98.2.1 2015/04/06 15:18:05 skrll Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65
66 #include <miscfs/genfs/genfs.h>
67 #include <miscfs/specfs/specdev.h>
68
69 #include <coda/coda.h>
70 #include <coda/cnode.h>
71 #include <coda/coda_vnops.h>
72 #include <coda/coda_venus.h>
73 #include <coda/coda_opstats.h>
74 #include <coda/coda_subr.h>
75 #include <coda/coda_namecache.h>
76 #include <coda/coda_pioctl.h>
77
78 /*
79 * These flags select various performance enhancements.
80 */
81 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
82 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
83 int coda_access_cache = 1; /* Set to handle some access checks directly */
84
85 /* structure to keep track of vfs calls */
86
87 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
88
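/*
 * Roughly: MARK_ENTRY counts every call to an operation; the other
 * macros record how it was handled -- satisfied internally, failed
 * internally, or generated internally (e.g. an open issued on behalf
 * of getpages or readdir).
 */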
89 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
90 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
91 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
92 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
93
94 /* Set to log vnode lock and unlock operations */
95 static int coda_lockdebug = 0;
96
97 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
98
99 /* Definition of the vnode operation vector */
100
101 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
102 { &vop_default_desc, coda_vop_error },
103 { &vop_lookup_desc, coda_lookup }, /* lookup */
104 { &vop_create_desc, coda_create }, /* create */
105 { &vop_mknod_desc, coda_vop_error }, /* mknod */
106 { &vop_open_desc, coda_open }, /* open */
107 { &vop_close_desc, coda_close }, /* close */
108 { &vop_access_desc, coda_access }, /* access */
109 { &vop_getattr_desc, coda_getattr }, /* getattr */
110 { &vop_setattr_desc, coda_setattr }, /* setattr */
111 { &vop_read_desc, coda_read }, /* read */
112 { &vop_write_desc, coda_write }, /* write */
113 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */
114 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */
115 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
116 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
117 { &vop_mmap_desc, genfs_mmap }, /* mmap */
118 { &vop_fsync_desc, coda_fsync }, /* fsync */
119 { &vop_remove_desc, coda_remove }, /* remove */
120 { &vop_link_desc, coda_link }, /* link */
121 { &vop_rename_desc, coda_rename }, /* rename */
122 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
123 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
124 { &vop_symlink_desc, coda_symlink }, /* symlink */
125 { &vop_readdir_desc, coda_readdir }, /* readdir */
126 { &vop_readlink_desc, coda_readlink }, /* readlink */
127 { &vop_abortop_desc, coda_abortop }, /* abortop */
128 { &vop_inactive_desc, coda_inactive }, /* inactive */
129 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
130 { &vop_lock_desc, coda_lock }, /* lock */
131 { &vop_unlock_desc, coda_unlock }, /* unlock */
132 { &vop_bmap_desc, coda_bmap }, /* bmap */
133 { &vop_strategy_desc, coda_strategy }, /* strategy */
134 { &vop_print_desc, coda_vop_error }, /* print */
135 { &vop_islocked_desc, coda_islocked }, /* islocked */
136 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
137 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
138 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
139 { &vop_seek_desc, genfs_seek }, /* seek */
140 { &vop_poll_desc, genfs_poll }, /* poll */
141 { &vop_getpages_desc, coda_getpages }, /* getpages */
142 { &vop_putpages_desc, coda_putpages }, /* putpages */
143 { NULL, NULL }
144 };
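/*
 * Operations coda does not implement itself fall back on genfs_* helpers
 * where a generic version makes sense, on coda_vop_nop (succeed silently,
 * e.g. advlock), or on coda_vop_error (fail with EIO).
 */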
145
146 static void coda_print_vattr(struct vattr *);
147
148 int (**coda_vnodeop_p)(void *);
149 const struct vnodeopv_desc coda_vnodeop_opv_desc =
150 { &coda_vnodeop_p, coda_vnodeop_entries };
151
152 /* Definitions of NetBSD vnodeop interfaces */
153
154 /*
155 * A generic error routine. Return EIO without looking at arguments.
156 */
157 int
158 coda_vop_error(void *anon) {
159 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
160
161 if (codadebug) {
162 myprintf(("%s: Vnode operation %s called (error).\n",
163 __func__, (*desc)->vdesc_name));
164 }
165
166 return EIO;
167 }
168
169 /* A generic do-nothing. */
170 int
171 coda_vop_nop(void *anon) {
172 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
173
174 if (codadebug) {
175 myprintf(("Vnode operation %s called, but unsupported\n",
176 (*desc)->vdesc_name));
177 }
178 return (0);
179 }
180
181 int
182 coda_vnodeopstats_init(void)
183 {
184 int i;
185
186 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
187 coda_vnodeopstats[i].opcode = i;
188 coda_vnodeopstats[i].entries = 0;
189 coda_vnodeopstats[i].sat_intrn = 0;
190 coda_vnodeopstats[i].unsat_intrn = 0;
191 coda_vnodeopstats[i].gen_intrn = 0;
192 }
193
194 return 0;
195 }
196
197 /*
198 * XXX The entire relationship between VOP_OPEN and having a container
199 * file (via venus_open) needs to be reexamined. In particular, it's
200 * valid to open/mmap/close and then reference. Instead of doing
201 * VOP_OPEN when getpages needs a container, we should do the
202 * venus_open part, and record that the vnode has opened the container
203 * for getpages, and do the matching logical close on coda_inactive.
204 * Further, coda_rdwr needs a container file, and sometimes needs to
205 * do the equivalent of open (core dumps).
206 */
207 /*
208 * coda_open calls Venus to return the device and inode of the
209 * container file, and then obtains a vnode for that file. The
210 * container vnode is stored in the coda vnode, and a reference is
211 * added for each open file.
212 */
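/*
 * Sketch of the usual path, assuming no errors:
 *   venus_open()       -> <dev, inode> of the container file
 *   coda_grab_vnode()  -> locked, referenced vnode for that file
 *   cp->c_ovp et al.   -> remembered for later rdwr/getpages use
 *   VOP_OPEN()         -> real open on the underlying file system
 */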
213 int
214 coda_open(void *v)
215 {
216 /*
217 * NetBSD can pass the O_EXCL flag in mode, even though the check
218 * has already happened. Venus defensively assumes that if open
219 * is passed O_EXCL, it must be a bug, so we strip the flag here.
220 */
221 /* true args */
222 struct vop_open_args *ap = v;
223 vnode_t *vp = ap->a_vp;
224 struct cnode *cp = VTOC(vp);
225 int flag = ap->a_mode & (~O_EXCL);
226 kauth_cred_t cred = ap->a_cred;
227 /* locals */
228 int error;
229 dev_t dev; /* container file device, inode, vnode */
230 ino_t inode;
231 vnode_t *container_vp;
232
233 MARK_ENTRY(CODA_OPEN_STATS);
234
235 KASSERT(VOP_ISLOCKED(vp));
236 /* Check for open of control file. */
237 if (IS_CTL_VP(vp)) {
238 /* if (WRITABLE(flag)) */
239 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
240 MARK_INT_FAIL(CODA_OPEN_STATS);
241 return(EACCES);
242 }
243 MARK_INT_SAT(CODA_OPEN_STATS);
244 return(0);
245 }
246
247 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
248 if (error)
249 return (error);
250 if (!error) {
251 CODADEBUG(CODA_OPEN, myprintf((
252 "%s: dev 0x%llx inode %llu result %d\n", __func__,
253 (unsigned long long)dev, (unsigned long long)inode, error));)
254 }
255
256 /*
257 * Obtain locked and referenced container vnode from container
258 * device/inode.
259 */
260 error = coda_grab_vnode(vp, dev, inode, &container_vp);
261 if (error)
262 return (error);
263
264 /* Save the vnode pointer for the container file. */
265 if (cp->c_ovp == NULL) {
266 cp->c_ovp = container_vp;
267 } else {
268 if (cp->c_ovp != container_vp)
269 /*
270 * Perhaps venus returned a different container, or
271 * something else went wrong.
272 */
273 panic("%s: cp->c_ovp != container_vp", __func__);
274 }
275 cp->c_ocount++;
276
277 /* Flush the attribute cache if writing the file. */
278 if (flag & FWRITE) {
279 cp->c_owrite++;
280 cp->c_flags &= ~C_VATTR;
281 }
282
283 /*
284 * Save the <device, inode> pair for the container file to speed
285 * up subsequent reads while closed (mmap, program execution).
286 * This is perhaps safe because venus will invalidate the node
287 * before changing the container file mapping.
288 */
289 cp->c_device = dev;
290 cp->c_inode = inode;
291
292 /* Open the container file. */
293 error = VOP_OPEN(container_vp, flag, cred);
294 /*
295 * Drop the lock on the container, after we have done VOP_OPEN
296 * (which requires a locked vnode).
297 */
298 VOP_UNLOCK(container_vp);
299 return(error);
300 }
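/*
 * The c_ocount/c_owrite counts taken in coda_open() are dropped again in
 * coda_close() below; the container vnode pointer is cleared on the last
 * close.
 */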
301
302 /*
303 * Close the cache file used for I/O and notify Venus.
304 */
305 int
306 coda_close(void *v)
307 {
308 /* true args */
309 struct vop_close_args *ap = v;
310 vnode_t *vp = ap->a_vp;
311 struct cnode *cp = VTOC(vp);
312 int flag = ap->a_fflag;
313 kauth_cred_t cred = ap->a_cred;
314 /* locals */
315 int error;
316
317 MARK_ENTRY(CODA_CLOSE_STATS);
318
319 /* Check for close of control file. */
320 if (IS_CTL_VP(vp)) {
321 MARK_INT_SAT(CODA_CLOSE_STATS);
322 return(0);
323 }
324
325 /*
326 * XXX The IS_UNMOUNTING part of this is very suspect.
327 */
328 if (IS_UNMOUNTING(cp)) {
329 if (cp->c_ovp) {
330 #ifdef CODA_VERBOSE
331 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
332 __func__, vp->v_usecount, cp->c_ovp, vp, cp);
333 #endif
334 #ifdef hmm
335 vgone(cp->c_ovp);
336 #else
337 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
338 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
339 vput(cp->c_ovp);
340 #endif
341 } else {
342 #ifdef CODA_VERBOSE
343 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
344 #endif
345 }
346 return ENODEV;
347 }
348
349 /* Lock the container node, and VOP_CLOSE it. */
350 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
351 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
352 /*
353 * Drop the lock we just obtained, and vrele the container vnode.
354 * Decrement reference counts, and clear container vnode pointer on
355 * last close.
356 */
357 vput(cp->c_ovp);
358 if (flag & FWRITE)
359 --cp->c_owrite;
360 if (--cp->c_ocount == 0)
361 cp->c_ovp = NULL;
362
363 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
364
365 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
366 return(error);
367 }
368
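/*
 * Read and write simply delegate to coda_rdwr(), which redirects the
 * I/O to the container file.
 */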
369 int
370 coda_read(void *v)
371 {
372 struct vop_read_args *ap = v;
373
374 ENTRY;
375 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
376 ap->a_ioflag, ap->a_cred, curlwp));
377 }
378
379 int
380 coda_write(void *v)
381 {
382 struct vop_write_args *ap = v;
383
384 ENTRY;
385 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
386 ap->a_ioflag, ap->a_cred, curlwp));
387 }
388
389 int
390 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
391 kauth_cred_t cred, struct lwp *l)
392 {
393 /* upcall decl */
394 /* NOTE: container file operation!!! */
395 /* locals */
396 struct cnode *cp = VTOC(vp);
397 vnode_t *cfvp = cp->c_ovp;
398 struct proc *p = l->l_proc;
399 int opened_internally = 0;
400 int error = 0;
401
402 MARK_ENTRY(CODA_RDWR_STATS);
403
404 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
405 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
406 (long long) uiop->uio_offset)); )
407
408 /* Check for rdwr of control object. */
409 if (IS_CTL_VP(vp)) {
410 MARK_INT_FAIL(CODA_RDWR_STATS);
411 return(EINVAL);
412 }
413
414 /* Redirect the request to UFS. */
415
416 /*
417 * If file is not already open this must be a page
418 * {read,write} request. Iget the cache file's inode
419 * pointer if we still have its <device, inode> pair.
420 * Otherwise, we must do an internal open to derive the
421 * pair.
422 * XXX Integrate this into a coherent strategy for container
423 * file acquisition.
424 */
425 if (cfvp == NULL) {
426 /*
427 * If we're dumping core, do the internal open. Otherwise
428 * venus won't have the correct size of the core when
429 * it's completely written.
430 */
431 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
432 #ifdef CODA_VERBOSE
433 printf("%s: grabbing container vnode, losing reference\n",
434 __func__);
435 #endif
436 /* Get locked and refed vnode. */
437 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
438 if (error) {
439 MARK_INT_FAIL(CODA_RDWR_STATS);
440 return(error);
441 }
442 /*
443 * Drop lock.
444 * XXX Where is the reference released?
445 */
446 VOP_UNLOCK(cfvp);
447 }
448 else {
449 #ifdef CODA_VERBOSE
450 printf("%s: internal VOP_OPEN\n", __func__);
451 #endif
452 opened_internally = 1;
453 MARK_INT_GEN(CODA_OPEN_STATS);
454 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
455 #ifdef CODA_VERBOSE
456 printf("%s: Internally Opening %p\n", __func__, vp);
457 #endif
458 if (error) {
459 MARK_INT_FAIL(CODA_RDWR_STATS);
460 return(error);
461 }
462 cfvp = cp->c_ovp;
463 }
464 }
465
466 /* Have UFS handle the call. */
467 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
468 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
469
470 if (rw == UIO_READ) {
471 error = VOP_READ(cfvp, uiop, ioflag, cred);
472 } else {
473 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
474 }
475
476 if (error)
477 MARK_INT_FAIL(CODA_RDWR_STATS);
478 else
479 MARK_INT_SAT(CODA_RDWR_STATS);
480
481 /* Do an internal close if necessary. */
482 if (opened_internally) {
483 MARK_INT_GEN(CODA_CLOSE_STATS);
484 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
485 }
486
487 /* Invalidate cached attributes if writing. */
488 if (rw == UIO_WRITE)
489 cp->c_flags &= ~C_VATTR;
490 return(error);
491 }
492
493 int
494 coda_ioctl(void *v)
495 {
496 /* true args */
497 struct vop_ioctl_args *ap = v;
498 vnode_t *vp = ap->a_vp;
499 int com = ap->a_command;
500 void *data = ap->a_data;
501 int flag = ap->a_fflag;
502 kauth_cred_t cred = ap->a_cred;
503 /* locals */
504 int error;
505 vnode_t *tvp;
506 struct PioctlData *iap = (struct PioctlData *)data;
507 namei_simple_flags_t sflags;
508
509 MARK_ENTRY(CODA_IOCTL_STATS);
510
511 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
512
513 /* Don't check for operation on a dying object; for the ctlvp it
514 shouldn't matter. */
515
516 /* Must be control object to succeed. */
517 if (!IS_CTL_VP(vp)) {
518 MARK_INT_FAIL(CODA_IOCTL_STATS);
519 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
520 return (EOPNOTSUPP);
521 }
522 /* Look up the pathname. */
523
524 /* Should we use the name cache here? It would get it from
525 lookupname sooner or later anyway, right? */
526
527 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
528 error = namei_simple_user(iap->path, sflags, &tvp);
529
530 if (error) {
531 MARK_INT_FAIL(CODA_IOCTL_STATS);
532 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
533 __func__, error));)
534 return(error);
535 }
536
537 /*
538 * Make sure this is a coda style cnode, but it may be a
539 * different vfsp
540 */
541 /* XXX: this totally violates the comment about vtagtype in vnode.h */
542 if (tvp->v_tag != VT_CODA) {
543 vrele(tvp);
544 MARK_INT_FAIL(CODA_IOCTL_STATS);
545 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
546 __func__, iap->path));)
547 return(EINVAL);
548 }
549
550 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
551 vrele(tvp);
552 return(EINVAL);
553 }
554 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
555 cred, curlwp);
556
557 if (error)
558 MARK_INT_FAIL(CODA_IOCTL_STATS);
559 else
560 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
561
562 vrele(tvp);
563 return(error);
564 }
565
566 /*
567 * To reduce the cost of a user-level venus, we cache attributes in
568 * the kernel. Each cnode has storage allocated for an attribute. If
569 * c_vattr is valid, return a reference to it. Otherwise, get the
570 * attributes from venus and store them in the cnode. There is some
571 * question whether this method is a security leak. But I think that in
572 * order to make this call, the user must have done a lookup and
573 * opened the file, and therefore should already have access.
574 */
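/*
 * The cache protocol, roughly: C_VATTR set means c_vattr holds valid
 * attributes.  Anything that may change them (setattr, opening the file
 * for write, or writing via coda_rdwr) clears the flag so that the next
 * getattr goes back to Venus.
 */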
575 int
576 coda_getattr(void *v)
577 {
578 /* true args */
579 struct vop_getattr_args *ap = v;
580 vnode_t *vp = ap->a_vp;
581 struct cnode *cp = VTOC(vp);
582 struct vattr *vap = ap->a_vap;
583 kauth_cred_t cred = ap->a_cred;
584 /* locals */
585 int error;
586
587 MARK_ENTRY(CODA_GETATTR_STATS);
588
589 /* Check for getattr of control object. */
590 if (IS_CTL_VP(vp)) {
591 MARK_INT_FAIL(CODA_GETATTR_STATS);
592 return(ENOENT);
593 }
594
595 /* Check to see if the attributes have already been cached */
596 if (VALID_VATTR(cp)) {
597 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
598 __func__, coda_f2s(&cp->c_fid)));})
599 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
600 coda_print_vattr(&cp->c_vattr); )
601
602 *vap = cp->c_vattr;
603 MARK_INT_SAT(CODA_GETATTR_STATS);
604 return(0);
605 }
606
607 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
608
609 if (!error) {
610 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
611 __func__, coda_f2s(&cp->c_fid), error)); )
612
613 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
614 coda_print_vattr(vap); )
615
616 /* If not open for write, store attributes in cnode */
617 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
618 cp->c_vattr = *vap;
619 cp->c_flags |= C_VATTR;
620 }
621
622 }
623 return(error);
624 }
625
626 int
627 coda_setattr(void *v)
628 {
629 /* true args */
630 struct vop_setattr_args *ap = v;
631 vnode_t *vp = ap->a_vp;
632 struct cnode *cp = VTOC(vp);
633 struct vattr *vap = ap->a_vap;
634 kauth_cred_t cred = ap->a_cred;
635 /* locals */
636 int error;
637
638 MARK_ENTRY(CODA_SETATTR_STATS);
639
640 /* Check for setattr of control object. */
641 if (IS_CTL_VP(vp)) {
642 MARK_INT_FAIL(CODA_SETATTR_STATS);
643 return(ENOENT);
644 }
645
646 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
647 coda_print_vattr(vap);
648 }
649 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
650
651 if (!error)
652 cp->c_flags &= ~C_VATTR;
653
654 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
655 return(error);
656 }
657
658 int
659 coda_access(void *v)
660 {
661 /* true args */
662 struct vop_access_args *ap = v;
663 vnode_t *vp = ap->a_vp;
664 struct cnode *cp = VTOC(vp);
665 int mode = ap->a_mode;
666 kauth_cred_t cred = ap->a_cred;
667 /* locals */
668 int error;
669
670 MARK_ENTRY(CODA_ACCESS_STATS);
671
672 /* Check for access of control object. Only read access is
673 allowed on it. */
674 if (IS_CTL_VP(vp)) {
675 /* bogus hack - all will be marked as successes */
676 MARK_INT_SAT(CODA_ACCESS_STATS);
677 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
678 ? 0 : EACCES);
679 }
680
681 /*
682 * If the file is a directory, and we are checking exec (e.g. lookup)
683 * access, and the file is in the namecache, then the user must have
684 * lookup access to it.
685 */
686 if (coda_access_cache) {
687 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
688 if (coda_nc_lookup(cp, ".", 1, cred)) {
689 MARK_INT_SAT(CODA_ACCESS_STATS);
690 return(0); /* it was in the cache */
691 }
692 }
693 }
694
695 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
696
697 return(error);
698 }
699
700 /*
701 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
702 * done. If a buffer has been saved in anticipation of a coda_create or
703 * a coda_remove, delete it.
704 */
705 /* ARGSUSED */
706 int
707 coda_abortop(void *v)
708 {
709 /* true args */
710 struct vop_abortop_args /* {
711 vnode_t *a_dvp;
712 struct componentname *a_cnp;
713 } */ *ap = v;
714
715 (void)ap;
716 /* upcall decl */
717 /* locals */
718
719 return (0);
720 }
721
722 int
723 coda_readlink(void *v)
724 {
725 /* true args */
726 struct vop_readlink_args *ap = v;
727 vnode_t *vp = ap->a_vp;
728 struct cnode *cp = VTOC(vp);
729 struct uio *uiop = ap->a_uio;
730 kauth_cred_t cred = ap->a_cred;
731 /* locals */
732 struct lwp *l = curlwp;
733 int error;
734 char *str;
735 int len;
736
737 MARK_ENTRY(CODA_READLINK_STATS);
738
739 /* Check for readlink of control object. */
740 if (IS_CTL_VP(vp)) {
741 MARK_INT_FAIL(CODA_READLINK_STATS);
742 return(ENOENT);
743 }
744
745 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
746 uiop->uio_rw = UIO_READ;
747 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
748 if (error)
749 MARK_INT_FAIL(CODA_READLINK_STATS);
750 else
751 MARK_INT_SAT(CODA_READLINK_STATS);
752 return(error);
753 }
754
755 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
756
757 if (!error) {
758 uiop->uio_rw = UIO_READ;
759 error = uiomove(str, len, uiop);
760
761 if (coda_symlink_cache) {
762 cp->c_symlink = str;
763 cp->c_symlen = len;
764 cp->c_flags |= C_SYMLINK;
765 } else
766 CODA_FREE(str, len);
767 }
768
769 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
770 return(error);
771 }
772
773 int
774 coda_fsync(void *v)
775 {
776 /* true args */
777 struct vop_fsync_args *ap = v;
778 vnode_t *vp = ap->a_vp;
779 struct cnode *cp = VTOC(vp);
780 kauth_cred_t cred = ap->a_cred;
781 /* locals */
782 vnode_t *convp = cp->c_ovp;
783 int error;
784
785 MARK_ENTRY(CODA_FSYNC_STATS);
786
787 /* Check for fsync on an unmounting object */
788 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
789 * after an unmount has been initiated. This is a Bad Thing,
790 * which we have to avoid. Not a legitimate failure for stats.
791 */
792 if (IS_UNMOUNTING(cp)) {
793 return(ENODEV);
794 }
795
796 /* Check for fsync of control object or uninitialized cnode. */
797 if (IS_CTL_VP(vp) || vp->v_type == VNON) {
798 MARK_INT_SAT(CODA_FSYNC_STATS);
799 return(0);
800 }
801
802 if (convp)
803 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
804
805 /*
806 * We can expect fsync on any vnode at all if venus is purging it.
807 * Venus can't very well answer the fsync request, now can it?
808 * Hopefully, it won't have to, because hopefully, venus preserves
809 * the (possibly untrue) invariant that it never purges an open
810 * vnode. Hopefully.
811 */
812 if (cp->c_flags & C_PURGING) {
813 return(0);
814 }
815
816 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
817
818 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
819 return(error);
820 }
821
822 /*
823 * vp is locked on entry, and we must unlock it.
824 * XXX This routine is suspect and probably needs rewriting.
825 */
826 int
827 coda_inactive(void *v)
828 {
829 /* true args */
830 struct vop_inactive_args *ap = v;
831 vnode_t *vp = ap->a_vp;
832 struct cnode *cp = VTOC(vp);
833 kauth_cred_t cred __unused = NULL;
834
835 /* We don't need to send inactive to venus - DCS */
836 MARK_ENTRY(CODA_INACTIVE_STATS);
837
838 if (IS_CTL_VP(vp)) {
839 MARK_INT_SAT(CODA_INACTIVE_STATS);
840 VOP_UNLOCK(vp);
841 return 0;
842 }
843
844 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
845 coda_f2s(&cp->c_fid), vp->v_mount));)
846
847 if (vp->v_mount->mnt_data == NULL) {
848 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
849 panic("badness in coda_inactive");
850 }
851
852 #ifdef CODA_VERBOSE
853 /* Sanity checks that perhaps should be panic. */
854 if (vp->v_usecount > 1)
855 printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
856 if (cp->c_ovp != NULL)
857 printf("%s: %p ovp != NULL\n", __func__, vp);
858 #endif
859 /* XXX Do we need to VOP_CLOSE container vnodes? */
860 VOP_UNLOCK(vp);
861 if (!IS_UNMOUNTING(cp))
862 *ap->a_recycle = true;
863
864 MARK_INT_SAT(CODA_INACTIVE_STATS);
865 return(0);
866 }
867
868 /*
869 * Coda does not use the normal namecache, but a private version.
870 * Consider how to use the standard facility instead.
871 */
872 int
873 coda_lookup(void *v)
874 {
875 /* true args */
876 struct vop_lookup_v2_args *ap = v;
877 /* (locked) vnode of dir in which to do lookup */
878 vnode_t *dvp = ap->a_dvp;
879 struct cnode *dcp = VTOC(dvp);
880 /* output variable for result */
881 vnode_t **vpp = ap->a_vpp;
882 /* name to lookup */
883 struct componentname *cnp = ap->a_cnp;
884 kauth_cred_t cred = cnp->cn_cred;
885 struct lwp *l = curlwp;
886 /* locals */
887 struct cnode *cp;
888 const char *nm = cnp->cn_nameptr;
889 int len = cnp->cn_namelen;
890 CodaFid VFid;
891 int vtype;
892 int error = 0;
893
894 MARK_ENTRY(CODA_LOOKUP_STATS);
895
896 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
897 nm, coda_f2s(&dcp->c_fid)));)
898
899 /*
900 * XXX componentname flags in MODMASK are not handled at all
901 */
902
903 /*
904 * The overall strategy is to switch on the lookup type and get a
905 * result vnode that is vref'd but not locked.
906 */
907
908 /* Check for lookup of control object. */
909 if (IS_CTL_NAME(dvp, nm, len)) {
910 *vpp = coda_ctlvp;
911 vref(*vpp);
912 MARK_INT_SAT(CODA_LOOKUP_STATS);
913 goto exit;
914 }
915
916 /* Avoid trying to hand venus an unreasonably long name. */
917 if (len+1 > CODA_MAXNAMLEN) {
918 MARK_INT_FAIL(CODA_LOOKUP_STATS);
919 CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
920 __func__, coda_f2s(&dcp->c_fid), nm));)
921 *vpp = (vnode_t *)0;
922 error = EINVAL;
923 goto exit;
924 }
925
926 /*
927 * Try to resolve the lookup in the minicache. If that fails, ask
928 * venus to do the lookup. XXX The interaction between vnode
929 * locking and any locking that coda does is not clear.
930 */
931 cp = coda_nc_lookup(dcp, nm, len, cred);
932 if (cp) {
933 *vpp = CTOV(cp);
934 vref(*vpp);
935 CODADEBUG(CODA_LOOKUP,
936 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
937 } else {
938 /* The name wasn't cached, so ask Venus. */
939 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
940 &vtype);
941
942 if (error) {
943 MARK_INT_FAIL(CODA_LOOKUP_STATS);
944 CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
945 __func__, coda_f2s(&dcp->c_fid), nm, error));)
946 *vpp = (vnode_t *)0;
947 } else {
948 MARK_INT_SAT(CODA_LOOKUP_STATS);
949 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
950 __func__, coda_f2s(&VFid), vtype, error)); )
951
952 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
953 *vpp = CTOV(cp);
954 /* vpp is now vrefed. */
955
956 /*
957 * Unless this vnode is marked CODA_NOCACHE, enter it into
958 * the coda name cache to avoid a future venus round-trip.
959 * XXX Interaction with componentname NOCACHE is unclear.
960 */
961 if (!(vtype & CODA_NOCACHE))
962 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
963 }
964 }
965
966 exit:
967 /*
968 * If we are creating, and this was the last name to be looked up,
969 * and the error was ENOENT, then make the leaf NULL and return
970 * success.
971 * XXX Check against new lookup rules.
972 */
973 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
974 && (cnp->cn_flags & ISLASTCN)
975 && (error == ENOENT))
976 {
977 error = EJUSTRETURN;
978 *ap->a_vpp = NULL;
979 }
980
981 return(error);
982 }
983
984 /*ARGSUSED*/
985 int
986 coda_create(void *v)
987 {
988 /* true args */
989 struct vop_create_v3_args *ap = v;
990 vnode_t *dvp = ap->a_dvp;
991 struct cnode *dcp = VTOC(dvp);
992 struct vattr *va = ap->a_vap;
993 int exclusive = 1;
994 int mode = ap->a_vap->va_mode;
995 vnode_t **vpp = ap->a_vpp;
996 struct componentname *cnp = ap->a_cnp;
997 kauth_cred_t cred = cnp->cn_cred;
998 struct lwp *l = curlwp;
999 /* locals */
1000 int error;
1001 struct cnode *cp;
1002 const char *nm = cnp->cn_nameptr;
1003 int len = cnp->cn_namelen;
1004 CodaFid VFid;
1005 struct vattr attr;
1006
1007 MARK_ENTRY(CODA_CREATE_STATS);
1008
1009 /* All creates are exclusive XXX */
1010 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1011
1012 /* Check for create of control object. */
1013 if (IS_CTL_NAME(dvp, nm, len)) {
1014 *vpp = (vnode_t *)0;
1015 MARK_INT_FAIL(CODA_CREATE_STATS);
1016 return(EACCES);
1017 }
1018
1019 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1020
1021 if (!error) {
1022
1023 /*
1024 * XXX Violation of venus/kernel invariants is a difficult case,
1025 * but venus should not be able to cause a panic.
1026 */
1027 /* If this is an exclusive create, panic if the file already exists. */
1028 /* Venus should have detected the file and reported EEXIST. */
1029
1030 if ((exclusive == 1) &&
1031 (coda_find(&VFid) != NULL))
1032 panic("cnode existed for newly created file!");
1033
1034 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1035 *vpp = CTOV(cp);
1036
1037 /* XXX vnodeops doesn't say this argument can be changed. */
1038 /* Update va to reflect the new attributes. */
1039 (*va) = attr;
1040
1041 /* Update the attribute cache and mark it as valid */
1042 if (coda_attr_cache) {
1043 VTOC(*vpp)->c_vattr = attr;
1044 VTOC(*vpp)->c_flags |= C_VATTR;
1045 }
1046
1047 /* Invalidate parent's attr cache (modification time has changed). */
1048 VTOC(dvp)->c_flags &= ~C_VATTR;
1049
1050 /* enter the new vnode in the Name Cache */
1051 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1052
1053 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1054 coda_f2s(&VFid), error)); )
1055 } else {
1056 *vpp = (vnode_t *)0;
1057 CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1058 error));)
1059 }
1060
1061 if (!error) {
1062 #ifdef CODA_VERBOSE
1063 if ((cnp->cn_flags & LOCKLEAF) == 0)
1064 /* This should not happen; flags are for lookup only. */
1065 printf("%s: LOCKLEAF not set!\n", __func__);
1066 #endif
1067 }
1068
1069 return(error);
1070 }
1071
1072 int
1073 coda_remove(void *v)
1074 {
1075 /* true args */
1076 struct vop_remove_args *ap = v;
1077 vnode_t *dvp = ap->a_dvp;
1078 struct cnode *cp = VTOC(dvp);
1079 vnode_t *vp = ap->a_vp;
1080 struct componentname *cnp = ap->a_cnp;
1081 kauth_cred_t cred = cnp->cn_cred;
1082 struct lwp *l = curlwp;
1083 /* locals */
1084 int error;
1085 const char *nm = cnp->cn_nameptr;
1086 int len = cnp->cn_namelen;
1087 struct cnode *tp;
1088
1089 MARK_ENTRY(CODA_REMOVE_STATS);
1090
1091 CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1092 nm, coda_f2s(&cp->c_fid)));)
1093
1094 /* Remove the file's entry from the CODA Name Cache */
1095 /* We're being conservative here, it might be that this person
1096 * doesn't really have sufficient access to delete the file
1097 * but we feel zapping the entry won't really hurt anyone -- dcs
1098 */
1099 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1100 * exist, and one is removed, the link count on the other will be
1101 * off by 1. We could either invalidate the attrs if cached, or
1102 * fix them. I'll try to fix them. DCS 11/8/94
1103 */
1104 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1105 if (tp) {
1106 if (VALID_VATTR(tp)) { /* If attrs are cached */
1107 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1108 tp->c_vattr.va_nlink--;
1109 }
1110 }
1111
1112 coda_nc_zapfile(VTOC(dvp), nm, len);
1113 /* No need to flush it if it doesn't exist! */
1114 }
1115 /* Invalidate the parent's attr cache, the modification time has changed */
1116 VTOC(dvp)->c_flags &= ~C_VATTR;
1117
1118 /* Check for remove of control object. */
1119 if (IS_CTL_NAME(dvp, nm, len)) {
1120 MARK_INT_FAIL(CODA_REMOVE_STATS);
1121 return(ENOENT);
1122 }
1123
1124 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1125
1126 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1127
1128 /*
1129 * Unlock parent and child (avoiding double if ".").
1130 */
1131 if (dvp == vp) {
1132 vrele(vp);
1133 } else {
1134 vput(vp);
1135 }
1136 vput(dvp);
1137
1138 return(error);
1139 }
1140
1141 /*
1142 * dvp is the directory where the link is to go, and is locked.
1143 * vp is the object to be linked to, and is unlocked.
1144 * At exit, we must unlock dvp, and vput dvp.
1145 */
1146 int
1147 coda_link(void *v)
1148 {
1149 /* true args */
1150 struct vop_link_args *ap = v;
1151 vnode_t *vp = ap->a_vp;
1152 struct cnode *cp = VTOC(vp);
1153 vnode_t *dvp = ap->a_dvp;
1154 struct cnode *dcp = VTOC(dvp);
1155 struct componentname *cnp = ap->a_cnp;
1156 kauth_cred_t cred = cnp->cn_cred;
1157 struct lwp *l = curlwp;
1158 /* locals */
1159 int error;
1160 const char *nm = cnp->cn_nameptr;
1161 int len = cnp->cn_namelen;
1162
1163 MARK_ENTRY(CODA_LINK_STATS);
1164
1165 if (codadebug & CODADBGMSK(CODA_LINK)) {
1166 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1167 myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1168 }
1176
1177 /* Check for link to/from control object. */
1178 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1179 MARK_INT_FAIL(CODA_LINK_STATS);
1180 return(EACCES);
1181 }
1182
1183 /* If linking . to a name, error out earlier. */
1184 if (vp == dvp) {
1185 #ifdef CODA_VERBOSE
1186 printf("%s coda_link vp==dvp\n", __func__);
1187 #endif
1188 error = EISDIR;
1189 goto exit;
1190 }
1191
1192 /* XXX Why does venus_link need the vnode to be locked?*/
1193 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1194 #ifdef CODA_VERBOSE
1195 printf("%s: couldn't lock vnode %p\n", __func__, vp);
1196 #endif
1197 error = EFAULT; /* XXX better value */
1198 goto exit;
1199 }
1200 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1201 VOP_UNLOCK(vp);
1202
1203 /* Invalidate parent's attr cache (the modification time has changed). */
1204 VTOC(dvp)->c_flags &= ~C_VATTR;
1205 /* Invalidate child's attr cache (XXX why). */
1206 VTOC(vp)->c_flags &= ~C_VATTR;
1207
1208 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1209
1210 exit:
1211 vput(dvp);
1212 return(error);
1213 }
1214
1215 int
1216 coda_rename(void *v)
1217 {
1218 /* true args */
1219 struct vop_rename_args *ap = v;
1220 vnode_t *odvp = ap->a_fdvp;
1221 struct cnode *odcp = VTOC(odvp);
1222 struct componentname *fcnp = ap->a_fcnp;
1223 vnode_t *ndvp = ap->a_tdvp;
1224 struct cnode *ndcp = VTOC(ndvp);
1225 struct componentname *tcnp = ap->a_tcnp;
1226 kauth_cred_t cred = fcnp->cn_cred;
1227 struct lwp *l = curlwp;
1228 /* true args */
1229 int error;
1230 const char *fnm = fcnp->cn_nameptr;
1231 int flen = fcnp->cn_namelen;
1232 const char *tnm = tcnp->cn_nameptr;
1233 int tlen = tcnp->cn_namelen;
1234
1235 MARK_ENTRY(CODA_RENAME_STATS);
1236
1237 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1238 This could be Bad. XXX */
1239 #ifdef OLD_DIAGNOSTIC
1240 if ((fcnp->cn_cred != tcnp->cn_cred)
1241 || (fcnp->cn_lwp != tcnp->cn_lwp))
1242 {
1243 panic("%s: component names don't agree", __func__);
1244 }
1245 #endif
1246
1247 /* Check for rename involving control object. */
1248 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1249 MARK_INT_FAIL(CODA_RENAME_STATS);
1250 return(EACCES);
1251 }
1252
1253 /* Problem with moving directories -- need to flush entry for .. */
1254 if (odvp != ndvp) {
1255 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1256 if (ovcp) {
1257 vnode_t *ovp = CTOV(ovcp);
1258 if ((ovp) &&
1259 (ovp->v_type == VDIR)) /* If it's a directory */
1260 coda_nc_zapfile(VTOC(ovp),"..", 2);
1261 }
1262 }
1263
1264 /* Remove the entries for both source and target files */
1265 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1266 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1267
1268 /* Invalidate the parent's attr cache, the modification time has changed */
1269 VTOC(odvp)->c_flags &= ~C_VATTR;
1270 VTOC(ndvp)->c_flags &= ~C_VATTR;
1271
1272 if (flen+1 > CODA_MAXNAMLEN) {
1273 MARK_INT_FAIL(CODA_RENAME_STATS);
1274 error = EINVAL;
1275 goto exit;
1276 }
1277
1278 if (tlen+1 > CODA_MAXNAMLEN) {
1279 MARK_INT_FAIL(CODA_RENAME_STATS);
1280 error = EINVAL;
1281 goto exit;
1282 }
1283
1284 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1285
1286 exit:
1287 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1288 /* XXX - do we need to call cache purge on the moved vnode? */
1289 cache_purge(ap->a_fvp);
1290
1291 /* It seems to be incumbent on us to drop locks on all four vnodes */
1292 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1293
1294 vrele(ap->a_fvp);
1295 vrele(odvp);
1296
1297 if (ap->a_tvp) {
1298 if (ap->a_tvp == ndvp) {
1299 vrele(ap->a_tvp);
1300 } else {
1301 vput(ap->a_tvp);
1302 }
1303 }
1304
1305 vput(ndvp);
1306 return(error);
1307 }
1308
1309 int
1310 coda_mkdir(void *v)
1311 {
1312 /* true args */
1313 struct vop_mkdir_v3_args *ap = v;
1314 vnode_t *dvp = ap->a_dvp;
1315 struct cnode *dcp = VTOC(dvp);
1316 struct componentname *cnp = ap->a_cnp;
1317 struct vattr *va = ap->a_vap;
1318 vnode_t **vpp = ap->a_vpp;
1319 kauth_cred_t cred = cnp->cn_cred;
1320 struct lwp *l = curlwp;
1321 /* locals */
1322 int error;
1323 const char *nm = cnp->cn_nameptr;
1324 int len = cnp->cn_namelen;
1325 struct cnode *cp;
1326 CodaFid VFid;
1327 struct vattr ova;
1328
1329 MARK_ENTRY(CODA_MKDIR_STATS);
1330
1331 /* Check for mkdir of control object. */
1332 if (IS_CTL_NAME(dvp, nm, len)) {
1333 *vpp = (vnode_t *)0;
1334 MARK_INT_FAIL(CODA_MKDIR_STATS);
1335 return(EACCES);
1336 }
1337
1338 if (len+1 > CODA_MAXNAMLEN) {
1339 *vpp = (vnode_t *)0;
1340 MARK_INT_FAIL(CODA_MKDIR_STATS);
1341 return(EACCES);
1342 }
1343
1344 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1345
1346 if (!error) {
1347 if (coda_find(&VFid) != NULL)
1348 panic("cnode existed for newly created directory!");
1349
1350
1351 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1352 *vpp = CTOV(cp);
1353
1354 /* enter the new vnode in the Name Cache */
1355 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1356
1357 /* as a side effect, enter "." and ".." for the directory */
1358 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1359 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1360
1361 if (coda_attr_cache) {
1362 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1363 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1364 }
1365
1366 /* Invalidate the parent's attr cache, the modification time has changed */
1367 VTOC(dvp)->c_flags &= ~C_VATTR;
1368
1369 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1370 coda_f2s(&VFid), error)); )
1371 } else {
1372 *vpp = (vnode_t *)0;
1373 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1374 }
1375
1376 return(error);
1377 }
1378
1379 int
1380 coda_rmdir(void *v)
1381 {
1382 /* true args */
1383 struct vop_rmdir_args *ap = v;
1384 vnode_t *dvp = ap->a_dvp;
1385 struct cnode *dcp = VTOC(dvp);
1386 vnode_t *vp = ap->a_vp;
1387 struct componentname *cnp = ap->a_cnp;
1388 kauth_cred_t cred = cnp->cn_cred;
1389 struct lwp *l = curlwp;
1390 /* true args */
1391 int error;
1392 const char *nm = cnp->cn_nameptr;
1393 int len = cnp->cn_namelen;
1394 struct cnode *cp;
1395
1396 MARK_ENTRY(CODA_RMDIR_STATS);
1397
1398 /* Check for rmdir of control object. */
1399 if (IS_CTL_NAME(dvp, nm, len)) {
1400 MARK_INT_FAIL(CODA_RMDIR_STATS);
1401 return(ENOENT);
1402 }
1403
1404 /* Can't remove . in self. */
1405 if (dvp == vp) {
1406 #ifdef CODA_VERBOSE
1407 printf("%s: dvp == vp\n", __func__);
1408 #endif
1409 error = EINVAL;
1410 goto exit;
1411 }
1412
1413 /*
1414 * The caller may not have adequate permissions, and the venus
1415 * operation may fail, but it doesn't hurt from a correctness
1416 * viewpoint to invalidate cache entries.
1417 * XXX Why isn't this done after the venus_rmdir call?
1418 */
1419 /* Look up child in name cache (by name, from parent). */
1420 cp = coda_nc_lookup(dcp, nm, len, cred);
1421 /* If found, remove all children of the child (., ..). */
1422 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1423
1424 /* Remove child's own entry. */
1425 coda_nc_zapfile(dcp, nm, len);
1426
1427 /* Invalidate parent's attr cache (the modification time has changed). */
1428 dcp->c_flags &= ~C_VATTR;
1429
1430 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1431
1432 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1433
1434 exit:
1435 /* vput both vnodes */
1436 vput(dvp);
1437 if (dvp == vp) {
1438 vrele(vp);
1439 } else {
1440 vput(vp);
1441 }
1442
1443 return(error);
1444 }
1445
1446 int
1447 coda_symlink(void *v)
1448 {
1449 /* true args */
1450 struct vop_symlink_v3_args *ap = v;
1451 vnode_t *dvp = ap->a_dvp;
1452 struct cnode *dcp = VTOC(dvp);
1453 /* a_vpp is used in place below */
1454 struct componentname *cnp = ap->a_cnp;
1455 struct vattr *tva = ap->a_vap;
1456 char *path = ap->a_target;
1457 kauth_cred_t cred = cnp->cn_cred;
1458 struct lwp *l = curlwp;
1459 /* locals */
1460 int error;
1461 u_long saved_cn_flags;
1462 const char *nm = cnp->cn_nameptr;
1463 int len = cnp->cn_namelen;
1464 int plen = strlen(path);
1465
1466 /*
1467 * Here's the strategy for the moment: perform the symlink, then
1468 * do a lookup to grab the resulting vnode. I know this requires
1469 * two communications with Venus for a new symbolic link, but
1470 * that's the way the ball bounces. I don't yet want to change
1471 * the way the Mach symlink works. When Mach support is
1472 * deprecated, we should change symlink so that the common case
1473 * returns the resultant vnode in a vpp argument.
1474 */
1475
1476 MARK_ENTRY(CODA_SYMLINK_STATS);
1477
1478 /* Check for symlink of control object. */
1479 if (IS_CTL_NAME(dvp, nm, len)) {
1480 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1481 error = EACCES;
1482 goto exit;
1483 }
1484
1485 if (plen+1 > CODA_MAXPATHLEN) {
1486 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1487 error = EINVAL;
1488 goto exit;
1489 }
1490
1491 if (len+1 > CODA_MAXNAMLEN) {
1492 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1493 error = EINVAL;
1494 goto exit;
1495 }
1496
1497 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1498
1499 /* Invalidate the parent's attr cache (modification time has changed). */
1500 dcp->c_flags &= ~C_VATTR;
1501
1502 if (!error) {
1503 /*
1504 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1505 * these are defined only for VOP_LOOKUP. We desire to reuse
1506 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1507 * stray flags passed to us. Such stray flags can occur because
1508 * sys_symlink makes a namei call and then reuses the
1509 * componentname structure.
1510 */
1511 /*
1512 * XXX Arguably we should create our own componentname structure
1513 * and not reuse the one that was passed in.
1514 */
1515 saved_cn_flags = cnp->cn_flags;
1516 cnp->cn_flags &= ~(MODMASK | OPMASK);
1517 cnp->cn_flags |= LOOKUP;
1518 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1519 cnp->cn_flags = saved_cn_flags;
1520 }
1521
1522 exit:
1523 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1524 return(error);
1525 }
1526
1527 /*
1528 * Read directory entries.
1529 */
1530 int
1531 coda_readdir(void *v)
1532 {
1533 /* true args */
1534 struct vop_readdir_args *ap = v;
1535 vnode_t *vp = ap->a_vp;
1536 struct cnode *cp = VTOC(vp);
1537 struct uio *uiop = ap->a_uio;
1538 kauth_cred_t cred = ap->a_cred;
1539 int *eofflag = ap->a_eofflag;
1540 off_t **cookies = ap->a_cookies;
1541 int *ncookies = ap->a_ncookies;
1542 /* upcall decl */
1543 /* locals */
1544 int error = 0;
1545
1546 MARK_ENTRY(CODA_READDIR_STATS);
1547
1548 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1549 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1550 (long long) uiop->uio_offset)); )
1551
1552 /* Check for readdir of control object. */
1553 if (IS_CTL_VP(vp)) {
1554 MARK_INT_FAIL(CODA_READDIR_STATS);
1555 return(ENOENT);
1556 }
1557
1558 {
1559 /* Redirect the request to UFS. */
1560
1561 /* If directory is not already open do an "internal open" on it. */
1562 int opened_internally = 0;
1563 if (cp->c_ovp == NULL) {
1564 opened_internally = 1;
1565 MARK_INT_GEN(CODA_OPEN_STATS);
1566 error = VOP_OPEN(vp, FREAD, cred);
1567 #ifdef CODA_VERBOSE
1568 printf("%s: Internally Opening %p\n", __func__, vp);
1569 #endif
1570 if (error) return(error);
1571 } else
1572 vp = cp->c_ovp;
1573
1574 /* Have UFS handle the call. */
1575 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1576 __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); )
1577 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1578 if (error)
1579 MARK_INT_FAIL(CODA_READDIR_STATS);
1580 else
1581 MARK_INT_SAT(CODA_READDIR_STATS);
1582
1583 /* Do an "internal close" if necessary. */
1584 if (opened_internally) {
1585 MARK_INT_GEN(CODA_CLOSE_STATS);
1586 (void)VOP_CLOSE(vp, FREAD, cred);
1587 }
1588 }
1589
1590 return(error);
1591 }
1592
1593 /*
1594 * Convert from file system blocks to device blocks
1595 */
1596 int
1597 coda_bmap(void *v)
1598 {
1599 /* XXX on the global proc */
1600 /* true args */
1601 struct vop_bmap_args *ap = v;
1602 vnode_t *vp __unused = ap->a_vp; /* file's vnode */
1603 daddr_t bn __unused = ap->a_bn; /* fs block number */
1604 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */
1605 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1606 struct lwp *l __unused = curlwp;
1607 /* upcall decl */
1608 /* locals */
1609
1610 *vpp = (vnode_t *)0;
1611 myprintf(("coda_bmap called!\n"));
1612 return(EINVAL);
1613 }
1614
1615 /*
1616 * I don't think the following two things are used anywhere, so I've
1617 * commented them out
1618 *
1619 * struct buf *async_bufhead;
1620 * int async_daemon_count;
1621 */
1622 int
1623 coda_strategy(void *v)
1624 {
1625 /* true args */
1626 struct vop_strategy_args *ap = v;
1627 struct buf *bp __unused = ap->a_bp;
1628 struct lwp *l __unused = curlwp;
1629 /* upcall decl */
1630 /* locals */
1631
1632 myprintf(("coda_strategy called! "));
1633 return(EINVAL);
1634 }
1635
1636 int
1637 coda_reclaim(void *v)
1638 {
1639 /* true args */
1640 struct vop_reclaim_args *ap = v;
1641 vnode_t *vp = ap->a_vp;
1642 struct cnode *cp = VTOC(vp);
1643 /* upcall decl */
1644 /* locals */
1645
1646 /*
1647 * Forced unmount/flush will let vnodes with non zero use be destroyed!
1648 */
1649 ENTRY;
1650
1651 if (IS_UNMOUNTING(cp)) {
1652 #ifdef DEBUG
1653 if (VTOC(vp)->c_ovp) {
1654 if (IS_UNMOUNTING(cp))
1655 printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1656 }
1657 #endif
1658 } else {
1659 #ifdef OLD_DIAGNOSTIC
1660 if (vp->v_usecount != 0)
1661 printf("%s: pushing active %p\n", __func__, vp);
1662 if (VTOC(vp)->c_ovp) {
1663 panic("%s: c_ovp not void", __func__);
1664 }
1665 #endif
1666 }
1667 /* If an array has been allocated to hold the symlink, deallocate it */
1668 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
1669 if (cp->c_symlink == NULL)
1670 panic("%s: null symlink pointer in cnode", __func__);
1671
1672 CODA_FREE(cp->c_symlink, cp->c_symlen);
1673 cp->c_flags &= ~C_SYMLINK;
1674 cp->c_symlen = 0;
1675 }
1676
1677 /* Remove it from the table so it can't be found. */
1678 vcache_remove(vp->v_mount, &cp->c_fid, sizeof(CodaFid));
1679
1680 mutex_enter(vp->v_interlock);
1681 mutex_enter(&cp->c_lock);
1682 SET_VTOC(vp) = NULL;
1683 mutex_exit(&cp->c_lock);
1684 mutex_exit(vp->v_interlock);
1685 mutex_destroy(&cp->c_lock);
1686 kmem_free(cp, sizeof(*cp));
1687
1688 return (0);
1689 }
1690
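/*
 * Locking is delegated to the genfs_lock/genfs_unlock/genfs_islocked
 * helpers; coda_lockdebug only adds a trace of the fid being locked or
 * unlocked.
 */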
1691 int
1692 coda_lock(void *v)
1693 {
1694 /* true args */
1695 struct vop_lock_args *ap = v;
1696 vnode_t *vp = ap->a_vp;
1697 struct cnode *cp = VTOC(vp);
1698 /* upcall decl */
1699 /* locals */
1700
1701 ENTRY;
1702
1703 if (coda_lockdebug) {
1704 myprintf(("Attempting lock on %s\n",
1705 coda_f2s(&cp->c_fid)));
1706 }
1707
1708 return genfs_lock(v);
1709 }
1710
1711 int
1712 coda_unlock(void *v)
1713 {
1714 /* true args */
1715 struct vop_unlock_args *ap = v;
1716 vnode_t *vp = ap->a_vp;
1717 struct cnode *cp = VTOC(vp);
1718 /* upcall decl */
1719 /* locals */
1720
1721 ENTRY;
1722 if (coda_lockdebug) {
1723 myprintf(("Attempting unlock on %s\n",
1724 coda_f2s(&cp->c_fid)));
1725 }
1726
1727 return genfs_unlock(v);
1728 }
1729
1730 int
1731 coda_islocked(void *v)
1732 {
1733 /* true args */
1734 ENTRY;
1735
1736 return genfs_islocked(v);
1737 }
1738
1739 /*
1740 * Given a device and inode, obtain a locked vnode. One reference is
1741 * obtained and passed back to the caller.
1742 */
1743 int
1744 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1745 {
1746 int error;
1747 struct mount *mp;
1748
1749 /* Obtain mount point structure from device. */
1750 if (!(mp = devtomp(dev))) {
1751 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1752 (unsigned long long)dev));
1753 return(ENXIO);
1754 }
1755
1756 /*
1757 * Obtain vnode from mount point and inode.
1758 */
1759 error = VFS_VGET(mp, ino, vpp);
1760 if (error) {
1761 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1762 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1763 return(ENOENT);
1764 }
1765 /* share the underlying vnode lock with the coda vnode */
1766 mutex_obj_hold((*vpp)->v_interlock);
1767 uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock);
1768 KASSERT(VOP_ISLOCKED(*vpp));
1769 return(0);
1770 }
1771
1772 static void
1773 coda_print_vattr(struct vattr *attr)
1774 {
1775 const char *typestr;
1776
1777 switch (attr->va_type) {
1778 case VNON:
1779 typestr = "VNON";
1780 break;
1781 case VREG:
1782 typestr = "VREG";
1783 break;
1784 case VDIR:
1785 typestr = "VDIR";
1786 break;
1787 case VBLK:
1788 typestr = "VBLK";
1789 break;
1790 case VCHR:
1791 typestr = "VCHR";
1792 break;
1793 case VLNK:
1794 typestr = "VLNK";
1795 break;
1796 case VSOCK:
1797 typestr = "VSCK";
1798 break;
1799 case VFIFO:
1800 typestr = "VFFO";
1801 break;
1802 case VBAD:
1803 typestr = "VBAD";
1804 break;
1805 default:
1806 typestr = "????";
1807 break;
1808 }
1809
1810
1811 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1812 typestr, (int)attr->va_mode, (int)attr->va_uid,
1813 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1814
1815 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1816 (int)attr->va_fileid, (int)attr->va_nlink,
1817 (int)attr->va_size,
1818 (int)attr->va_blocksize,(int)attr->va_bytes));
1819 myprintf((" gen %ld flags %ld vaflags %d\n",
1820 attr->va_gen, attr->va_flags, attr->va_vaflags));
1821 myprintf((" atime sec %d nsec %d\n",
1822 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1823 myprintf((" mtime sec %d nsec %d\n",
1824 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1825 myprintf((" ctime sec %d nsec %d\n",
1826 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1827 }
1828
1829 /*
1830 * Return the cnode for the given fid.
1831 * vcache_get() either finds the existing vnode for this fid or
1832 * creates a new one and loads its cnode; the reference obtained
1833 * there is handed back to the caller. The cnode is removed from
1834 * the cache again by vcache_remove() in coda_reclaim(), typically
1835 * after coda_inactive() has asked for the vnode to be recycled.
1836 */
1837 struct cnode *
1838 make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1839 {
1840 int error __diagused;
1841 struct vnode *vp;
1842 struct cnode *cp;
1843
1844 error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp);
1845 KASSERT(error == 0);
1846
1847 mutex_enter(vp->v_interlock);
1848 cp = VTOC(vp);
1849 KASSERT(cp != NULL);
1850 mutex_enter(&cp->c_lock);
1851 mutex_exit(vp->v_interlock);
1852
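/*
 * vcache_get() may return a vnode whose type was established by an
 * earlier call (or is still VNON); correct it here, converting the
 * specinfo when the node becomes or stops being a device node.
 */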
1853 if (vp->v_type != type) {
1854 if (vp->v_type == VCHR || vp->v_type == VBLK)
1855 spec_node_destroy(vp);
1856 vp->v_type = type;
1857 if (type == VCHR || type == VBLK)
1858 spec_node_init(vp, NODEV);
1859 uvm_vnp_setsize(vp, 0);
1860 }
1861 mutex_exit(&cp->c_lock);
1862
1863 return cp;
1864 }
1865
1866 /*
1867 * coda_getpages may be called on a vnode which has not been opened,
1868 * e.g. to fault in pages to execute a program. In that case, we must
1869 * open the file to get the container. The vnode may or may not be
1870 * locked, and we must leave it in the same state.
1871 */
1872 int
1873 coda_getpages(void *v)
1874 {
1875 struct vop_getpages_args /* {
1876 vnode_t *a_vp;
1877 voff_t a_offset;
1878 struct vm_page **a_m;
1879 int *a_count;
1880 int a_centeridx;
1881 vm_prot_t a_access_type;
1882 int a_advice;
1883 int a_flags;
1884 } */ *ap = v;
1885 vnode_t *vp = ap->a_vp, *cvp;
1886 struct cnode *cp = VTOC(vp);
1887 struct lwp *l = curlwp;
1888 kauth_cred_t cred = l->l_cred;
1889 int error, cerror;
1890 int waslocked; /* 1 if vnode lock was held on entry */
1891 int didopen = 0; /* 1 if we opened container file */
1892
1893 /*
1894 * Handle a case that uvm_fault doesn't quite use yet.
1895 * See layer_vnops.c. for inspiration.
1896 */
1897 if (ap->a_flags & PGO_LOCKED) {
1898 return EBUSY;
1899 }
1900
1901 KASSERT(mutex_owned(vp->v_interlock));
1902
1903 /* Check for control object. */
1904 if (IS_CTL_VP(vp)) {
1905 #ifdef CODA_VERBOSE
1906 printf("%s: control object %p\n", __func__, vp);
1907 #endif
1908 return(EINVAL);
1909 }
1910
1911 /*
1912 * XXX It's really not ok to be releasing the lock we get,
1913 * because we could be overlapping with another call to
1914 * getpages and drop a lock they are relying on. We need to
1915 * figure out whether getpages ever is called holding the
1916 * lock, and if we should serialize getpages calls by some
1917 * mechanism.
1918 */
1919 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1920 waslocked = VOP_ISLOCKED(vp);
1921
1922 /* Get container file if not already present. */
1923 cvp = cp->c_ovp;
1924 if (cvp == NULL) {
1925 /*
1926 * VOP_OPEN requires a locked vnode. We must avoid
1927 * locking the vnode if it is already locked, and
1928 * leave it in the same state on exit.
1929 */
1930 if (waslocked == 0) {
1931 mutex_exit(vp->v_interlock);
1932 cerror = vn_lock(vp, LK_EXCLUSIVE);
1933 if (cerror) {
1934 #ifdef CODA_VERBOSE
1935 printf("%s: can't lock vnode %p\n",
1936 __func__, vp);
1937 #endif
1938 return cerror;
1939 }
1940 #ifdef CODA_VERBOSE
1941 printf("%s: locked vnode %p\n", __func__, vp);
1942 #endif
1943 }
1944
1945 /*
1946 * Open file (causes upcall to venus).
1947 * XXX Perhaps we should not fully open the file, but
1948 * simply obtain a container file.
1949 */
1950 /* XXX Is it ok to do this while holding the mutex? */
1951 cerror = VOP_OPEN(vp, FREAD, cred);
1952
1953 if (cerror) {
1954 #ifdef CODA_VERBOSE
1955 printf("%s: cannot open vnode %p => %d\n", __func__,
1956 vp, cerror);
1957 #endif
1958 if (waslocked == 0)
1959 VOP_UNLOCK(vp);
1960 return cerror;
1961 }
1962
1963 #ifdef CODA_VERBOSE
1964 printf("%s: opened vnode %p\n", __func__, vp);
1965 #endif
1966 cvp = cp->c_ovp;
1967 didopen = 1;
1968 if (waslocked == 0)
1969 mutex_enter(vp->v_interlock);
1970 }
1971 KASSERT(cvp != NULL);
1972
1973 /* Munge the arg structure to refer to the container vnode. */
1974 KASSERT(cvp->v_interlock == vp->v_interlock);
1975 ap->a_vp = cp->c_ovp;
1976
1977 /* Finally, call getpages on it. */
1978 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
1979
1980 /* If we opened the vnode, we must close it. */
1981 if (didopen) {
1982 /*
1983 * VOP_CLOSE requires a locked vnode, but we are still
1984 * holding the lock (or riding a caller's lock).
1985 */
1986 cerror = VOP_CLOSE(vp, FREAD, cred);
1987 #ifdef CODA_VERBOSE
1988 if (cerror != 0)
1989 /* XXX How should we handle this? */
1990 printf("%s: closed vnode %p -> %d\n", __func__,
1991 vp, cerror);
1992 #endif
1993
1994 /* If we obtained a lock, drop it. */
1995 if (waslocked == 0)
1996 VOP_UNLOCK(vp);
1997 }
1998
1999 return error;
2000 }
2001
2002 /*
2003 * The protocol requires v_interlock to be held by the caller.
2004 */
2005 int
2006 coda_putpages(void *v)
2007 {
2008 struct vop_putpages_args /* {
2009 vnode_t *a_vp;
2010 voff_t a_offlo;
2011 voff_t a_offhi;
2012 int a_flags;
2013 } */ *ap = v;
2014 vnode_t *vp = ap->a_vp, *cvp;
2015 struct cnode *cp = VTOC(vp);
2016 int error;
2017
2018 KASSERT(mutex_owned(vp->v_interlock));
2019
2020 /* Check for control object. */
2021 if (IS_CTL_VP(vp)) {
2022 mutex_exit(vp->v_interlock);
2023 #ifdef CODA_VERBOSE
2024 printf("%s: control object %p\n", __func__, vp);
2025 #endif
2026 return 0;
2027 }
2028
2029 /*
2030 * If container object is not present, then there are no pages
2031 * to put; just return without error. This happens all the
2032 * time, apparently during discard of a closed vnode (which
2033 * trivially can't have dirty pages).
2034 */
2035 cvp = cp->c_ovp;
2036 if (cvp == NULL) {
2037 mutex_exit(vp->v_interlock);
2038 return 0;
2039 }
2040
2041 /* Munge the arg structure to refer to the container vnode. */
2042 KASSERT(cvp->v_interlock == vp->v_interlock);
2043 ap->a_vp = cvp;
2044
2045 /* Finally, call putpages on it. */
2046 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2047
2048 return error;
2049 }
2050