1 /*	$NetBSD: coda_vnops.c,v 1.81.2.1 2012/05/23 10:07:52 yamt Exp $	*/
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44  * University.  Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.81.2.1 2012/05/23 10:07:52 yamt Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65
66 #include <miscfs/genfs/genfs.h>
67
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76
77 /*
78 * These flags select various performance enhancements.
79 */
80 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
82 int coda_access_cache = 1; /* Set to handle some access checks directly */
83
84 /* structure to keep track of vfs calls */
85
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
87
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
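
/*
 * The vnode operations below use these in a common pattern, e.g.:
 *
 *	MARK_ENTRY(CODA_OPEN_STATS);
 *	...
 *	if (error)
 *		MARK_INT_FAIL(CODA_OPEN_STATS);
 *	else
 *		MARK_INT_SAT(CODA_OPEN_STATS);
 *
 * MARK_INT_GEN() marks operations generated internally, such as the
 * internal open/close done by coda_rdwr() and coda_readdir().
 */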
92
93 /* Set to log vnode lock and unlock attempts */
94 static int coda_lockdebug = 0;
95
96 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
97
98 /* Definition of the vnode operation vector */
99
100 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
101 { &vop_default_desc, coda_vop_error },
102 { &vop_lookup_desc, coda_lookup }, /* lookup */
103 { &vop_create_desc, coda_create }, /* create */
104 { &vop_mknod_desc, coda_vop_error }, /* mknod */
105 { &vop_open_desc, coda_open }, /* open */
106 { &vop_close_desc, coda_close }, /* close */
107 { &vop_access_desc, coda_access }, /* access */
108 { &vop_getattr_desc, coda_getattr }, /* getattr */
109 { &vop_setattr_desc, coda_setattr }, /* setattr */
110 { &vop_read_desc, coda_read }, /* read */
111 { &vop_write_desc, coda_write }, /* write */
112 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
113 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
114 { &vop_mmap_desc, genfs_mmap }, /* mmap */
115 { &vop_fsync_desc, coda_fsync }, /* fsync */
116 { &vop_remove_desc, coda_remove }, /* remove */
117 { &vop_link_desc, coda_link }, /* link */
118 { &vop_rename_desc, coda_rename }, /* rename */
119 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
120 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
121 { &vop_symlink_desc, coda_symlink }, /* symlink */
122 { &vop_readdir_desc, coda_readdir }, /* readdir */
123 { &vop_readlink_desc, coda_readlink }, /* readlink */
124 { &vop_abortop_desc, coda_abortop }, /* abortop */
125 { &vop_inactive_desc, coda_inactive }, /* inactive */
126 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
127 { &vop_lock_desc, coda_lock }, /* lock */
128 { &vop_unlock_desc, coda_unlock }, /* unlock */
129 { &vop_bmap_desc, coda_bmap }, /* bmap */
130 { &vop_strategy_desc, coda_strategy }, /* strategy */
131 { &vop_print_desc, coda_vop_error }, /* print */
132 { &vop_islocked_desc, coda_islocked }, /* islocked */
133 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
134 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
135 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
136 { &vop_seek_desc, genfs_seek }, /* seek */
137 { &vop_poll_desc, genfs_poll }, /* poll */
138 { &vop_getpages_desc, coda_getpages }, /* getpages */
139 { &vop_putpages_desc, coda_putpages }, /* putpages */
140 { NULL, NULL }
141 };
142
143 const struct vnodeopv_desc coda_vnodeop_opv_desc =
144 { &coda_vnodeop_p, coda_vnodeop_entries };
145
146 /* Definitions of NetBSD vnodeop interfaces */
147
148 /*
149 * A generic error routine. Return EIO without looking at arguments.
150 */
151 int
152 coda_vop_error(void *anon) {
153 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
154
155 if (codadebug) {
156 myprintf(("%s: Vnode operation %s called (error).\n",
157 __func__, (*desc)->vdesc_name));
158 }
159
160 return EIO;
161 }
162
163 /* A generic do-nothing. */
164 int
165 coda_vop_nop(void *anon) {
166 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
167
168 if (codadebug) {
169 myprintf(("Vnode operation %s called, but unsupported\n",
170 (*desc)->vdesc_name));
171 }
172 return (0);
173 }
174
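/* Reset all of the per-operation statistics counters. */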
175 int
176 coda_vnodeopstats_init(void)
177 {
178 int i;
179
180     for (i = 0; i < CODA_VNODEOPS_SIZE; i++) {
181 coda_vnodeopstats[i].opcode = i;
182 coda_vnodeopstats[i].entries = 0;
183 coda_vnodeopstats[i].sat_intrn = 0;
184 coda_vnodeopstats[i].unsat_intrn = 0;
185 coda_vnodeopstats[i].gen_intrn = 0;
186 }
187
188 return 0;
189 }
190
191 /*
192 * XXX The entire relationship between VOP_OPEN and having a container
193 * file (via venus_open) needs to be reexamined. In particular, it's
194 * valid to open/mmap/close and then reference. Instead of doing
195 * VOP_OPEN when getpages needs a container, we should do the
196 * venus_open part, and record that the vnode has opened the container
197 * for getpages, and do the matching logical close on coda_inactive.
198 * Further, coda_rdwr needs a container file, and sometimes needs to
199 * do the equivalent of open (core dumps).
200 */
201 /*
202 * coda_open calls Venus to return the device and inode of the
203 * container file, and then obtains a vnode for that file. The
204 * container vnode is stored in the coda vnode, and a reference is
205 * added for each open file.
206 */
207 int
208 coda_open(void *v)
209 {
210 /*
211 * NetBSD can pass the O_EXCL flag in mode, even though the check
212 * has already happened. Venus defensively assumes that if open
213      * is passed O_EXCL, it must be a bug.  We strip the flag here.
214 */
215 /* true args */
216 struct vop_open_args *ap = v;
217 vnode_t *vp = ap->a_vp;
218 struct cnode *cp = VTOC(vp);
219 int flag = ap->a_mode & (~O_EXCL);
220 kauth_cred_t cred = ap->a_cred;
221 /* locals */
222 int error;
223 dev_t dev; /* container file device, inode, vnode */
224 ino_t inode;
225 vnode_t *container_vp;
226
227 MARK_ENTRY(CODA_OPEN_STATS);
228
229 /* Check for open of control file. */
230 if (IS_CTL_VP(vp)) {
231 /* if (WRITABLE(flag)) */
232 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
233 MARK_INT_FAIL(CODA_OPEN_STATS);
234 return(EACCES);
235 }
236 MARK_INT_SAT(CODA_OPEN_STATS);
237 return(0);
238 }
239
240 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
241 if (error)
242 return (error);
244 	CODADEBUG(CODA_OPEN, myprintf((
245 	    "%s: dev 0x%llx inode %llu result %d\n", __func__,
246 	    (unsigned long long)dev, (unsigned long long)inode, error));)
248
249 /*
250 * Obtain locked and referenced container vnode from container
251 * device/inode.
252 */
253 error = coda_grab_vnode(vp, dev, inode, &container_vp);
254 if (error)
255 return (error);
256
257 /* Save the vnode pointer for the container file. */
258 if (cp->c_ovp == NULL) {
259 cp->c_ovp = container_vp;
260 } else {
261 if (cp->c_ovp != container_vp)
262 /*
263 * Perhaps venus returned a different container, or
264 * something else went wrong.
265 */
266 panic("%s: cp->c_ovp != container_vp", __func__);
267 }
268 cp->c_ocount++;
269
270 /* Flush the attribute cache if writing the file. */
271 if (flag & FWRITE) {
272 cp->c_owrite++;
273 cp->c_flags &= ~C_VATTR;
274 }
275
276 /*
277 * Save the <device, inode> pair for the container file to speed
278 * up subsequent reads while closed (mmap, program execution).
279 * This is perhaps safe because venus will invalidate the node
280 * before changing the container file mapping.
281 */
282 cp->c_device = dev;
283 cp->c_inode = inode;
284
285 /* Open the container file. */
286 error = VOP_OPEN(container_vp, flag, cred);
287 /*
288 * Drop the lock on the container, after we have done VOP_OPEN
289 * (which requires a locked vnode).
290 */
291 VOP_UNLOCK(container_vp);
292 return(error);
293 }
294
295 /*
296 * Close the cache file used for I/O and notify Venus.
297 */
298 int
299 coda_close(void *v)
300 {
301 /* true args */
302 struct vop_close_args *ap = v;
303 vnode_t *vp = ap->a_vp;
304 struct cnode *cp = VTOC(vp);
305 int flag = ap->a_fflag;
306 kauth_cred_t cred = ap->a_cred;
307 /* locals */
308 int error;
309
310 MARK_ENTRY(CODA_CLOSE_STATS);
311
312 /* Check for close of control file. */
313 if (IS_CTL_VP(vp)) {
314 MARK_INT_SAT(CODA_CLOSE_STATS);
315 return(0);
316 }
317
318 /*
319 * XXX The IS_UNMOUNTING part of this is very suspect.
320 */
321 if (IS_UNMOUNTING(cp)) {
322 if (cp->c_ovp) {
323 #ifdef CODA_VERBOSE
324 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
325 __func__, vp->v_usecount, cp->c_ovp, vp, cp);
326 #endif
327 #ifdef hmm
328 vgone(cp->c_ovp);
329 #else
330 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
331 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
332 vput(cp->c_ovp);
333 #endif
334 } else {
335 #ifdef CODA_VERBOSE
336 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
337 #endif
338 }
339 return ENODEV;
340 }
341
342 /* Lock the container node, and VOP_CLOSE it. */
343 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
344 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
345 /*
346 * Drop the lock we just obtained, and vrele the container vnode.
347 * Decrement reference counts, and clear container vnode pointer on
348 * last close.
349 */
350 vput(cp->c_ovp);
351 if (flag & FWRITE)
352 --cp->c_owrite;
353 if (--cp->c_ocount == 0)
354 cp->c_ovp = NULL;
355
356 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
357
358 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
359 return(error);
360 }
361
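/*
 * Read and write are thin wrappers: both defer to coda_rdwr(), which
 * redirects the I/O to the container file obtained from Venus.
 */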
362 int
363 coda_read(void *v)
364 {
365 struct vop_read_args *ap = v;
366
367 ENTRY;
368 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
369 ap->a_ioflag, ap->a_cred, curlwp));
370 }
371
372 int
373 coda_write(void *v)
374 {
375 struct vop_write_args *ap = v;
376
377 ENTRY;
378 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
379 ap->a_ioflag, ap->a_cred, curlwp));
380 }
381
382 int
383 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
384 kauth_cred_t cred, struct lwp *l)
385 {
386 /* upcall decl */
387 /* NOTE: container file operation!!! */
388 /* locals */
389 struct cnode *cp = VTOC(vp);
390 vnode_t *cfvp = cp->c_ovp;
391 struct proc *p = l->l_proc;
392 int opened_internally = 0;
393 int error = 0;
394
395 MARK_ENTRY(CODA_RDWR_STATS);
396
397 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
398 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
399 (long long) uiop->uio_offset)); )
400
401 /* Check for rdwr of control object. */
402 if (IS_CTL_VP(vp)) {
403 MARK_INT_FAIL(CODA_RDWR_STATS);
404 return(EINVAL);
405 }
406
407 /* Redirect the request to UFS. */
408
409 /*
410 * If file is not already open this must be a page
411 * {read,write} request. Iget the cache file's inode
412 * pointer if we still have its <device, inode> pair.
413 * Otherwise, we must do an internal open to derive the
414 * pair.
415 * XXX Integrate this into a coherent strategy for container
416 * file acquisition.
417 */
418 if (cfvp == NULL) {
419 /*
420 * If we're dumping core, do the internal open. Otherwise
421 * venus won't have the correct size of the core when
422 * it's completely written.
423 */
424 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
425 #ifdef CODA_VERBOSE
426 printf("%s: grabbing container vnode, losing reference\n",
427 __func__);
428 #endif
429 /* Get locked and refed vnode. */
430 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
431 if (error) {
432 MARK_INT_FAIL(CODA_RDWR_STATS);
433 return(error);
434 }
435 /*
436 * Drop lock.
437 * XXX Where is reference released.
438 */
439 VOP_UNLOCK(cfvp);
440 }
441 else {
442 #ifdef CODA_VERBOSE
443 printf("%s: internal VOP_OPEN\n", __func__);
444 #endif
445 opened_internally = 1;
446 MARK_INT_GEN(CODA_OPEN_STATS);
447 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
448 #ifdef CODA_VERBOSE
449 printf("%s: Internally Opening %p\n", __func__, vp);
450 #endif
451 if (error) {
452 MARK_INT_FAIL(CODA_RDWR_STATS);
453 return(error);
454 }
455 cfvp = cp->c_ovp;
456 }
457 }
458
459 /* Have UFS handle the call. */
460 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
461 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
462
463 if (rw == UIO_READ) {
464 error = VOP_READ(cfvp, uiop, ioflag, cred);
465 } else {
466 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
467 }
468
469 if (error)
470 MARK_INT_FAIL(CODA_RDWR_STATS);
471 else
472 MARK_INT_SAT(CODA_RDWR_STATS);
473
474 /* Do an internal close if necessary. */
475 if (opened_internally) {
476 MARK_INT_GEN(CODA_CLOSE_STATS);
477 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
478 }
479
480 /* Invalidate cached attributes if writing. */
481 if (rw == UIO_WRITE)
482 cp->c_flags &= ~C_VATTR;
483 return(error);
484 }
485
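/*
 * coda_ioctl implements the pioctl interface: it is valid only on the
 * control object, resolves the user-supplied path to a coda vnode, and
 * forwards the request to Venus.
 */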
486 int
487 coda_ioctl(void *v)
488 {
489 /* true args */
490 struct vop_ioctl_args *ap = v;
491 vnode_t *vp = ap->a_vp;
492 int com = ap->a_command;
493 void *data = ap->a_data;
494 int flag = ap->a_fflag;
495 kauth_cred_t cred = ap->a_cred;
496 /* locals */
497 int error;
498 vnode_t *tvp;
499 struct PioctlData *iap = (struct PioctlData *)data;
500 namei_simple_flags_t sflags;
501
502 MARK_ENTRY(CODA_IOCTL_STATS);
503
504 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
505
506 	/* Don't check for operation on a dying object; for the ctlvp it
507 	   shouldn't matter. */
508
509 /* Must be control object to succeed. */
510 if (!IS_CTL_VP(vp)) {
511 MARK_INT_FAIL(CODA_IOCTL_STATS);
512 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
513 return (EOPNOTSUPP);
514 }
515 /* Look up the pathname. */
516
517 /* Should we use the name cache here? It would get it from
518 lookupname sooner or later anyway, right? */
519
520 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
521 error = namei_simple_user(iap->path, sflags, &tvp);
522
523 if (error) {
524 MARK_INT_FAIL(CODA_IOCTL_STATS);
525 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
526 __func__, error));)
527 return(error);
528 }
529
530 /*
531 * Make sure this is a coda style cnode, but it may be a
532 * different vfsp
533 */
534 /* XXX: this totally violates the comment about vtagtype in vnode.h */
535 if (tvp->v_tag != VT_CODA) {
536 vrele(tvp);
537 MARK_INT_FAIL(CODA_IOCTL_STATS);
538 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
539 __func__, iap->path));)
540 return(EINVAL);
541 }
542
543 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
544 vrele(tvp);
545 return(EINVAL);
546 }
547 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
548 cred, curlwp);
549
550 if (error)
551 MARK_INT_FAIL(CODA_IOCTL_STATS);
552 else
553 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
554
555 vrele(tvp);
556 return(error);
557 }
558
559 /*
560  * To reduce the cost of a user-level venus, we cache attributes in
561 * the kernel. Each cnode has storage allocated for an attribute. If
562 * c_vattr is valid, return a reference to it. Otherwise, get the
563 * attributes from venus and store them in the cnode. There is some
564  * question whether this method is a security leak.  But I think that in
565 * order to make this call, the user must have done a lookup and
566 * opened the file, and therefore should already have access.
567 */
568 int
569 coda_getattr(void *v)
570 {
571 /* true args */
572 struct vop_getattr_args *ap = v;
573 vnode_t *vp = ap->a_vp;
574 struct cnode *cp = VTOC(vp);
575 struct vattr *vap = ap->a_vap;
576 kauth_cred_t cred = ap->a_cred;
577 /* locals */
578 int error;
579
580 MARK_ENTRY(CODA_GETATTR_STATS);
581
582 /* Check for getattr of control object. */
583 if (IS_CTL_VP(vp)) {
584 MARK_INT_FAIL(CODA_GETATTR_STATS);
585 return(ENOENT);
586 }
587
588 /* Check to see if the attributes have already been cached */
589 if (VALID_VATTR(cp)) {
590 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
591 __func__, coda_f2s(&cp->c_fid)));})
592 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
593 print_vattr(&cp->c_vattr); )
594
595 *vap = cp->c_vattr;
596 MARK_INT_SAT(CODA_GETATTR_STATS);
597 return(0);
598 }
599
600 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
601
602 if (!error) {
603 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
604 __func__, coda_f2s(&cp->c_fid), error)); )
605
606 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
607 print_vattr(vap); )
608
609 /* If not open for write, store attributes in cnode */
610 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
611 cp->c_vattr = *vap;
612 cp->c_flags |= C_VATTR;
613 }
614
615 }
616 return(error);
617 }
618
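/*
 * Set attributes via Venus, and invalidate the cached attributes on
 * success so the next getattr refetches them.
 */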
619 int
620 coda_setattr(void *v)
621 {
622 /* true args */
623 struct vop_setattr_args *ap = v;
624 vnode_t *vp = ap->a_vp;
625 struct cnode *cp = VTOC(vp);
626 struct vattr *vap = ap->a_vap;
627 kauth_cred_t cred = ap->a_cred;
628 /* locals */
629 int error;
630
631 MARK_ENTRY(CODA_SETATTR_STATS);
632
633 /* Check for setattr of control object. */
634 if (IS_CTL_VP(vp)) {
635 MARK_INT_FAIL(CODA_SETATTR_STATS);
636 return(ENOENT);
637 }
638
639 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
640 print_vattr(vap);
641 }
642 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
643
644 if (!error)
645 cp->c_flags &= ~C_VATTR;
646
647 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
648 return(error);
649 }
650
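/*
 * Check access.  Only read access is allowed on the control object.
 * For directory lookup (VEXEC) a hit in the coda name cache is taken
 * as proof of lookup permission; everything else is asked of Venus.
 */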
651 int
652 coda_access(void *v)
653 {
654 /* true args */
655 struct vop_access_args *ap = v;
656 vnode_t *vp = ap->a_vp;
657 struct cnode *cp = VTOC(vp);
658 int mode = ap->a_mode;
659 kauth_cred_t cred = ap->a_cred;
660 /* locals */
661 int error;
662
663 MARK_ENTRY(CODA_ACCESS_STATS);
664
665 /* Check for access of control object. Only read access is
666 allowed on it. */
667 if (IS_CTL_VP(vp)) {
668 /* bogus hack - all will be marked as successes */
669 MARK_INT_SAT(CODA_ACCESS_STATS);
670 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
671 ? 0 : EACCES);
672 }
673
674 /*
675 * if the file is a directory, and we are checking exec (eg lookup)
676 * access, and the file is in the namecache, then the user must have
677 * lookup access to it.
678 */
679 if (coda_access_cache) {
680 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
681 if (coda_nc_lookup(cp, ".", 1, cred)) {
682 MARK_INT_SAT(CODA_ACCESS_STATS);
683 return(0); /* it was in the cache */
684 }
685 }
686 }
687
688 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
689
690 return(error);
691 }
692
693 /*
694 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
695 * done. If a buffer has been saved in anticipation of a coda_create or
696 * a coda_remove, delete it.
697 */
698 /* ARGSUSED */
699 int
700 coda_abortop(void *v)
701 {
702 /* true args */
703 struct vop_abortop_args /* {
704 vnode_t *a_dvp;
705 struct componentname *a_cnp;
706 } */ *ap = v;
707
708 (void)ap;
709 /* upcall decl */
710 /* locals */
711
712 return (0);
713 }
714
715 int
716 coda_readlink(void *v)
717 {
718 /* true args */
719 struct vop_readlink_args *ap = v;
720 vnode_t *vp = ap->a_vp;
721 struct cnode *cp = VTOC(vp);
722 struct uio *uiop = ap->a_uio;
723 kauth_cred_t cred = ap->a_cred;
724 /* locals */
725 struct lwp *l = curlwp;
726 int error;
727 char *str;
728 int len;
729
730 MARK_ENTRY(CODA_READLINK_STATS);
731
732 /* Check for readlink of control object. */
733 if (IS_CTL_VP(vp)) {
734 MARK_INT_FAIL(CODA_READLINK_STATS);
735 return(ENOENT);
736 }
737
738 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
739 uiop->uio_rw = UIO_READ;
740 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
741 if (error)
742 MARK_INT_FAIL(CODA_READLINK_STATS);
743 else
744 MARK_INT_SAT(CODA_READLINK_STATS);
745 return(error);
746 }
747
748 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
749
750 if (!error) {
751 uiop->uio_rw = UIO_READ;
752 error = uiomove(str, len, uiop);
753
754 if (coda_symlink_cache) {
755 cp->c_symlink = str;
756 cp->c_symlen = len;
757 cp->c_flags |= C_SYMLINK;
758 } else
759 CODA_FREE(str, len);
760 }
761
762 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
763 return(error);
764 }
765
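/*
 * Fsync the container file (if any), then ask Venus to fsync the
 * object, unless Venus is purging it or the file system is unmounting.
 */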
766 int
767 coda_fsync(void *v)
768 {
769 /* true args */
770 struct vop_fsync_args *ap = v;
771 vnode_t *vp = ap->a_vp;
772 struct cnode *cp = VTOC(vp);
773 kauth_cred_t cred = ap->a_cred;
774 /* locals */
775 vnode_t *convp = cp->c_ovp;
776 int error;
777
778 MARK_ENTRY(CODA_FSYNC_STATS);
779
780 /* Check for fsync on an unmounting object */
781 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
782 * after an unmount has been initiated. This is a Bad Thing,
783 * which we have to avoid. Not a legitimate failure for stats.
784 */
785 if (IS_UNMOUNTING(cp)) {
786 return(ENODEV);
787 }
788
789 /* Check for fsync of control object. */
790 if (IS_CTL_VP(vp)) {
791 MARK_INT_SAT(CODA_FSYNC_STATS);
792 return(0);
793 }
794
795 if (convp)
796 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
797
798 /*
799  * We can expect fsync on any vnode at all if venus is purging it.
800 * Venus can't very well answer the fsync request, now can it?
801 * Hopefully, it won't have to, because hopefully, venus preserves
802 * the (possibly untrue) invariant that it never purges an open
803 * vnode. Hopefully.
804 */
805 if (cp->c_flags & C_PURGING) {
806 return(0);
807 }
808
809 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
810
811 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
812 return(error);
813 }
814
815 /*
816 * vp is locked on entry, and we must unlock it.
817 * XXX This routine is suspect and probably needs rewriting.
818 */
819 int
820 coda_inactive(void *v)
821 {
822 /* true args */
823 struct vop_inactive_args *ap = v;
824 vnode_t *vp = ap->a_vp;
825 struct cnode *cp = VTOC(vp);
826 kauth_cred_t cred __unused = NULL;
827
828 /* We don't need to send inactive to venus - DCS */
829 MARK_ENTRY(CODA_INACTIVE_STATS);
830
831 if (IS_CTL_VP(vp)) {
832 MARK_INT_SAT(CODA_INACTIVE_STATS);
833 return 0;
834 }
835
836 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
837 coda_f2s(&cp->c_fid), vp->v_mount));)
838
839 /* If an array has been allocated to hold the symlink, deallocate it */
840 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
841 if (cp->c_symlink == NULL)
842 panic("%s: null symlink pointer in cnode", __func__);
843
844 CODA_FREE(cp->c_symlink, cp->c_symlen);
845 cp->c_flags &= ~C_SYMLINK;
846 cp->c_symlen = 0;
847 }
848
849 /* Remove it from the table so it can't be found. */
850 coda_unsave(cp);
851 if (vp->v_mount->mnt_data == NULL) {
852 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
853 panic("badness in coda_inactive");
854 }
855
856 #ifdef CODA_VERBOSE
857 /* Sanity checks that perhaps should be panic. */
858 if (vp->v_usecount > 1)
859 printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
860 if (cp->c_ovp != NULL)
861 printf("%s: %p ovp != NULL\n", __func__, vp);
862 #endif
863 /* XXX Do we need to VOP_CLOSE container vnodes? */
864 VOP_UNLOCK(vp);
865 if (!IS_UNMOUNTING(cp))
866 *ap->a_recycle = true;
867
868 MARK_INT_SAT(CODA_INACTIVE_STATS);
869 return(0);
870 }
871
872 /*
873 * Coda does not use the normal namecache, but a private version.
874 * Consider how to use the standard facility instead.
875 */
876 int
877 coda_lookup(void *v)
878 {
879 /* true args */
880 struct vop_lookup_args *ap = v;
881 /* (locked) vnode of dir in which to do lookup */
882 vnode_t *dvp = ap->a_dvp;
883 struct cnode *dcp = VTOC(dvp);
884 /* output variable for result */
885 vnode_t **vpp = ap->a_vpp;
886 /* name to lookup */
887 struct componentname *cnp = ap->a_cnp;
888 kauth_cred_t cred = cnp->cn_cred;
889 struct lwp *l = curlwp;
890 /* locals */
891 struct cnode *cp;
892 const char *nm = cnp->cn_nameptr;
893 int len = cnp->cn_namelen;
894 int flags = cnp->cn_flags;
895 int isdot;
896 CodaFid VFid;
897 int vtype;
898 int error = 0;
899
900 MARK_ENTRY(CODA_LOOKUP_STATS);
901
902 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
903 nm, coda_f2s(&dcp->c_fid)));)
904
905 /*
906 * XXX componentname flags in MODMASK are not handled at all
907 */
908
909 /*
910 * The overall strategy is to switch on the lookup type and get a
911 * result vnode that is vref'd but not locked. Then, the code at
912 * exit: switches on ., .., and regular lookups and does the right
913 * locking.
914 */
915
916 /* Check for lookup of control object. */
917 if (IS_CTL_NAME(dvp, nm, len)) {
918 *vpp = coda_ctlvp;
919 vref(*vpp);
920 MARK_INT_SAT(CODA_LOOKUP_STATS);
921 goto exit;
922 }
923
924 /* Avoid trying to hand venus an unreasonably long name. */
925 if (len+1 > CODA_MAXNAMLEN) {
926 MARK_INT_FAIL(CODA_LOOKUP_STATS);
927 	CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
928 __func__, coda_f2s(&dcp->c_fid), nm));)
929 *vpp = (vnode_t *)0;
930 error = EINVAL;
931 goto exit;
932 }
933
934 /*
935 * XXX Check for DOT lookups, and short circuit all the caches,
936 * just doing an extra vref. (venus guarantees that lookup of
937 * . returns self.)
938 */
939 isdot = (len == 1 && nm[0] == '.');
940
941 /*
942 * Try to resolve the lookup in the minicache. If that fails, ask
943 * venus to do the lookup. XXX The interaction between vnode
944 * locking and any locking that coda does is not clear.
945 */
946 cp = coda_nc_lookup(dcp, nm, len, cred);
947 if (cp) {
948 *vpp = CTOV(cp);
949 vref(*vpp);
950 CODADEBUG(CODA_LOOKUP,
951 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
952 } else {
953 /* The name wasn't cached, so ask Venus. */
954 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
955 &vtype);
956
957 if (error) {
958 MARK_INT_FAIL(CODA_LOOKUP_STATS);
959 CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
960 __func__, coda_f2s(&dcp->c_fid), nm, error));)
961 *vpp = (vnode_t *)0;
962 } else {
963 MARK_INT_SAT(CODA_LOOKUP_STATS);
964 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
965 __func__, coda_f2s(&VFid), vtype, error)); )
966
967 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
968 *vpp = CTOV(cp);
969 /* vpp is now vrefed. */
970
971 /*
972 * Unless this vnode is marked CODA_NOCACHE, enter it into
973 * the coda name cache to avoid a future venus round-trip.
974 * XXX Interaction with componentname NOCACHE is unclear.
975 */
976 if (!(vtype & CODA_NOCACHE))
977 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
978 }
979 }
980
981 exit:
982 /*
983 * If we are creating, and this was the last name to be looked up,
984 * and the error was ENOENT, then make the leaf NULL and return
985 * success.
986 * XXX Check against new lookup rules.
987 */
988 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
989 && (cnp->cn_flags & ISLASTCN)
990 && (error == ENOENT))
991 {
992 error = EJUSTRETURN;
993 *ap->a_vpp = NULL;
994 }
995
996 /*
997 * If the lookup succeeded, we must generally lock the returned
998 * vnode. This could be a ., .., or normal lookup. See
999 * vnodeops(9) for the details.
1000 */
1001 /*
1002 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1003 * somehow, and remove LK_RETRY.
1004 */
1005 if (!error || (error == EJUSTRETURN)) {
1006 /* Lookup has a value and it isn't "."? */
1007 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1008 if (flags & ISDOTDOT)
1009 /* ..: unlock parent */
1010 VOP_UNLOCK(dvp);
1011 /* all but .: lock child */
1012 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1013 if (flags & ISDOTDOT)
1014 /* ..: relock parent */
1015 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1016 }
1017 /* else .: leave dvp locked */
1018 } else {
1019 /* The lookup failed, so return NULL. Leave dvp locked. */
1020 *ap->a_vpp = NULL;
1021 }
1022 return(error);
1023 }
1024
1025 /*ARGSUSED*/
1026 int
1027 coda_create(void *v)
1028 {
1029 /* true args */
1030 struct vop_create_args *ap = v;
1031 vnode_t *dvp = ap->a_dvp;
1032 struct cnode *dcp = VTOC(dvp);
1033 struct vattr *va = ap->a_vap;
1034 int exclusive = 1;
1035 int mode = ap->a_vap->va_mode;
1036 vnode_t **vpp = ap->a_vpp;
1037 struct componentname *cnp = ap->a_cnp;
1038 kauth_cred_t cred = cnp->cn_cred;
1039 struct lwp *l = curlwp;
1040 /* locals */
1041 int error;
1042 struct cnode *cp;
1043 const char *nm = cnp->cn_nameptr;
1044 int len = cnp->cn_namelen;
1045 CodaFid VFid;
1046 struct vattr attr;
1047
1048 MARK_ENTRY(CODA_CREATE_STATS);
1049
1050 /* All creates are exclusive XXX */
1051 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1052
1053 /* Check for create of control object. */
1054 if (IS_CTL_NAME(dvp, nm, len)) {
1055 *vpp = (vnode_t *)0;
1056 MARK_INT_FAIL(CODA_CREATE_STATS);
1057 return(EACCES);
1058 }
1059
1060 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1061
1062 if (!error) {
1063
1064 /*
1065 * XXX Violation of venus/kernel invariants is a difficult case,
1066 * but venus should not be able to cause a panic.
1067 */
1068 /* If this is an exclusive create, panic if the file already exists. */
1069 /* Venus should have detected the file and reported EEXIST. */
1070
1071 if ((exclusive == 1) &&
1072 (coda_find(&VFid) != NULL))
1073 panic("cnode existed for newly created file!");
1074
1075 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1076 *vpp = CTOV(cp);
1077
1078 /* XXX vnodeops doesn't say this argument can be changed. */
1079 /* Update va to reflect the new attributes. */
1080 (*va) = attr;
1081
1082 /* Update the attribute cache and mark it as valid */
1083 if (coda_attr_cache) {
1084 VTOC(*vpp)->c_vattr = attr;
1085 VTOC(*vpp)->c_flags |= C_VATTR;
1086 }
1087
1088 /* Invalidate parent's attr cache (modification time has changed). */
1089 VTOC(dvp)->c_flags &= ~C_VATTR;
1090
1091 /* enter the new vnode in the Name Cache */
1092 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1093
1094 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1095 coda_f2s(&VFid), error)); )
1096 } else {
1097 *vpp = (vnode_t *)0;
1098 CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1099 error));)
1100 }
1101
1102 /*
1103 * vnodeops(9) says that we must unlock the parent and lock the child.
1104 * XXX Should we lock the child first?
1105 */
1106 vput(dvp);
1107 if (!error) {
1108 #ifdef CODA_VERBOSE
1109 if ((cnp->cn_flags & LOCKLEAF) == 0)
1110 /* This should not happen; flags are for lookup only. */
1111 printf("%s: LOCKLEAF not set!\n", __func__);
1112
1113 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE)))
1114 /* XXX Perhaps avoid this panic. */
1115 panic("%s: couldn't lock child", __func__);
1116 #endif
1117 }
1118
1119 return(error);
1120 }
1121
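/*
 * Remove a file: fix up the name and attribute caches first, then ask
 * Venus to perform the removal.  Both vnodes are released (vput/vrele)
 * before returning.
 */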
1122 int
1123 coda_remove(void *v)
1124 {
1125 /* true args */
1126 struct vop_remove_args *ap = v;
1127 vnode_t *dvp = ap->a_dvp;
1128 struct cnode *cp = VTOC(dvp);
1129 vnode_t *vp = ap->a_vp;
1130 struct componentname *cnp = ap->a_cnp;
1131 kauth_cred_t cred = cnp->cn_cred;
1132 struct lwp *l = curlwp;
1133 /* locals */
1134 int error;
1135 const char *nm = cnp->cn_nameptr;
1136 int len = cnp->cn_namelen;
1137 struct cnode *tp;
1138
1139 MARK_ENTRY(CODA_REMOVE_STATS);
1140
1141 CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1142 nm, coda_f2s(&cp->c_fid)));)
1143
1144 /* Remove the file's entry from the CODA Name Cache */
1145 /* We're being conservative here, it might be that this person
1146 * doesn't really have sufficient access to delete the file
1147 * but we feel zapping the entry won't really hurt anyone -- dcs
1148 */
1149 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1150 * exist, and one is removed, the link count on the other will be
1151 * off by 1. We could either invalidate the attrs if cached, or
1152 * fix them. I'll try to fix them. DCS 11/8/94
1153 */
1154 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1155 if (tp) {
1156 if (VALID_VATTR(tp)) { /* If attrs are cached */
1157 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1158 tp->c_vattr.va_nlink--;
1159 }
1160 }
1161
1162 coda_nc_zapfile(VTOC(dvp), nm, len);
1163 /* No need to flush it if it doesn't exist! */
1164 }
1165 /* Invalidate the parent's attr cache, the modification time has changed */
1166 VTOC(dvp)->c_flags &= ~C_VATTR;
1167
1168 /* Check for remove of control object. */
1169 if (IS_CTL_NAME(dvp, nm, len)) {
1170 MARK_INT_FAIL(CODA_REMOVE_STATS);
1171 return(ENOENT);
1172 }
1173
1174 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1175
1176 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1177
1178 /*
1179 * Unlock parent and child (avoiding double if ".").
1180 */
1181 if (dvp == vp) {
1182 vrele(vp);
1183 } else {
1184 vput(vp);
1185 }
1186 vput(dvp);
1187
1188 return(error);
1189 }
1190
1191 /*
1192 * dvp is the directory where the link is to go, and is locked.
1193 * vp is the object to be linked to, and is unlocked.
1194 * At exit, we must unlock dvp, and vput dvp.
1195 */
1196 int
1197 coda_link(void *v)
1198 {
1199 /* true args */
1200 struct vop_link_args *ap = v;
1201 vnode_t *vp = ap->a_vp;
1202 struct cnode *cp = VTOC(vp);
1203 vnode_t *dvp = ap->a_dvp;
1204 struct cnode *dcp = VTOC(dvp);
1205 struct componentname *cnp = ap->a_cnp;
1206 kauth_cred_t cred = cnp->cn_cred;
1207 struct lwp *l = curlwp;
1208 /* locals */
1209 int error;
1210 const char *nm = cnp->cn_nameptr;
1211 int len = cnp->cn_namelen;
1212
1213 MARK_ENTRY(CODA_LINK_STATS);
1214
1215 	if (codadebug & CODADBGMSK(CODA_LINK)) {
1222 		myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1223 		myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1225 	}
1226
1227 /* Check for link to/from control object. */
1228 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1229 MARK_INT_FAIL(CODA_LINK_STATS);
1230 return(EACCES);
1231 }
1232
1233 /* If linking . to a name, error out earlier. */
1234 if (vp == dvp) {
1235 #ifdef CODA_VERBOSE
1236 	printf("%s: vp == dvp\n", __func__);
1237 #endif
1238 error = EISDIR;
1239 goto exit;
1240 }
1241
1242 /* XXX Why does venus_link need the vnode to be locked?*/
1243 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1244 #ifdef CODA_VERBOSE
1245 printf("%s: couldn't lock vnode %p\n", __func__, vp);
1246 #endif
1247 error = EFAULT; /* XXX better value */
1248 goto exit;
1249 }
1250 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1251 VOP_UNLOCK(vp);
1252
1253 /* Invalidate parent's attr cache (the modification time has changed). */
1254 VTOC(dvp)->c_flags &= ~C_VATTR;
1255 /* Invalidate child's attr cache (XXX why). */
1256 VTOC(vp)->c_flags &= ~C_VATTR;
1257
1258 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1259
1260 exit:
1261 vput(dvp);
1262 return(error);
1263 }
1264
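/*
 * Rename: purge the affected name cache entries (including ".." when a
 * directory moves to a new parent), then hand the operation to Venus.
 */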
1265 int
1266 coda_rename(void *v)
1267 {
1268 /* true args */
1269 struct vop_rename_args *ap = v;
1270 vnode_t *odvp = ap->a_fdvp;
1271 struct cnode *odcp = VTOC(odvp);
1272 struct componentname *fcnp = ap->a_fcnp;
1273 vnode_t *ndvp = ap->a_tdvp;
1274 struct cnode *ndcp = VTOC(ndvp);
1275 struct componentname *tcnp = ap->a_tcnp;
1276 kauth_cred_t cred = fcnp->cn_cred;
1277 struct lwp *l = curlwp;
1278 /* true args */
1279 int error;
1280 const char *fnm = fcnp->cn_nameptr;
1281 int flen = fcnp->cn_namelen;
1282 const char *tnm = tcnp->cn_nameptr;
1283 int tlen = tcnp->cn_namelen;
1284
1285 MARK_ENTRY(CODA_RENAME_STATS);
1286
1287 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1288 This could be Bad. XXX */
1289 #ifdef OLD_DIAGNOSTIC
1290 if ((fcnp->cn_cred != tcnp->cn_cred)
1291 || (fcnp->cn_lwp != tcnp->cn_lwp))
1292 {
1293 panic("%s: component names don't agree", __func__);
1294 }
1295 #endif
1296
1297 /* Check for rename involving control object. */
1298 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1299 MARK_INT_FAIL(CODA_RENAME_STATS);
1300 return(EACCES);
1301 }
1302
1303 /* Problem with moving directories -- need to flush entry for .. */
1304 if (odvp != ndvp) {
1305 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1306 if (ovcp) {
1307 vnode_t *ovp = CTOV(ovcp);
1308 if ((ovp) &&
1309 (ovp->v_type == VDIR)) /* If it's a directory */
1310 coda_nc_zapfile(VTOC(ovp),"..", 2);
1311 }
1312 }
1313
1314 /* Remove the entries for both source and target files */
1315 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1316 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1317
1318 /* Invalidate the parent's attr cache, the modification time has changed */
1319 VTOC(odvp)->c_flags &= ~C_VATTR;
1320 VTOC(ndvp)->c_flags &= ~C_VATTR;
1321
1322 if (flen+1 > CODA_MAXNAMLEN) {
1323 MARK_INT_FAIL(CODA_RENAME_STATS);
1324 error = EINVAL;
1325 goto exit;
1326 }
1327
1328 if (tlen+1 > CODA_MAXNAMLEN) {
1329 MARK_INT_FAIL(CODA_RENAME_STATS);
1330 error = EINVAL;
1331 goto exit;
1332 }
1333
1334 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1335
1336 exit:
1337 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1338 	/* XXX - do we need to call cache_purge() on the moved vnode? */
1339 cache_purge(ap->a_fvp);
1340
1341 /* It seems to be incumbent on us to drop locks on all four vnodes */
1342 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1343
1344 vrele(ap->a_fvp);
1345 vrele(odvp);
1346
1347 if (ap->a_tvp) {
1348 if (ap->a_tvp == ndvp) {
1349 vrele(ap->a_tvp);
1350 } else {
1351 vput(ap->a_tvp);
1352 }
1353 }
1354
1355 vput(ndvp);
1356 return(error);
1357 }
1358
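/*
 * Make a directory via Venus, create a cnode for the result, and prime
 * the name cache with the new entry as well as "." and "..".
 */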
1359 int
1360 coda_mkdir(void *v)
1361 {
1362 /* true args */
1363 struct vop_mkdir_args *ap = v;
1364 vnode_t *dvp = ap->a_dvp;
1365 struct cnode *dcp = VTOC(dvp);
1366 struct componentname *cnp = ap->a_cnp;
1367 struct vattr *va = ap->a_vap;
1368 vnode_t **vpp = ap->a_vpp;
1369 kauth_cred_t cred = cnp->cn_cred;
1370 struct lwp *l = curlwp;
1371 /* locals */
1372 int error;
1373 const char *nm = cnp->cn_nameptr;
1374 int len = cnp->cn_namelen;
1375 struct cnode *cp;
1376 CodaFid VFid;
1377 struct vattr ova;
1378
1379 MARK_ENTRY(CODA_MKDIR_STATS);
1380
1381 	/* Check for mkdir of control object. */
1382 if (IS_CTL_NAME(dvp, nm, len)) {
1383 *vpp = (vnode_t *)0;
1384 MARK_INT_FAIL(CODA_MKDIR_STATS);
1385 return(EACCES);
1386 }
1387
1388 if (len+1 > CODA_MAXNAMLEN) {
1389 *vpp = (vnode_t *)0;
1390 MARK_INT_FAIL(CODA_MKDIR_STATS);
1391 return(EACCES);
1392 }
1393
1394 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1395
1396 if (!error) {
1397 if (coda_find(&VFid) != NULL)
1398 panic("cnode existed for newly created directory!");
1399
1400
1401 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1402 *vpp = CTOV(cp);
1403
1404 /* enter the new vnode in the Name Cache */
1405 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1406
1407 /* as a side effect, enter "." and ".." for the directory */
1408 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1409 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1410
1411 if (coda_attr_cache) {
1412 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1413 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1414 }
1415
1416 /* Invalidate the parent's attr cache, the modification time has changed */
1417 VTOC(dvp)->c_flags &= ~C_VATTR;
1418
1419 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1420 coda_f2s(&VFid), error)); )
1421 } else {
1422 *vpp = (vnode_t *)0;
1423 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1424 }
1425
1426 /*
1427 * Currently, all mkdirs explicitly vput their dvp's.
1428 * It also appears that we *must* lock the vpp, since
1429 * lockleaf isn't set, but someone down the road is going
1430 * to try to unlock the new directory.
1431 */
1432 vput(dvp);
1433 if (!error) {
1434 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1435 panic("%s: couldn't lock child", __func__);
1436 }
1437 }
1438
1439 return(error);
1440 }
1441
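/*
 * Remove a directory: purge the name cache entries for the victim and
 * its children, then ask Venus to do the removal.
 */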
1442 int
1443 coda_rmdir(void *v)
1444 {
1445 /* true args */
1446 struct vop_rmdir_args *ap = v;
1447 vnode_t *dvp = ap->a_dvp;
1448 struct cnode *dcp = VTOC(dvp);
1449 vnode_t *vp = ap->a_vp;
1450 struct componentname *cnp = ap->a_cnp;
1451 kauth_cred_t cred = cnp->cn_cred;
1452 struct lwp *l = curlwp;
1453 /* true args */
1454 int error;
1455 const char *nm = cnp->cn_nameptr;
1456 int len = cnp->cn_namelen;
1457 struct cnode *cp;
1458
1459 MARK_ENTRY(CODA_RMDIR_STATS);
1460
1461 /* Check for rmdir of control object. */
1462 if (IS_CTL_NAME(dvp, nm, len)) {
1463 MARK_INT_FAIL(CODA_RMDIR_STATS);
1464 return(ENOENT);
1465 }
1466
1467 /* Can't remove . in self. */
1468 if (dvp == vp) {
1469 #ifdef CODA_VERBOSE
1470 printf("%s: dvp == vp\n", __func__);
1471 #endif
1472 error = EINVAL;
1473 goto exit;
1474 }
1475
1476 /*
1477 * The caller may not have adequate permissions, and the venus
1478 * operation may fail, but it doesn't hurt from a correctness
1479 * viewpoint to invalidate cache entries.
1480 * XXX Why isn't this done after the venus_rmdir call?
1481 */
1482 /* Look up child in name cache (by name, from parent). */
1483 cp = coda_nc_lookup(dcp, nm, len, cred);
1484 /* If found, remove all children of the child (., ..). */
1485 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1486
1487 /* Remove child's own entry. */
1488 coda_nc_zapfile(dcp, nm, len);
1489
1490 /* Invalidate parent's attr cache (the modification time has changed). */
1491 dcp->c_flags &= ~C_VATTR;
1492
1493 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1494
1495 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1496
1497 exit:
1498 /* vput both vnodes */
1499 vput(dvp);
1500 if (dvp == vp) {
1501 vrele(vp);
1502 } else {
1503 vput(vp);
1504 }
1505
1506 return(error);
1507 }
1508
1509 int
1510 coda_symlink(void *v)
1511 {
1512 /* true args */
1513 struct vop_symlink_args *ap = v;
1514 vnode_t *dvp = ap->a_dvp;
1515 struct cnode *dcp = VTOC(dvp);
1516 /* a_vpp is used in place below */
1517 struct componentname *cnp = ap->a_cnp;
1518 struct vattr *tva = ap->a_vap;
1519 char *path = ap->a_target;
1520 kauth_cred_t cred = cnp->cn_cred;
1521 struct lwp *l = curlwp;
1522 /* locals */
1523 int error;
1524 u_long saved_cn_flags;
1525 const char *nm = cnp->cn_nameptr;
1526 int len = cnp->cn_namelen;
1527 int plen = strlen(path);
1528
1529 /*
1530 * Here's the strategy for the moment: perform the symlink, then
1531 * do a lookup to grab the resulting vnode. I know this requires
1532  * two communications with Venus for a new symbolic link, but
1533 * that's the way the ball bounces. I don't yet want to change
1534 * the way the Mach symlink works. When Mach support is
1535 * deprecated, we should change symlink so that the common case
1536 * returns the resultant vnode in a vpp argument.
1537 */
1538
1539 MARK_ENTRY(CODA_SYMLINK_STATS);
1540
1541 /* Check for symlink of control object. */
1542 if (IS_CTL_NAME(dvp, nm, len)) {
1543 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1544 error = EACCES;
1545 goto exit;
1546 }
1547
1548 if (plen+1 > CODA_MAXPATHLEN) {
1549 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1550 error = EINVAL;
1551 goto exit;
1552 }
1553
1554 if (len+1 > CODA_MAXNAMLEN) {
1555 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1556 error = EINVAL;
1557 goto exit;
1558 }
1559
1560 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1561
1562 /* Invalidate the parent's attr cache (modification time has changed). */
1563 dcp->c_flags &= ~C_VATTR;
1564
1565 if (!error) {
1566 /*
1567 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1568 * these are defined only for VOP_LOOKUP. We desire to reuse
1569 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1570 * stray flags passed to us. Such stray flags can occur because
1571 * sys_symlink makes a namei call and then reuses the
1572 * componentname structure.
1573 */
1574 /*
1575 * XXX Arguably we should create our own componentname structure
1576 * and not reuse the one that was passed in.
1577 */
1578 saved_cn_flags = cnp->cn_flags;
1579 cnp->cn_flags &= ~(MODMASK | OPMASK);
1580 cnp->cn_flags |= LOOKUP;
1581 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1582 cnp->cn_flags = saved_cn_flags;
1583 /* Either an error occurs, or ap->a_vpp is locked. */
1584 }
1585
1586 exit:
1587 	/* unlock and release the parent */
1588 vput(dvp);
1589
1590 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1591 return(error);
1592 }
1593
1594 /*
1595 * Read directory entries.
1596 */
1597 int
1598 coda_readdir(void *v)
1599 {
1600 /* true args */
1601 struct vop_readdir_args *ap = v;
1602 vnode_t *vp = ap->a_vp;
1603 struct cnode *cp = VTOC(vp);
1604 struct uio *uiop = ap->a_uio;
1605 kauth_cred_t cred = ap->a_cred;
1606 int *eofflag = ap->a_eofflag;
1607 off_t **cookies = ap->a_cookies;
1608 int *ncookies = ap->a_ncookies;
1609 /* upcall decl */
1610 /* locals */
1611 int error = 0;
1612
1613 MARK_ENTRY(CODA_READDIR_STATS);
1614
1615 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1616 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1617 (long long) uiop->uio_offset)); )
1618
1619 /* Check for readdir of control object. */
1620 if (IS_CTL_VP(vp)) {
1621 MARK_INT_FAIL(CODA_READDIR_STATS);
1622 return(ENOENT);
1623 }
1624
1625 {
1626 /* Redirect the request to UFS. */
1627
1628 /* If directory is not already open do an "internal open" on it. */
1629 int opened_internally = 0;
1630 if (cp->c_ovp == NULL) {
1631 opened_internally = 1;
1632 MARK_INT_GEN(CODA_OPEN_STATS);
1633 error = VOP_OPEN(vp, FREAD, cred);
1634 #ifdef CODA_VERBOSE
1635 printf("%s: Internally Opening %p\n", __func__, vp);
1636 #endif
1637 if (error) return(error);
1638 } else
1639 vp = cp->c_ovp;
1640
1641 /* Have UFS handle the call. */
1642 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1643 __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); )
1644 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1645 if (error)
1646 MARK_INT_FAIL(CODA_READDIR_STATS);
1647 else
1648 MARK_INT_SAT(CODA_READDIR_STATS);
1649
1650 /* Do an "internal close" if necessary. */
1651 if (opened_internally) {
1652 MARK_INT_GEN(CODA_CLOSE_STATS);
1653 (void)VOP_CLOSE(vp, FREAD, cred);
1654 }
1655 }
1656
1657 return(error);
1658 }
1659
1660 /*
1661 * Convert from file system blocks to device blocks
1662 */
1663 int
1664 coda_bmap(void *v)
1665 {
1666 /* XXX on the global proc */
1667 /* true args */
1668 struct vop_bmap_args *ap = v;
1669 vnode_t *vp __unused = ap->a_vp; /* file's vnode */
1670 daddr_t bn __unused = ap->a_bn; /* fs block number */
1671 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */
1672 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1673 struct lwp *l __unused = curlwp;
1674 /* upcall decl */
1675 /* locals */
1676
1677 *vpp = (vnode_t *)0;
1678 myprintf(("coda_bmap called!\n"));
1679 return(EINVAL);
1680 }
1681
1682 /*
1683 * I don't think the following two things are used anywhere, so I've
1684 * commented them out
1685 *
1686 * struct buf *async_bufhead;
1687 * int async_daemon_count;
1688 */
1689 int
1690 coda_strategy(void *v)
1691 {
1692 /* true args */
1693 struct vop_strategy_args *ap = v;
1694 struct buf *bp __unused = ap->a_bp;
1695 struct lwp *l __unused = curlwp;
1696 /* upcall decl */
1697 /* locals */
1698
1699 	myprintf(("coda_strategy called!\n"));
1700 return(EINVAL);
1701 }
1702
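/*
 * Reclaim the cnode when the vnode is being destroyed.  The container
 * vnode should normally have been released by coda_inactive already,
 * except during forced unmount.
 */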
1703 int
1704 coda_reclaim(void *v)
1705 {
1706 /* true args */
1707 struct vop_reclaim_args *ap = v;
1708 vnode_t *vp = ap->a_vp;
1709 struct cnode *cp = VTOC(vp);
1710 /* upcall decl */
1711 /* locals */
1712
1713 /*
1714 * Forced unmount/flush will let vnodes with non zero use be destroyed!
1715 */
1716 ENTRY;
1717
1718 if (IS_UNMOUNTING(cp)) {
1719 #ifdef DEBUG
1720 if (VTOC(vp)->c_ovp) {
1722 			printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1723 }
1724 #endif
1725 } else {
1726 #ifdef OLD_DIAGNOSTIC
1727 if (vp->v_usecount != 0)
1728 		printf("%s: pushing active %p\n", __func__, vp);
1729 if (VTOC(vp)->c_ovp) {
1730 panic("%s: c_ovp not void", __func__);
1731 }
1732 #endif
1733 }
1734 coda_free(VTOC(vp));
1735 SET_VTOC(vp) = NULL;
1736 return (0);
1737 }
1738
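/*
 * The vnode lock operations (lock, unlock, islocked) are delegated to
 * the genfs implementations; coda_lockdebug only adds debug output.
 */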
1739 int
1740 coda_lock(void *v)
1741 {
1742 /* true args */
1743 struct vop_lock_args *ap = v;
1744 vnode_t *vp = ap->a_vp;
1745 struct cnode *cp = VTOC(vp);
1746 /* upcall decl */
1747 /* locals */
1748
1749 ENTRY;
1750
1751 if (coda_lockdebug) {
1752 myprintf(("Attempting lock on %s\n",
1753 coda_f2s(&cp->c_fid)));
1754 }
1755
1756 return genfs_lock(v);
1757 }
1758
1759 int
1760 coda_unlock(void *v)
1761 {
1762 /* true args */
1763 struct vop_unlock_args *ap = v;
1764 vnode_t *vp = ap->a_vp;
1765 struct cnode *cp = VTOC(vp);
1766 /* upcall decl */
1767 /* locals */
1768
1769 ENTRY;
1770 if (coda_lockdebug) {
1771 myprintf(("Attempting unlock on %s\n",
1772 coda_f2s(&cp->c_fid)));
1773 }
1774
1775 return genfs_unlock(v);
1776 }
1777
1778 int
1779 coda_islocked(void *v)
1780 {
1781 /* true args */
1782 ENTRY;
1783
1784 return genfs_islocked(v);
1785 }
1786
1787 /*
1788 * Given a device and inode, obtain a locked vnode. One reference is
1789 * obtained and passed back to the caller.
1790 */
1791 int
1792 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1793 {
1794 int error;
1795 struct mount *mp;
1796
1797 /* Obtain mount point structure from device. */
1798 if (!(mp = devtomp(dev))) {
1799 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1800 (unsigned long long)dev));
1801 return(ENXIO);
1802 }
1803
1804 /*
1805 * Obtain vnode from mount point and inode.
1806 * XXX VFS_VGET does not clearly define locked/referenced state of
1807 * returned vnode.
1808 */
1809 error = VFS_VGET(mp, ino, vpp);
1810 if (error) {
1811 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1812 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1813 return(ENOENT);
1814 }
1815 /* share the underlying vnode lock with the coda vnode */
1816 mutex_obj_hold((*vpp)->v_interlock);
1817 uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock);
1818 return(0);
1819 }
1820
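/* Print the contents of a vattr, for CODADEBUG output. */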
1821 void
1822 print_vattr(struct vattr *attr)
1823 {
1824 const char *typestr;
1825
1826 switch (attr->va_type) {
1827 case VNON:
1828 typestr = "VNON";
1829 break;
1830 case VREG:
1831 typestr = "VREG";
1832 break;
1833 case VDIR:
1834 typestr = "VDIR";
1835 break;
1836 case VBLK:
1837 typestr = "VBLK";
1838 break;
1839 case VCHR:
1840 typestr = "VCHR";
1841 break;
1842 case VLNK:
1843 typestr = "VLNK";
1844 break;
1845 case VSOCK:
1846 typestr = "VSCK";
1847 break;
1848 case VFIFO:
1849 typestr = "VFFO";
1850 break;
1851 case VBAD:
1852 typestr = "VBAD";
1853 break;
1854 default:
1855 typestr = "????";
1856 break;
1857 }
1858
1859
1860 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1861 typestr, (int)attr->va_mode, (int)attr->va_uid,
1862 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1863
1864 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1865 (int)attr->va_fileid, (int)attr->va_nlink,
1866 (int)attr->va_size,
1867 (int)attr->va_blocksize,(int)attr->va_bytes));
1868 myprintf((" gen %ld flags %ld vaflags %d\n",
1869 attr->va_gen, attr->va_flags, attr->va_vaflags));
1870 myprintf((" atime sec %d nsec %d\n",
1871 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1872 myprintf((" mtime sec %d nsec %d\n",
1873 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1874 myprintf((" ctime sec %d nsec %d\n",
1875 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1876 }
1877
1878 /* How to print a ucred */
1879 void
1880 print_cred(kauth_cred_t cred)
1881 {
1882
1883 uint16_t ngroups;
1884 int i;
1885
1886 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1887 kauth_cred_geteuid(cred)));
1888
1889 ngroups = kauth_cred_ngroups(cred);
1890 for (i=0; i < ngroups; i++)
1891 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1892 myprintf(("\n"));
1893
1894 }
1895
1896 /*
1897 * Return a vnode for the given fid.
1898 * If no cnode exists for this fid create one and put it
1899 * in a table hashed by coda_f2i(). If the cnode for
1900  * this fid is already in the table return it (the reference count is
1901  * incremented by coda_find).  The cnode will be flushed from the
1902 * table when coda_inactive calls coda_unsave.
1903 */
1904 struct cnode *
1905 make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1906 {
1907 struct cnode *cp;
1908 int error;
1909
1910 if ((cp = coda_find(fid)) == NULL) {
1911 vnode_t *vp;
1912
1913 cp = coda_alloc();
1914 cp->c_fid = *fid;
1915
1916 error = getnewvnode(VT_CODA, fvsp, coda_vnodeop_p, NULL, &vp);
1917 if (error) {
1918 panic("%s: getnewvnode returned error %d", __func__, error);
1919 }
1920 vp->v_data = cp;
1921 vp->v_type = type;
1922 cp->c_vnode = vp;
1923 uvm_vnp_setsize(vp, 0);
1924 coda_save(cp);
1925
1926 } else {
1927 vref(CTOV(cp));
1928 }
1929
1930 return cp;
1931 }
1932
1933 /*
1934 * coda_getpages may be called on a vnode which has not been opened,
1935 * e.g. to fault in pages to execute a program. In that case, we must
1936 * open the file to get the container. The vnode may or may not be
1937 * locked, and we must leave it in the same state.
1938 */
1939 int
1940 coda_getpages(void *v)
1941 {
1942 struct vop_getpages_args /* {
1943 vnode_t *a_vp;
1944 voff_t a_offset;
1945 struct vm_page **a_m;
1946 int *a_count;
1947 int a_centeridx;
1948 vm_prot_t a_access_type;
1949 int a_advice;
1950 int a_flags;
1951 } */ *ap = v;
1952 vnode_t *vp = ap->a_vp, *cvp;
1953 struct cnode *cp = VTOC(vp);
1954 struct lwp *l = curlwp;
1955 kauth_cred_t cred = l->l_cred;
1956 int error, cerror;
1957 int waslocked; /* 1 if vnode lock was held on entry */
1958 int didopen = 0; /* 1 if we opened container file */
1959
1960 /*
1961 * Handle a case that uvm_fault doesn't quite use yet.
1962 	 * See layer_vnops.c for inspiration.
1963 */
1964 if (ap->a_flags & PGO_LOCKED) {
1965 return EBUSY;
1966 }
1967
1968 KASSERT(mutex_owned(vp->v_interlock));
1969
1970 /* Check for control object. */
1971 if (IS_CTL_VP(vp)) {
1972 #ifdef CODA_VERBOSE
1973 printf("%s: control object %p\n", __func__, vp);
1974 #endif
1975 return(EINVAL);
1976 }
1977
1978 /*
1979 * XXX It's really not ok to be releasing the lock we get,
1980 * because we could be overlapping with another call to
1981 * getpages and drop a lock they are relying on. We need to
1982 * figure out whether getpages ever is called holding the
1983 * lock, and if we should serialize getpages calls by some
1984 * mechanism.
1985 */
1986 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1987 waslocked = VOP_ISLOCKED(vp);
1988
1989 /* Get container file if not already present. */
1990 cvp = cp->c_ovp;
1991 if (cvp == NULL) {
1992 /*
1993 * VOP_OPEN requires a locked vnode. We must avoid
1994 * locking the vnode if it is already locked, and
1995 * leave it in the same state on exit.
1996 */
1997 if (waslocked == 0) {
1998 mutex_exit(vp->v_interlock);
1999 cerror = vn_lock(vp, LK_EXCLUSIVE);
2000 if (cerror) {
2001 #ifdef CODA_VERBOSE
2002 printf("%s: can't lock vnode %p\n",
2003 __func__, vp);
2004 #endif
2005 return cerror;
2006 }
2007 #ifdef CODA_VERBOSE
2008 printf("%s: locked vnode %p\n", __func__, vp);
2009 #endif
2010 }
2011
2012 /*
2013 * Open file (causes upcall to venus).
2014 * XXX Perhaps we should not fully open the file, but
2015 * simply obtain a container file.
2016 */
2017 /* XXX Is it ok to do this while holding the simplelock? */
2018 cerror = VOP_OPEN(vp, FREAD, cred);
2019
2020 if (cerror) {
2021 #ifdef CODA_VERBOSE
2022 printf("%s: cannot open vnode %p => %d\n", __func__,
2023 vp, cerror);
2024 #endif
2025 if (waslocked == 0)
2026 VOP_UNLOCK(vp);
2027 return cerror;
2028 }
2029
2030 #ifdef CODA_VERBOSE
2031 printf("%s: opened vnode %p\n", __func__, vp);
2032 #endif
2033 cvp = cp->c_ovp;
2034 didopen = 1;
2035 if (waslocked == 0)
2036 mutex_enter(vp->v_interlock);
2037 }
2038 KASSERT(cvp != NULL);
2039
2040 /* Munge the arg structure to refer to the container vnode. */
2041 KASSERT(cvp->v_interlock == vp->v_interlock);
2042 ap->a_vp = cp->c_ovp;
2043
2044 /* Finally, call getpages on it. */
2045 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2046
2047 /* If we opened the vnode, we must close it. */
2048 if (didopen) {
2049 /*
2050 * VOP_CLOSE requires a locked vnode, but we are still
2051 * holding the lock (or riding a caller's lock).
2052 */
2053 cerror = VOP_CLOSE(vp, FREAD, cred);
2054 #ifdef CODA_VERBOSE
2055 if (cerror != 0)
2056 /* XXX How should we handle this? */
2057 printf("%s: closed vnode %p -> %d\n", __func__,
2058 vp, cerror);
2059 #endif
2060
2061 /* If we obtained a lock, drop it. */
2062 if (waslocked == 0)
2063 VOP_UNLOCK(vp);
2064 }
2065
2066 return error;
2067 }
2068
2069 /*
2070 * The protocol requires v_interlock to be held by the caller.
2071 */
2072 int
2073 coda_putpages(void *v)
2074 {
2075 struct vop_putpages_args /* {
2076 vnode_t *a_vp;
2077 voff_t a_offlo;
2078 voff_t a_offhi;
2079 int a_flags;
2080 } */ *ap = v;
2081 vnode_t *vp = ap->a_vp, *cvp;
2082 struct cnode *cp = VTOC(vp);
2083 int error;
2084
2085 KASSERT(mutex_owned(vp->v_interlock));
2086
2087 /* Check for control object. */
2088 if (IS_CTL_VP(vp)) {
2089 mutex_exit(vp->v_interlock);
2090 #ifdef CODA_VERBOSE
2091 printf("%s: control object %p\n", __func__, vp);
2092 #endif
2093 return(EINVAL);
2094 }
2095
2096 /*
2097 * If container object is not present, then there are no pages
2098 * to put; just return without error. This happens all the
2099 * time, apparently during discard of a closed vnode (which
2100 * trivially can't have dirty pages).
2101 */
2102 cvp = cp->c_ovp;
2103 if (cvp == NULL) {
2104 mutex_exit(vp->v_interlock);
2105 return 0;
2106 }
2107
2108 /* Munge the arg structure to refer to the container vnode. */
2109 KASSERT(cvp->v_interlock == vp->v_interlock);
2110 ap->a_vp = cvp;
2111
2112 /* Finally, call putpages on it. */
2113 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2114
2115 return error;
2116 }
2117