1 /*	$NetBSD: coda_vnops.c,v 1.68.12.2 2009/07/18 14:52:57 yamt Exp $	*/
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44  * University.  Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.68.12.2 2009/07/18 14:52:57 yamt Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/user.h>
64 #include <sys/vnode.h>
65 #include <sys/kauth.h>
66
67 #include <miscfs/genfs/genfs.h>
68
69 #include <coda/coda.h>
70 #include <coda/cnode.h>
71 #include <coda/coda_vnops.h>
72 #include <coda/coda_venus.h>
73 #include <coda/coda_opstats.h>
74 #include <coda/coda_subr.h>
75 #include <coda/coda_namecache.h>
76 #include <coda/coda_pioctl.h>
77
78 /*
79 * These flags select various performance enhancements.
80 */
81 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
82 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
83 int coda_access_cache = 1; /* Set to handle some access checks directly */
84
85 /* structure to keep track of vfs calls */
86
87 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
88
89 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
90 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
91 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
92 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
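
/*
 * Illustrative usage pattern for the counters above, as seen in the
 * vnode operations below: MARK_ENTRY() on entry to the operation,
 * MARK_INT_SAT() when the request is satisfied locally,
 * MARK_INT_FAIL() when it fails or must be rejected, and
 * MARK_INT_GEN() when an operation is generated internally
 * (e.g. an "internal open" done on behalf of another operation).
 */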
93
94 /* What we are delaying for in printf */
95 int coda_printf_delay = 0; /* in microseconds */
96 int coda_vnop_print_entry = 0;
97 static int coda_lockdebug = 0;
98
99 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
100
101 /* Definition of the vnode operation vector */
102
103 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
104 { &vop_default_desc, coda_vop_error },
105 { &vop_lookup_desc, coda_lookup }, /* lookup */
106 { &vop_create_desc, coda_create }, /* create */
107 { &vop_mknod_desc, coda_vop_error }, /* mknod */
108 { &vop_open_desc, coda_open }, /* open */
109 { &vop_close_desc, coda_close }, /* close */
110 { &vop_access_desc, coda_access }, /* access */
111 { &vop_getattr_desc, coda_getattr }, /* getattr */
112 { &vop_setattr_desc, coda_setattr }, /* setattr */
113 { &vop_read_desc, coda_read }, /* read */
114 { &vop_write_desc, coda_write }, /* write */
115 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
116 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
117 { &vop_mmap_desc, genfs_mmap }, /* mmap */
118 { &vop_fsync_desc, coda_fsync }, /* fsync */
119 { &vop_remove_desc, coda_remove }, /* remove */
120 { &vop_link_desc, coda_link }, /* link */
121 { &vop_rename_desc, coda_rename }, /* rename */
122 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
123 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
124 { &vop_symlink_desc, coda_symlink }, /* symlink */
125 { &vop_readdir_desc, coda_readdir }, /* readdir */
126 { &vop_readlink_desc, coda_readlink }, /* readlink */
127 { &vop_abortop_desc, coda_abortop }, /* abortop */
128 { &vop_inactive_desc, coda_inactive }, /* inactive */
129 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
130 { &vop_lock_desc, coda_lock }, /* lock */
131 { &vop_unlock_desc, coda_unlock }, /* unlock */
132 { &vop_bmap_desc, coda_bmap }, /* bmap */
133 { &vop_strategy_desc, coda_strategy }, /* strategy */
134 { &vop_print_desc, coda_vop_error }, /* print */
135 { &vop_islocked_desc, coda_islocked }, /* islocked */
136 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
137 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
138 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
139 { &vop_seek_desc, genfs_seek }, /* seek */
140 { &vop_poll_desc, genfs_poll }, /* poll */
141 { &vop_getpages_desc, coda_getpages }, /* getpages */
142 { &vop_putpages_desc, coda_putpages }, /* putpages */
143 { NULL, NULL }
144 };
145
146 const struct vnodeopv_desc coda_vnodeop_opv_desc =
147 { &coda_vnodeop_p, coda_vnodeop_entries };
148
149 /* Definitions of NetBSD vnodeop interfaces */
150
151 /*
152 * A generic error routine. Return EIO without looking at arguments.
153 */
154 int
155 coda_vop_error(void *anon) {
156 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
157
158 if (codadebug) {
159 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
160 (*desc)->vdesc_name));
161 }
162
163 return EIO;
164 }
165
166 /* A generic do-nothing. */
167 int
168 coda_vop_nop(void *anon) {
169 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
170
171 if (codadebug) {
172 myprintf(("Vnode operation %s called, but unsupported\n",
173 (*desc)->vdesc_name));
174 }
175 return (0);
176 }
177
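/* Zero the per-operation statistics counters (coda_vnodeopstats). */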
178 int
179 coda_vnodeopstats_init(void)
180 {
181 int i;
182
183 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
184 coda_vnodeopstats[i].opcode = i;
185 coda_vnodeopstats[i].entries = 0;
186 coda_vnodeopstats[i].sat_intrn = 0;
187 coda_vnodeopstats[i].unsat_intrn = 0;
188 coda_vnodeopstats[i].gen_intrn = 0;
189 }
190
191 return 0;
192 }
193
194 /*
195 * XXX The entire relationship between VOP_OPEN and having a container
196 * file (via venus_open) needs to be reexamined. In particular, it's
197 * valid to open/mmap/close and then reference. Instead of doing
198 * VOP_OPEN when getpages needs a container, we should do the
199 * venus_open part, and record that the vnode has opened the container
200 * for getpages, and do the matching logical close on coda_inactive.
201 * Further, coda_rdwr needs a container file, and sometimes needs to
202 * do the equivalent of open (core dumps).
203 */
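
/*
 * As implemented in the functions below, the container file lifecycle
 * is roughly: coda_open() asks venus for the container's <device, inode>
 * pair, maps it to a vnode with coda_grab_vnode(), stores that vnode in
 * cp->c_ovp and counts opens in cp->c_ocount; coda_close() undoes this.
 * coda_rdwr() and coda_readdir() fall back to an "internal"
 * VOP_OPEN/VOP_CLOSE pair when no container vnode is present.
 */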
204 /*
205 * coda_open calls Venus to return the device and inode of the
206 * container file, and then obtains a vnode for that file. The
207 * container vnode is stored in the coda vnode, and a reference is
208 * added for each open file.
209 */
210 int
211 coda_open(void *v)
212 {
213 /*
214 * NetBSD can pass the O_EXCL flag in mode, even though the check
215 * has already happened. Venus defensively assumes that if open
216  * is passed O_EXCL, it must be a bug.  We strip the flag here.
217 */
218 /* true args */
219 struct vop_open_args *ap = v;
220 struct vnode *vp = ap->a_vp;
221 struct cnode *cp = VTOC(vp);
222 int flag = ap->a_mode & (~O_EXCL);
223 kauth_cred_t cred = ap->a_cred;
224 /* locals */
225 int error;
226 dev_t dev; /* container file device, inode, vnode */
227 ino_t inode;
228 struct vnode *container_vp;
229
230 MARK_ENTRY(CODA_OPEN_STATS);
231
232 /* Check for open of control file. */
233 if (IS_CTL_VP(vp)) {
234 /* if (WRITABLE(flag)) */
235 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
236 MARK_INT_FAIL(CODA_OPEN_STATS);
237 return(EACCES);
238 }
239 MARK_INT_SAT(CODA_OPEN_STATS);
240 return(0);
241 }
242
243 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
244 if (error)
245 return (error);
247 	CODADEBUG(CODA_OPEN,
248 	    myprintf(("open: dev 0x%llx inode %llu result %d\n",
249 	    (unsigned long long)dev, (unsigned long long)inode, error));)
251
252 /*
253 * Obtain locked and referenced container vnode from container
254 * device/inode.
255 */
256 error = coda_grab_vnode(dev, inode, &container_vp);
257 if (error)
258 return (error);
259
260 /* Save the vnode pointer for the container file. */
261 if (cp->c_ovp == NULL) {
262 cp->c_ovp = container_vp;
263 } else {
264 if (cp->c_ovp != container_vp)
265 /*
266 * Perhaps venus returned a different container, or
267 * something else went wrong.
268 */
269 panic("coda_open: cp->c_ovp != container_vp");
270 }
271 cp->c_ocount++;
272
273 /* Flush the attribute cache if writing the file. */
274 if (flag & FWRITE) {
275 cp->c_owrite++;
276 cp->c_flags &= ~C_VATTR;
277 }
278
279 /*
280 * Save the <device, inode> pair for the container file to speed
281 * up subsequent reads while closed (mmap, program execution).
282 * This is perhaps safe because venus will invalidate the node
283 * before changing the container file mapping.
284 */
285 cp->c_device = dev;
286 cp->c_inode = inode;
287
288 /* Open the container file. */
289 error = VOP_OPEN(container_vp, flag, cred);
290 /*
291 * Drop the lock on the container, after we have done VOP_OPEN
292 * (which requires a locked vnode).
293 */
294 VOP_UNLOCK(container_vp, 0);
295 return(error);
296 }
297
298 /*
299 * Close the cache file used for I/O and notify Venus.
300 */
301 int
302 coda_close(void *v)
303 {
304 /* true args */
305 struct vop_close_args *ap = v;
306 struct vnode *vp = ap->a_vp;
307 struct cnode *cp = VTOC(vp);
308 int flag = ap->a_fflag;
309 kauth_cred_t cred = ap->a_cred;
310 /* locals */
311 int error;
312
313 MARK_ENTRY(CODA_CLOSE_STATS);
314
315 /* Check for close of control file. */
316 if (IS_CTL_VP(vp)) {
317 MARK_INT_SAT(CODA_CLOSE_STATS);
318 return(0);
319 }
320
321 /*
322 * XXX The IS_UNMOUNTING part of this is very suspect.
323 */
324 if (IS_UNMOUNTING(cp)) {
325 if (cp->c_ovp) {
326 #ifdef CODA_VERBOSE
327 printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
328 vp->v_usecount, cp->c_ovp, vp, cp);
329 #endif
330 #ifdef hmm
331 vgone(cp->c_ovp);
332 #else
333 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
334 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
335 vput(cp->c_ovp);
336 #endif
337 } else {
338 #ifdef CODA_VERBOSE
339 printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
340 #endif
341 }
342 return ENODEV;
343 }
344
345 /* Lock the container node, and VOP_CLOSE it. */
346 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
347 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
348 /*
349 * Drop the lock we just obtained, and vrele the container vnode.
350 * Decrement reference counts, and clear container vnode pointer on
351 * last close.
352 */
353 vput(cp->c_ovp);
354 if (flag & FWRITE)
355 --cp->c_owrite;
356 if (--cp->c_ocount == 0)
357 cp->c_ovp = NULL;
358
359 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
360
361 CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
362 return(error);
363 }
364
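/*
 * Read and write are thin wrappers that delegate to coda_rdwr(), which
 * redirects the I/O to the container (cache) file.
 */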
365 int
366 coda_read(void *v)
367 {
368 struct vop_read_args *ap = v;
369
370 ENTRY;
371 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
372 ap->a_ioflag, ap->a_cred, curlwp));
373 }
374
375 int
376 coda_write(void *v)
377 {
378 struct vop_write_args *ap = v;
379
380 ENTRY;
381 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
382 ap->a_ioflag, ap->a_cred, curlwp));
383 }
384
385 int
386 coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
387 kauth_cred_t cred, struct lwp *l)
388 {
389 /* upcall decl */
390 /* NOTE: container file operation!!! */
391 /* locals */
392 struct cnode *cp = VTOC(vp);
393 struct vnode *cfvp = cp->c_ovp;
394 struct proc *p = l->l_proc;
395 int opened_internally = 0;
396 int error = 0;
397
398 MARK_ENTRY(CODA_RDWR_STATS);
399
400 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
401 uiop->uio_iov->iov_base,
402 (unsigned long) uiop->uio_resid,
403 (long long) uiop->uio_offset)); )
404
405 /* Check for rdwr of control object. */
406 if (IS_CTL_VP(vp)) {
407 MARK_INT_FAIL(CODA_RDWR_STATS);
408 return(EINVAL);
409 }
410
411 /* Redirect the request to UFS. */
412
413 /*
414 * If file is not already open this must be a page
415 * {read,write} request. Iget the cache file's inode
416 * pointer if we still have its <device, inode> pair.
417 * Otherwise, we must do an internal open to derive the
418 * pair.
419 * XXX Integrate this into a coherent strategy for container
420 * file acquisition.
421 */
422 if (cfvp == NULL) {
423 /*
424 * If we're dumping core, do the internal open. Otherwise
425 * venus won't have the correct size of the core when
426 * it's completely written.
427 */
428 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
429 printf("coda_rdwr: grabbing container vnode, losing reference\n");
430 /* Get locked and refed vnode. */
431 error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
432 if (error) {
433 MARK_INT_FAIL(CODA_RDWR_STATS);
434 return(error);
435 }
436 /*
437 * Drop lock.
438 		 * XXX Where is the reference released?
439 */
440 VOP_UNLOCK(cfvp, 0);
441 }
442 else {
443 printf("coda_rdwr: internal VOP_OPEN\n");
444 opened_internally = 1;
445 MARK_INT_GEN(CODA_OPEN_STATS);
446 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
447 #ifdef CODA_VERBOSE
448 printf("coda_rdwr: Internally Opening %p\n", vp);
449 #endif
450 if (error) {
451 MARK_INT_FAIL(CODA_RDWR_STATS);
452 return(error);
453 }
454 cfvp = cp->c_ovp;
455 }
456 }
457
458 /* Have UFS handle the call. */
459 CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
460 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
461
462 if (rw == UIO_READ) {
463 error = VOP_READ(cfvp, uiop, ioflag, cred);
464 } else {
465 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
466 }
467
468 if (error)
469 MARK_INT_FAIL(CODA_RDWR_STATS);
470 else
471 MARK_INT_SAT(CODA_RDWR_STATS);
472
473 /* Do an internal close if necessary. */
474 if (opened_internally) {
475 MARK_INT_GEN(CODA_CLOSE_STATS);
476 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
477 }
478
479 /* Invalidate cached attributes if writing. */
480 if (rw == UIO_WRITE)
481 cp->c_flags &= ~C_VATTR;
482 return(error);
483 }
484
485 int
486 coda_ioctl(void *v)
487 {
488 /* true args */
489 struct vop_ioctl_args *ap = v;
490 struct vnode *vp = ap->a_vp;
491 int com = ap->a_command;
492 void *data = ap->a_data;
493 int flag = ap->a_fflag;
494 kauth_cred_t cred = ap->a_cred;
495 /* locals */
496 int error;
497 struct vnode *tvp;
498 struct PioctlData *iap = (struct PioctlData *)data;
499 namei_simple_flags_t sflags;
500
501 MARK_ENTRY(CODA_IOCTL_STATS);
502
503 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
504
505 	/* Don't check for operation on a dying object; for the ctlvp it
506 	   shouldn't matter. */
507
508 /* Must be control object to succeed. */
509 if (!IS_CTL_VP(vp)) {
510 MARK_INT_FAIL(CODA_IOCTL_STATS);
511 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
512 return (EOPNOTSUPP);
513 }
514 /* Look up the pathname. */
515
516 /* Should we use the name cache here? It would get it from
517 lookupname sooner or later anyway, right? */
518
519 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
520 error = namei_simple_user(iap->path, sflags, &tvp);
521
522 if (error) {
523 MARK_INT_FAIL(CODA_IOCTL_STATS);
524 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
525 error));)
526 return(error);
527 }
528
529 /*
530 * Make sure this is a coda style cnode, but it may be a
531 * different vfsp
532 */
533 /* XXX: this totally violates the comment about vtagtype in vnode.h */
534 if (tvp->v_tag != VT_CODA) {
535 vrele(tvp);
536 MARK_INT_FAIL(CODA_IOCTL_STATS);
537 CODADEBUG(CODA_IOCTL,
538 myprintf(("coda_ioctl error: %s not a coda object\n",
539 iap->path));)
540 return(EINVAL);
541 }
542
543 if (iap->vi.in_size > VC_MAXDATASIZE) {
544 vrele(tvp);
545 return(EINVAL);
546 }
547 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
548 cred, curlwp);
549
550 if (error)
551 MARK_INT_FAIL(CODA_IOCTL_STATS);
552 else
553 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
554
555 vrele(tvp);
556 return(error);
557 }
558
559 /*
560  * To reduce the cost of a user-level venus, we cache attributes in
561  * the kernel.  Each cnode has storage allocated for an attribute.  If
562  * c_vattr is valid, return a reference to it. Otherwise, get the
563  * attributes from venus and store them in the cnode.  There is some
564  * question whether this method is a security leak.  But I think that in
565 * order to make this call, the user must have done a lookup and
566 * opened the file, and therefore should already have access.
567 */
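/*
 * The cached attributes are considered valid when C_VATTR is set in
 * cp->c_flags (VALID_VATTR); operations that may change attributes
 * (open for write, write, setattr, create, remove, rename, ...) clear
 * the flag so stale values are not returned.
 */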
568 int
569 coda_getattr(void *v)
570 {
571 /* true args */
572 struct vop_getattr_args *ap = v;
573 struct vnode *vp = ap->a_vp;
574 struct cnode *cp = VTOC(vp);
575 struct vattr *vap = ap->a_vap;
576 kauth_cred_t cred = ap->a_cred;
577 /* locals */
578 int error;
579
580 MARK_ENTRY(CODA_GETATTR_STATS);
581
582 /* Check for getattr of control object. */
583 if (IS_CTL_VP(vp)) {
584 MARK_INT_FAIL(CODA_GETATTR_STATS);
585 return(ENOENT);
586 }
587
588 /* Check to see if the attributes have already been cached */
589 if (VALID_VATTR(cp)) {
590 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
591 coda_f2s(&cp->c_fid)));});
592 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
593 print_vattr(&cp->c_vattr); );
594
595 *vap = cp->c_vattr;
596 MARK_INT_SAT(CODA_GETATTR_STATS);
597 return(0);
598 }
599
600 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
601
602 if (!error) {
603 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
604 coda_f2s(&cp->c_fid), error)); )
605
606 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
607 print_vattr(vap); );
608
609 /* If not open for write, store attributes in cnode */
610 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
611 cp->c_vattr = *vap;
612 cp->c_flags |= C_VATTR;
613 }
614
615 }
616 return(error);
617 }
618
619 int
620 coda_setattr(void *v)
621 {
622 /* true args */
623 struct vop_setattr_args *ap = v;
624 struct vnode *vp = ap->a_vp;
625 struct cnode *cp = VTOC(vp);
626 struct vattr *vap = ap->a_vap;
627 kauth_cred_t cred = ap->a_cred;
628 /* locals */
629 int error;
630
631 MARK_ENTRY(CODA_SETATTR_STATS);
632
633 /* Check for setattr of control object. */
634 if (IS_CTL_VP(vp)) {
635 MARK_INT_FAIL(CODA_SETATTR_STATS);
636 return(ENOENT);
637 }
638
639 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
640 print_vattr(vap);
641 }
642 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
643
644 if (!error)
645 cp->c_flags &= ~C_VATTR;
646
647 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
648 return(error);
649 }
650
651 int
652 coda_access(void *v)
653 {
654 /* true args */
655 struct vop_access_args *ap = v;
656 struct vnode *vp = ap->a_vp;
657 struct cnode *cp = VTOC(vp);
658 int mode = ap->a_mode;
659 kauth_cred_t cred = ap->a_cred;
660 /* locals */
661 int error;
662
663 MARK_ENTRY(CODA_ACCESS_STATS);
664
665 /* Check for access of control object. Only read access is
666 allowed on it. */
667 if (IS_CTL_VP(vp)) {
668 /* bogus hack - all will be marked as successes */
669 MARK_INT_SAT(CODA_ACCESS_STATS);
670 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
671 ? 0 : EACCES);
672 }
673
674 /*
675  * if the file is a directory, and we are checking exec (e.g. lookup)
676 * access, and the file is in the namecache, then the user must have
677 * lookup access to it.
678 */
679 if (coda_access_cache) {
680 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
681 if (coda_nc_lookup(cp, ".", 1, cred)) {
682 MARK_INT_SAT(CODA_ACCESS_STATS);
683 return(0); /* it was in the cache */
684 }
685 }
686 }
687
688 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
689
690 return(error);
691 }
692
693 /*
694 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
695 * done. If a buffer has been saved in anticipation of a coda_create or
696 * a coda_remove, delete it.
697 */
698 /* ARGSUSED */
699 int
700 coda_abortop(void *v)
701 {
702 /* true args */
703 struct vop_abortop_args /* {
704 struct vnode *a_dvp;
705 struct componentname *a_cnp;
706 } */ *ap = v;
707 /* upcall decl */
708 /* locals */
709
710 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
711 PNBUF_PUT(ap->a_cnp->cn_pnbuf);
712 return (0);
713 }
714
715 int
716 coda_readlink(void *v)
717 {
718 /* true args */
719 struct vop_readlink_args *ap = v;
720 struct vnode *vp = ap->a_vp;
721 struct cnode *cp = VTOC(vp);
722 struct uio *uiop = ap->a_uio;
723 kauth_cred_t cred = ap->a_cred;
724 /* locals */
725 struct lwp *l = curlwp;
726 int error;
727 char *str;
728 int len;
729
730 MARK_ENTRY(CODA_READLINK_STATS);
731
732 /* Check for readlink of control object. */
733 if (IS_CTL_VP(vp)) {
734 MARK_INT_FAIL(CODA_READLINK_STATS);
735 return(ENOENT);
736 }
737
738 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
739 uiop->uio_rw = UIO_READ;
740 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
741 if (error)
742 MARK_INT_FAIL(CODA_READLINK_STATS);
743 else
744 MARK_INT_SAT(CODA_READLINK_STATS);
745 return(error);
746 }
747
748 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
749
750 if (!error) {
751 uiop->uio_rw = UIO_READ;
752 error = uiomove(str, len, uiop);
753
754 if (coda_symlink_cache) {
755 cp->c_symlink = str;
756 cp->c_symlen = len;
757 cp->c_flags |= C_SYMLINK;
758 } else
759 CODA_FREE(str, len);
760 }
761
762 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
763 return(error);
764 }
765
766 int
767 coda_fsync(void *v)
768 {
769 /* true args */
770 struct vop_fsync_args *ap = v;
771 struct vnode *vp = ap->a_vp;
772 struct cnode *cp = VTOC(vp);
773 kauth_cred_t cred = ap->a_cred;
774 /* locals */
775 struct vnode *convp = cp->c_ovp;
776 int error;
777
778 MARK_ENTRY(CODA_FSYNC_STATS);
779
780 /* Check for fsync on an unmounting object */
781     /* The NetBSD kernel, in its infinite wisdom, can try to fsync
782 * after an unmount has been initiated. This is a Bad Thing,
783 * which we have to avoid. Not a legitimate failure for stats.
784 */
785 if (IS_UNMOUNTING(cp)) {
786 return(ENODEV);
787 }
788
789 /* Check for fsync of control object. */
790 if (IS_CTL_VP(vp)) {
791 MARK_INT_SAT(CODA_FSYNC_STATS);
792 return(0);
793 }
794
795 if (convp)
796 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
797
798 /*
799      * We can expect fsync on any vnode at all if venus is purging it.
800 * Venus can't very well answer the fsync request, now can it?
801 * Hopefully, it won't have to, because hopefully, venus preserves
802 * the (possibly untrue) invariant that it never purges an open
803 * vnode. Hopefully.
804 */
805 if (cp->c_flags & C_PURGING) {
806 return(0);
807 }
808
809 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
810
811 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
812 return(error);
813 }
814
815 /*
816 * vp is locked on entry, and we must unlock it.
817 * XXX This routine is suspect and probably needs rewriting.
818 */
819 int
820 coda_inactive(void *v)
821 {
822 /* true args */
823 struct vop_inactive_args *ap = v;
824 struct vnode *vp = ap->a_vp;
825 struct cnode *cp = VTOC(vp);
826 kauth_cred_t cred __unused = NULL;
827
828 /* We don't need to send inactive to venus - DCS */
829 MARK_ENTRY(CODA_INACTIVE_STATS);
830
831 if (IS_CTL_VP(vp)) {
832 MARK_INT_SAT(CODA_INACTIVE_STATS);
833 return 0;
834 }
835
836 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
837 coda_f2s(&cp->c_fid), vp->v_mount));)
838
839 /* If an array has been allocated to hold the symlink, deallocate it */
840 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
841 if (cp->c_symlink == NULL)
842 panic("coda_inactive: null symlink pointer in cnode");
843
844 CODA_FREE(cp->c_symlink, cp->c_symlen);
845 cp->c_flags &= ~C_SYMLINK;
846 cp->c_symlen = 0;
847 }
848
849 /* Remove it from the table so it can't be found. */
850 coda_unsave(cp);
851 if (vp->v_mount->mnt_data == NULL) {
852 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
853 panic("badness in coda_inactive");
854 }
855
856 if (IS_UNMOUNTING(cp)) {
857 /* XXX Do we need to VOP_CLOSE container vnodes? */
858 if (vp->v_usecount > 0)
859 printf("coda_inactive: IS_UNMOUNTING %p usecount %d\n",
860 vp, vp->v_usecount);
861 if (cp->c_ovp != NULL)
862 printf("coda_inactive: %p ovp != NULL\n", vp);
863 VOP_UNLOCK(vp, 0);
864 } else {
865 /* Sanity checks that perhaps should be panic. */
866 if (vp->v_usecount) {
867 printf("coda_inactive: %p usecount %d\n", vp, vp->v_usecount);
868 }
869 if (cp->c_ovp != NULL) {
870 printf("coda_inactive: %p ovp != NULL\n", vp);
871 }
872 VOP_UNLOCK(vp, 0);
873 *ap->a_recycle = true;
874 }
875
876 MARK_INT_SAT(CODA_INACTIVE_STATS);
877 return(0);
878 }
879
880 /*
881 * Coda does not use the normal namecache, but a private version.
882 * Consider how to use the standard facility instead.
883 */
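/*
 * Lookup is resolved first against the private coda name cache
 * (coda_nc_lookup()); on a miss, venus is consulted (venus_lookup())
 * and, unless the result is marked CODA_NOCACHE, the answer is entered
 * into the cache (coda_nc_enter()) for next time.
 */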
884 int
885 coda_lookup(void *v)
886 {
887 /* true args */
888 struct vop_lookup_args *ap = v;
889 /* (locked) vnode of dir in which to do lookup */
890 struct vnode *dvp = ap->a_dvp;
891 struct cnode *dcp = VTOC(dvp);
892 /* output variable for result */
893 struct vnode **vpp = ap->a_vpp;
894 /* name to lookup */
895 struct componentname *cnp = ap->a_cnp;
896 kauth_cred_t cred = cnp->cn_cred;
897 struct lwp *l = curlwp;
898 /* locals */
899 struct cnode *cp;
900 const char *nm = cnp->cn_nameptr;
901 int len = cnp->cn_namelen;
902 int flags = cnp->cn_flags;
903 int isdot;
904 CodaFid VFid;
905 int vtype;
906 int error = 0;
907
908 MARK_ENTRY(CODA_LOOKUP_STATS);
909
910 CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
911 nm, coda_f2s(&dcp->c_fid))););
912
913 /*
914 * XXX componentname flags in MODMASK are not handled at all
915 */
916
917 /*
918 * The overall strategy is to switch on the lookup type and get a
919 * result vnode that is vref'd but not locked. Then, the code at
920 * exit: switches on ., .., and regular lookups and does the right
921 * locking.
922 */
923
924 /* Check for lookup of control object. */
925 if (IS_CTL_NAME(dvp, nm, len)) {
926 *vpp = coda_ctlvp;
927 vref(*vpp);
928 MARK_INT_SAT(CODA_LOOKUP_STATS);
929 goto exit;
930 }
931
932 /* Avoid trying to hand venus an unreasonably long name. */
933 if (len+1 > CODA_MAXNAMLEN) {
934 MARK_INT_FAIL(CODA_LOOKUP_STATS);
935 CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
936 coda_f2s(&dcp->c_fid), nm)););
937 *vpp = (struct vnode *)0;
938 error = EINVAL;
939 goto exit;
940 }
941
942 /*
943 * XXX Check for DOT lookups, and short circuit all the caches,
944 * just doing an extra vref. (venus guarantees that lookup of
945 * . returns self.)
946 */
947 isdot = (len == 1 && nm[0] == '.');
948
949 /*
950 * Try to resolve the lookup in the minicache. If that fails, ask
951 * venus to do the lookup. XXX The interaction between vnode
952 * locking and any locking that coda does is not clear.
953 */
954 cp = coda_nc_lookup(dcp, nm, len, cred);
955 if (cp) {
956 *vpp = CTOV(cp);
957 vref(*vpp);
958 CODADEBUG(CODA_LOOKUP,
959 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
960 } else {
961 /* The name wasn't cached, so ask Venus. */
962 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);
963
964 if (error) {
965 MARK_INT_FAIL(CODA_LOOKUP_STATS);
966 CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
967 coda_f2s(&dcp->c_fid), nm, error));)
968 *vpp = (struct vnode *)0;
969 } else {
970 MARK_INT_SAT(CODA_LOOKUP_STATS);
971 CODADEBUG(CODA_LOOKUP,
972 myprintf(("lookup: %s type %o result %d\n",
973 coda_f2s(&VFid), vtype, error)); )
974
975 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
976 *vpp = CTOV(cp);
977 /* vpp is now vrefed. */
978
979 /*
980 * Unless this vnode is marked CODA_NOCACHE, enter it into
981 * the coda name cache to avoid a future venus round-trip.
982 * XXX Interaction with componentname NOCACHE is unclear.
983 */
984 if (!(vtype & CODA_NOCACHE))
985 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
986 }
987 }
988
989 exit:
990 /*
991 * If we are creating, and this was the last name to be looked up,
992 * and the error was ENOENT, then make the leaf NULL and return
993 * success.
994 * XXX Check against new lookup rules.
995 */
996 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
997 && (cnp->cn_flags & ISLASTCN)
998 && (error == ENOENT))
999 {
1000 error = EJUSTRETURN;
1001 cnp->cn_flags |= SAVENAME;
1002 *ap->a_vpp = NULL;
1003 }
1004
1005 /*
1006 * If we are removing, and we are at the last element, and we
1007 * found it, then we need to keep the name around so that the
1008 * removal will go ahead as planned.
1009 * XXX Check against new lookup rules.
1010 */
1011 if ((cnp->cn_nameiop == DELETE)
1012 && (cnp->cn_flags & ISLASTCN)
1013 && !error)
1014 {
1015 cnp->cn_flags |= SAVENAME;
1016 }
1017
1018 /*
1019 * If the lookup succeeded, we must generally lock the returned
1020 * vnode. This could be a ., .., or normal lookup. See
1021 * vnodeops(9) for the details.
1022 */
1023 /*
1024 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1025 * somehow, and remove LK_RETRY.
1026 */
1027 if (!error || (error == EJUSTRETURN)) {
1028 /* Lookup has a value and it isn't "."? */
1029 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1030 if (flags & ISDOTDOT)
1031 /* ..: unlock parent */
1032 VOP_UNLOCK(dvp, 0);
1033 /* all but .: lock child */
1034 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1035 if (flags & ISDOTDOT)
1036 /* ..: relock parent */
1037 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1038 }
1039 /* else .: leave dvp locked */
1040 } else {
1041 /* The lookup failed, so return NULL. Leave dvp locked. */
1042 *ap->a_vpp = NULL;
1043 }
1044 return(error);
1045 }
1046
1047 /*ARGSUSED*/
1048 int
1049 coda_create(void *v)
1050 {
1051 /* true args */
1052 struct vop_create_args *ap = v;
1053 struct vnode *dvp = ap->a_dvp;
1054 struct cnode *dcp = VTOC(dvp);
1055 struct vattr *va = ap->a_vap;
1056 int exclusive = 1;
1057 int mode = ap->a_vap->va_mode;
1058 struct vnode **vpp = ap->a_vpp;
1059 struct componentname *cnp = ap->a_cnp;
1060 kauth_cred_t cred = cnp->cn_cred;
1061 struct lwp *l = curlwp;
1062 /* locals */
1063 int error;
1064 struct cnode *cp;
1065 const char *nm = cnp->cn_nameptr;
1066 int len = cnp->cn_namelen;
1067 CodaFid VFid;
1068 struct vattr attr;
1069
1070 MARK_ENTRY(CODA_CREATE_STATS);
1071
1072 /* All creates are exclusive XXX */
1073 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1074
1075 /* Check for create of control object. */
1076 if (IS_CTL_NAME(dvp, nm, len)) {
1077 *vpp = (struct vnode *)0;
1078 MARK_INT_FAIL(CODA_CREATE_STATS);
1079 return(EACCES);
1080 }
1081
1082 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1083
1084 if (!error) {
1085
1086 /*
1087 * XXX Violation of venus/kernel invariants is a difficult case,
1088 * but venus should not be able to cause a panic.
1089 */
1090 /* If this is an exclusive create, panic if the file already exists. */
1091 /* Venus should have detected the file and reported EEXIST. */
1092
1093 if ((exclusive == 1) &&
1094 (coda_find(&VFid) != NULL))
1095 panic("cnode existed for newly created file!");
1096
1097 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1098 *vpp = CTOV(cp);
1099
1100 /* XXX vnodeops doesn't say this argument can be changed. */
1101 /* Update va to reflect the new attributes. */
1102 (*va) = attr;
1103
1104 /* Update the attribute cache and mark it as valid */
1105 if (coda_attr_cache) {
1106 VTOC(*vpp)->c_vattr = attr;
1107 VTOC(*vpp)->c_flags |= C_VATTR;
1108 }
1109
1110 /* Invalidate parent's attr cache (modification time has changed). */
1111 VTOC(dvp)->c_flags &= ~C_VATTR;
1112
1113 /* enter the new vnode in the Name Cache */
1114 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1115
1116 CODADEBUG(CODA_CREATE,
1117 myprintf(("create: %s, result %d\n",
1118 coda_f2s(&VFid), error)); )
1119 } else {
1120 *vpp = (struct vnode *)0;
1121 CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
1122 }
1123
1124 /*
1125 * vnodeops(9) says that we must unlock the parent and lock the child.
1126 * XXX Should we lock the child first?
1127 */
1128 vput(dvp);
1129 if (!error) {
1130 if ((cnp->cn_flags & LOCKLEAF) == 0) {
1131 /* This should not happen; flags are for lookup only. */
1132 printf("coda_create: LOCKLEAF not set!\n");
1133 }
1134
1135 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1136 /* XXX Perhaps avoid this panic. */
1137 panic("coda_create: couldn't lock child");
1138 }
1139 }
1140
1141 /* Per vnodeops(9), free name except on success and SAVESTART. */
1142 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1143 PNBUF_PUT(cnp->cn_pnbuf);
1144 }
1145 return(error);
1146 }
1147
1148 int
1149 coda_remove(void *v)
1150 {
1151 /* true args */
1152 struct vop_remove_args *ap = v;
1153 struct vnode *dvp = ap->a_dvp;
1154 struct cnode *cp = VTOC(dvp);
1155 struct vnode *vp = ap->a_vp;
1156 struct componentname *cnp = ap->a_cnp;
1157 kauth_cred_t cred = cnp->cn_cred;
1158 struct lwp *l = curlwp;
1159 /* locals */
1160 int error;
1161 const char *nm = cnp->cn_nameptr;
1162 int len = cnp->cn_namelen;
1163 struct cnode *tp;
1164
1165 MARK_ENTRY(CODA_REMOVE_STATS);
1166
1167 CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
1168 nm, coda_f2s(&cp->c_fid))););
1169
1170 /* Remove the file's entry from the CODA Name Cache */
1171 /* We're being conservative here, it might be that this person
1172 * doesn't really have sufficient access to delete the file
1173 * but we feel zapping the entry won't really hurt anyone -- dcs
1174 */
1175 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1176 * exist, and one is removed, the link count on the other will be
1177 * off by 1. We could either invalidate the attrs if cached, or
1178 * fix them. I'll try to fix them. DCS 11/8/94
1179 */
1180 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1181 if (tp) {
1182 if (VALID_VATTR(tp)) { /* If attrs are cached */
1183 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1184 tp->c_vattr.va_nlink--;
1185 }
1186 }
1187
1188 coda_nc_zapfile(VTOC(dvp), nm, len);
1189 /* No need to flush it if it doesn't exist! */
1190 }
1191 /* Invalidate the parent's attr cache, the modification time has changed */
1192 VTOC(dvp)->c_flags &= ~C_VATTR;
1193
1194 /* Check for remove of control object. */
1195 if (IS_CTL_NAME(dvp, nm, len)) {
1196 MARK_INT_FAIL(CODA_REMOVE_STATS);
1197 return(ENOENT);
1198 }
1199
1200 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1201
1202 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1203
1204 /*
1205 * Unlock parent and child (avoiding double if ".").
1206 */
1207 if (dvp == vp) {
1208 vrele(vp);
1209 } else {
1210 vput(vp);
1211 }
1212 vput(dvp);
1213
1214 return(error);
1215 }
1216
1217 /*
1218 * dvp is the directory where the link is to go, and is locked.
1219 * vp is the object to be linked to, and is unlocked.
1220 * At exit, we must unlock dvp, and vput dvp.
1221 */
1222 int
1223 coda_link(void *v)
1224 {
1225 /* true args */
1226 struct vop_link_args *ap = v;
1227 struct vnode *vp = ap->a_vp;
1228 struct cnode *cp = VTOC(vp);
1229 struct vnode *dvp = ap->a_dvp;
1230 struct cnode *dcp = VTOC(dvp);
1231 struct componentname *cnp = ap->a_cnp;
1232 kauth_cred_t cred = cnp->cn_cred;
1233 struct lwp *l = curlwp;
1234 /* locals */
1235 int error;
1236 const char *nm = cnp->cn_nameptr;
1237 int len = cnp->cn_namelen;
1238
1239 MARK_ENTRY(CODA_LINK_STATS);
1240
1241 if (codadebug & CODADBGMSK(CODA_LINK)) {
1242
1243 myprintf(("nb_link: vp fid: %s\n",
1244 coda_f2s(&cp->c_fid)));
1245 myprintf(("nb_link: dvp fid: %s)\n",
1246 coda_f2s(&dcp->c_fid)));
1247
1248 }
1249 if (codadebug & CODADBGMSK(CODA_LINK)) {
1250 myprintf(("link: vp fid: %s\n",
1251 coda_f2s(&cp->c_fid)));
1252 myprintf(("link: dvp fid: %s\n",
1253 coda_f2s(&dcp->c_fid)));
1254
1255 }
1256
1257 /* Check for link to/from control object. */
1258 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1259 MARK_INT_FAIL(CODA_LINK_STATS);
1260 return(EACCES);
1261 }
1262
1263 /* If linking . to a name, error out earlier. */
1264 if (vp == dvp) {
1265 printf("coda_link vp==dvp\n");
1266 error = EISDIR;
1267 goto exit;
1268 }
1269
1270 	/* XXX Why does venus_link need the vnode to be locked? */
1271 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1272 printf("coda_link: couldn't lock vnode %p\n", vp);
1273 error = EFAULT; /* XXX better value */
1274 goto exit;
1275 }
1276 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1277 VOP_UNLOCK(vp, 0);
1278
1279 /* Invalidate parent's attr cache (the modification time has changed). */
1280 VTOC(dvp)->c_flags &= ~C_VATTR;
1281 /* Invalidate child's attr cache (XXX why). */
1282 VTOC(vp)->c_flags &= ~C_VATTR;
1283
1284 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1285
1286 exit:
1287 vput(dvp);
1288 return(error);
1289 }
1290
1291 int
1292 coda_rename(void *v)
1293 {
1294 /* true args */
1295 struct vop_rename_args *ap = v;
1296 struct vnode *odvp = ap->a_fdvp;
1297 struct cnode *odcp = VTOC(odvp);
1298 struct componentname *fcnp = ap->a_fcnp;
1299 struct vnode *ndvp = ap->a_tdvp;
1300 struct cnode *ndcp = VTOC(ndvp);
1301 struct componentname *tcnp = ap->a_tcnp;
1302 kauth_cred_t cred = fcnp->cn_cred;
1303 struct lwp *l = curlwp;
1304 /* true args */
1305 int error;
1306 const char *fnm = fcnp->cn_nameptr;
1307 int flen = fcnp->cn_namelen;
1308 const char *tnm = tcnp->cn_nameptr;
1309 int tlen = tcnp->cn_namelen;
1310
1311 MARK_ENTRY(CODA_RENAME_STATS);
1312
1313 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1314 This could be Bad. XXX */
1315 #ifdef OLD_DIAGNOSTIC
1316 if ((fcnp->cn_cred != tcnp->cn_cred)
1317 || (fcnp->cn_lwp != tcnp->cn_lwp))
1318 {
1319 panic("coda_rename: component names don't agree");
1320 }
1321 #endif
1322
1323 /* Check for rename involving control object. */
1324 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1325 MARK_INT_FAIL(CODA_RENAME_STATS);
1326 return(EACCES);
1327 }
1328
1329 /* Problem with moving directories -- need to flush entry for .. */
1330 if (odvp != ndvp) {
1331 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1332 if (ovcp) {
1333 struct vnode *ovp = CTOV(ovcp);
1334 if ((ovp) &&
1335 (ovp->v_type == VDIR)) /* If it's a directory */
1336 coda_nc_zapfile(VTOC(ovp),"..", 2);
1337 }
1338 }
1339
1340 /* Remove the entries for both source and target files */
1341 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1342 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1343
1344 /* Invalidate the parent's attr cache, the modification time has changed */
1345 VTOC(odvp)->c_flags &= ~C_VATTR;
1346 VTOC(ndvp)->c_flags &= ~C_VATTR;
1347
1348 if (flen+1 > CODA_MAXNAMLEN) {
1349 MARK_INT_FAIL(CODA_RENAME_STATS);
1350 error = EINVAL;
1351 goto exit;
1352 }
1353
1354 if (tlen+1 > CODA_MAXNAMLEN) {
1355 MARK_INT_FAIL(CODA_RENAME_STATS);
1356 error = EINVAL;
1357 goto exit;
1358 }
1359
1360 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1361
1362 exit:
1363 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1364 	/* XXX - do we need to call cache_purge() on the moved vnode? */
1365 cache_purge(ap->a_fvp);
1366
1367 /* It seems to be incumbent on us to drop locks on all four vnodes */
1368 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1369
1370 vrele(ap->a_fvp);
1371 vrele(odvp);
1372
1373 if (ap->a_tvp) {
1374 if (ap->a_tvp == ndvp) {
1375 vrele(ap->a_tvp);
1376 } else {
1377 vput(ap->a_tvp);
1378 }
1379 }
1380
1381 vput(ndvp);
1382 return(error);
1383 }
1384
1385 int
1386 coda_mkdir(void *v)
1387 {
1388 /* true args */
1389 struct vop_mkdir_args *ap = v;
1390 struct vnode *dvp = ap->a_dvp;
1391 struct cnode *dcp = VTOC(dvp);
1392 struct componentname *cnp = ap->a_cnp;
1393 struct vattr *va = ap->a_vap;
1394 struct vnode **vpp = ap->a_vpp;
1395 kauth_cred_t cred = cnp->cn_cred;
1396 struct lwp *l = curlwp;
1397 /* locals */
1398 int error;
1399 const char *nm = cnp->cn_nameptr;
1400 int len = cnp->cn_namelen;
1401 struct cnode *cp;
1402 CodaFid VFid;
1403 struct vattr ova;
1404
1405 MARK_ENTRY(CODA_MKDIR_STATS);
1406
1407 	/* Check for mkdir of control object. */
1408 if (IS_CTL_NAME(dvp, nm, len)) {
1409 *vpp = (struct vnode *)0;
1410 MARK_INT_FAIL(CODA_MKDIR_STATS);
1411 return(EACCES);
1412 }
1413
1414 if (len+1 > CODA_MAXNAMLEN) {
1415 *vpp = (struct vnode *)0;
1416 MARK_INT_FAIL(CODA_MKDIR_STATS);
1417 return(EACCES);
1418 }
1419
1420 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1421
1422 if (!error) {
1423 if (coda_find(&VFid) != NULL)
1424 panic("cnode existed for newly created directory!");
1425
1426
1427 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1428 *vpp = CTOV(cp);
1429
1430 /* enter the new vnode in the Name Cache */
1431 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1432
1433 /* as a side effect, enter "." and ".." for the directory */
1434 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1435 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1436
1437 if (coda_attr_cache) {
1438 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1439 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1440 }
1441
1442 /* Invalidate the parent's attr cache, the modification time has changed */
1443 VTOC(dvp)->c_flags &= ~C_VATTR;
1444
1445 CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
1446 coda_f2s(&VFid), error)); )
1447 } else {
1448 *vpp = (struct vnode *)0;
1449 CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
1450 }
1451
1452 /*
1453 * Currently, all mkdirs explicitly vput their dvp's.
1454 * It also appears that we *must* lock the vpp, since
1455 * lockleaf isn't set, but someone down the road is going
1456 * to try to unlock the new directory.
1457 */
1458 vput(dvp);
1459 if (!error) {
1460 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1461 panic("coda_mkdir: couldn't lock child");
1462 }
1463 }
1464
1465 /* Per vnodeops(9), free name except on success and SAVESTART. */
1466 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1467 PNBUF_PUT(cnp->cn_pnbuf);
1468 }
1469 return(error);
1470 }
1471
1472 int
1473 coda_rmdir(void *v)
1474 {
1475 /* true args */
1476 struct vop_rmdir_args *ap = v;
1477 struct vnode *dvp = ap->a_dvp;
1478 struct cnode *dcp = VTOC(dvp);
1479 struct vnode *vp = ap->a_vp;
1480 struct componentname *cnp = ap->a_cnp;
1481 kauth_cred_t cred = cnp->cn_cred;
1482 struct lwp *l = curlwp;
1483 /* true args */
1484 int error;
1485 const char *nm = cnp->cn_nameptr;
1486 int len = cnp->cn_namelen;
1487 struct cnode *cp;
1488
1489 MARK_ENTRY(CODA_RMDIR_STATS);
1490
1491 /* Check for rmdir of control object. */
1492 if (IS_CTL_NAME(dvp, nm, len)) {
1493 MARK_INT_FAIL(CODA_RMDIR_STATS);
1494 return(ENOENT);
1495 }
1496
1497 /* Can't remove . in self. */
1498 if (dvp == vp) {
1499 printf("coda_rmdir: dvp == vp\n");
1500 error = EINVAL;
1501 goto exit;
1502 }
1503
1504 /*
1505 * The caller may not have adequate permissions, and the venus
1506 * operation may fail, but it doesn't hurt from a correctness
1507 * viewpoint to invalidate cache entries.
1508 * XXX Why isn't this done after the venus_rmdir call?
1509 */
1510 /* Look up child in name cache (by name, from parent). */
1511 cp = coda_nc_lookup(dcp, nm, len, cred);
1512 /* If found, remove all children of the child (., ..). */
1513 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1514
1515 /* Remove child's own entry. */
1516 coda_nc_zapfile(dcp, nm, len);
1517
1518 /* Invalidate parent's attr cache (the modification time has changed). */
1519 dcp->c_flags &= ~C_VATTR;
1520
1521 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1522
1523 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1524
1525 exit:
1526 /* vput both vnodes */
1527 vput(dvp);
1528 if (dvp == vp) {
1529 vrele(vp);
1530 } else {
1531 vput(vp);
1532 }
1533
1534 return(error);
1535 }
1536
1537 int
1538 coda_symlink(void *v)
1539 {
1540 /* true args */
1541 struct vop_symlink_args *ap = v;
1542 struct vnode *dvp = ap->a_dvp;
1543 struct cnode *dcp = VTOC(dvp);
1544 /* a_vpp is used in place below */
1545 struct componentname *cnp = ap->a_cnp;
1546 struct vattr *tva = ap->a_vap;
1547 char *path = ap->a_target;
1548 kauth_cred_t cred = cnp->cn_cred;
1549 struct lwp *l = curlwp;
1550 /* locals */
1551 int error;
1552 u_long saved_cn_flags;
1553 const char *nm = cnp->cn_nameptr;
1554 int len = cnp->cn_namelen;
1555 int plen = strlen(path);
1556
1557 /*
1558 * Here's the strategy for the moment: perform the symlink, then
1559 * do a lookup to grab the resulting vnode. I know this requires
1560 	 * two communications with Venus for a new symbolic link, but
1561 * that's the way the ball bounces. I don't yet want to change
1562 * the way the Mach symlink works. When Mach support is
1563 * deprecated, we should change symlink so that the common case
1564 * returns the resultant vnode in a vpp argument.
1565 */
1566
1567 MARK_ENTRY(CODA_SYMLINK_STATS);
1568
1569 /* Check for symlink of control object. */
1570 if (IS_CTL_NAME(dvp, nm, len)) {
1571 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1572 error = EACCES;
1573 goto exit;
1574 }
1575
1576 if (plen+1 > CODA_MAXPATHLEN) {
1577 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1578 error = EINVAL;
1579 goto exit;
1580 }
1581
1582 if (len+1 > CODA_MAXNAMLEN) {
1583 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1584 error = EINVAL;
1585 goto exit;
1586 }
1587
1588 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1589
1590 /* Invalidate the parent's attr cache (modification time has changed). */
1591 dcp->c_flags &= ~C_VATTR;
1592
1593 if (!error) {
1594 /*
1595 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1596 * these are defined only for VOP_LOOKUP. We desire to reuse
1597 	 * cnp for a VOP_LOOKUP operation, and must be sure not to pass on
1598 	 * stray flags that were passed to us.  Such stray flags can occur because
1599 * sys_symlink makes a namei call and then reuses the
1600 * componentname structure.
1601 */
1602 /*
1603 * XXX Arguably we should create our own componentname structure
1604 * and not reuse the one that was passed in.
1605 */
1606 saved_cn_flags = cnp->cn_flags;
1607 cnp->cn_flags &= ~(MODMASK | OPMASK);
1608 cnp->cn_flags |= LOOKUP;
1609 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1610 cnp->cn_flags = saved_cn_flags;
1611 /* Either an error occurs, or ap->a_vpp is locked. */
1612 }
1613
1614 exit:
1615 	/* unlock and drop the reference on the parent */
1616 vput(dvp);
1617
1618 /* Per vnodeops(9), free name except on success and SAVESTART. */
1619 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1620 PNBUF_PUT(cnp->cn_pnbuf);
1621 }
1622
1623 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1624 return(error);
1625 }
1626
1627 /*
1628 * Read directory entries.
1629 */
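/*
 * As with coda_rdwr(), the request is redirected to the container
 * vnode, doing an "internal" VOP_OPEN/VOP_CLOSE pair if the directory
 * is not already open.
 */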
1630 int
1631 coda_readdir(void *v)
1632 {
1633 /* true args */
1634 struct vop_readdir_args *ap = v;
1635 struct vnode *vp = ap->a_vp;
1636 struct cnode *cp = VTOC(vp);
1637 struct uio *uiop = ap->a_uio;
1638 kauth_cred_t cred = ap->a_cred;
1639 int *eofflag = ap->a_eofflag;
1640 off_t **cookies = ap->a_cookies;
1641 int *ncookies = ap->a_ncookies;
1642 /* upcall decl */
1643 /* locals */
1644 int error = 0;
1645
1646 MARK_ENTRY(CODA_READDIR_STATS);
1647
1648 CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )
1649
1650 /* Check for readdir of control object. */
1651 if (IS_CTL_VP(vp)) {
1652 MARK_INT_FAIL(CODA_READDIR_STATS);
1653 return(ENOENT);
1654 }
1655
1656 {
1657 /* Redirect the request to UFS. */
1658
1659 /* If directory is not already open do an "internal open" on it. */
1660 int opened_internally = 0;
1661 if (cp->c_ovp == NULL) {
1662 opened_internally = 1;
1663 MARK_INT_GEN(CODA_OPEN_STATS);
1664 error = VOP_OPEN(vp, FREAD, cred);
1665 #ifdef CODA_VERBOSE
1666 printf("coda_readdir: Internally Opening %p\n", vp);
1667 #endif
1668 if (error) return(error);
1669 } else
1670 vp = cp->c_ovp;
1671
1672 /* Have UFS handle the call. */
1673 CODADEBUG(CODA_READDIR, myprintf((
1674 "indirect readdir: fid = %s, refcnt = %d\n",
1675 coda_f2s(&cp->c_fid), vp->v_usecount)); )
1676 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1677 if (error)
1678 MARK_INT_FAIL(CODA_READDIR_STATS);
1679 else
1680 MARK_INT_SAT(CODA_READDIR_STATS);
1681
1682 /* Do an "internal close" if necessary. */
1683 if (opened_internally) {
1684 MARK_INT_GEN(CODA_CLOSE_STATS);
1685 (void)VOP_CLOSE(vp, FREAD, cred);
1686 }
1687 }
1688
1689 return(error);
1690 }
1691
1692 /*
1693 * Convert from file system blocks to device blocks
1694 */
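/*
 * Coda vnodes are backed by container files rather than raw device
 * blocks, so bmap (and strategy below) are not meaningfully
 * implemented; both simply fail with EINVAL.
 */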
1695 int
1696 coda_bmap(void *v)
1697 {
1698 /* XXX on the global proc */
1699 /* true args */
1700 struct vop_bmap_args *ap = v;
1701 struct vnode *vp __unused = ap->a_vp; /* file's vnode */
1702 daddr_t bn __unused = ap->a_bn; /* fs block number */
1703 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1704 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1705 struct lwp *l __unused = curlwp;
1706 /* upcall decl */
1707 /* locals */
1708
1709 *vpp = (struct vnode *)0;
1710 myprintf(("coda_bmap called!\n"));
1711 return(EINVAL);
1712 }
1713
1714 /*
1715 * I don't think the following two things are used anywhere, so I've
1716 * commented them out
1717 *
1718 * struct buf *async_bufhead;
1719 * int async_daemon_count;
1720 */
1721 int
1722 coda_strategy(void *v)
1723 {
1724 /* true args */
1725 struct vop_strategy_args *ap = v;
1726 struct buf *bp __unused = ap->a_bp;
1727 struct lwp *l __unused = curlwp;
1728 /* upcall decl */
1729 /* locals */
1730
1731 myprintf(("coda_strategy called! "));
1732 return(EINVAL);
1733 }
1734
1735 int
1736 coda_reclaim(void *v)
1737 {
1738 /* true args */
1739 struct vop_reclaim_args *ap = v;
1740 struct vnode *vp = ap->a_vp;
1741 struct cnode *cp = VTOC(vp);
1742 /* upcall decl */
1743 /* locals */
1744
1745 /*
1746 	 * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1747 */
1748 ENTRY;
1749
1750 if (IS_UNMOUNTING(cp)) {
1751 #ifdef DEBUG
1752 if (VTOC(vp)->c_ovp) {
1753 if (IS_UNMOUNTING(cp))
1754 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1755 }
1756 #endif
1757 } else {
1758 #ifdef OLD_DIAGNOSTIC
1759 if (vp->v_usecount != 0)
1760 		printf("coda_reclaim: pushing active %p\n", vp);
1761 if (VTOC(vp)->c_ovp) {
1762 panic("coda_reclaim: c_ovp not void");
1763 }
1764 #endif
1765 }
1766 cache_purge(vp);
1767 coda_free(VTOC(vp));
1768 SET_VTOC(vp) = NULL;
1769 return (0);
1770 }
1771
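/*
 * Vnode locking is delegated to the generic vnode lock (vp->v_lock) via
 * vlockmgr(); coda_lock, coda_unlock and coda_islocked below are thin
 * wrappers around it.
 */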
1772 int
1773 coda_lock(void *v)
1774 {
1775 /* true args */
1776 struct vop_lock_args *ap = v;
1777 struct vnode *vp = ap->a_vp;
1778 struct cnode *cp = VTOC(vp);
1779 int flags = ap->a_flags;
1780 /* upcall decl */
1781 /* locals */
1782
1783 ENTRY;
1784
1785 if (coda_lockdebug) {
1786 myprintf(("Attempting lock on %s\n",
1787 coda_f2s(&cp->c_fid)));
1788 }
1789
1790 if ((flags & LK_INTERLOCK) != 0) {
1791 mutex_exit(&vp->v_interlock);
1792 flags &= ~LK_INTERLOCK;
1793 }
1794
1795 return (vlockmgr(&vp->v_lock, flags));
1796 }
1797
1798 int
1799 coda_unlock(void *v)
1800 {
1801 /* true args */
1802 struct vop_unlock_args *ap = v;
1803 struct vnode *vp = ap->a_vp;
1804 struct cnode *cp = VTOC(vp);
1805 /* upcall decl */
1806 /* locals */
1807
1808 ENTRY;
1809 if (coda_lockdebug) {
1810 myprintf(("Attempting unlock on %s\n",
1811 coda_f2s(&cp->c_fid)));
1812 }
1813
1814 return (vlockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE));
1815 }
1816
1817 int
1818 coda_islocked(void *v)
1819 {
1820 /* true args */
1821 struct vop_islocked_args *ap = v;
1822 ENTRY;
1823
1824 return (vlockstatus(&ap->a_vp->v_lock));
1825 }
1826
1827 /*
1828 * Given a device and inode, obtain a locked vnode. One reference is
1829 * obtained and passed back to the caller.
1830 */
1831 int
1832 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1833 {
1834 int error;
1835 struct mount *mp;
1836
1837 /* Obtain mount point structure from device. */
1838 if (!(mp = devtomp(dev))) {
1839 myprintf(("coda_grab_vnode: devtomp(0x%llx) returns NULL\n",
1840 (unsigned long long)dev));
1841 return(ENXIO);
1842 }
1843
1844 /*
1845 * Obtain vnode from mount point and inode.
1846 * XXX VFS_VGET does not clearly define locked/referenced state of
1847 * returned vnode.
1848 */
1849 error = VFS_VGET(mp, ino, vpp);
1850 if (error) {
1851 myprintf(("coda_grab_vnode: iget/vget(0x%llx, %llu) returns %p, err %d\n",
1852 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1853 return(ENOENT);
1854 }
1855 return(0);
1856 }
1857
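/* Debugging helper: print the interesting fields of a struct vattr. */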
1858 void
1859 print_vattr(struct vattr *attr)
1860 {
1861 const char *typestr;
1862
1863 switch (attr->va_type) {
1864 case VNON:
1865 typestr = "VNON";
1866 break;
1867 case VREG:
1868 typestr = "VREG";
1869 break;
1870 case VDIR:
1871 typestr = "VDIR";
1872 break;
1873 case VBLK:
1874 typestr = "VBLK";
1875 break;
1876 case VCHR:
1877 typestr = "VCHR";
1878 break;
1879 case VLNK:
1880 typestr = "VLNK";
1881 break;
1882 case VSOCK:
1883 typestr = "VSCK";
1884 break;
1885 case VFIFO:
1886 typestr = "VFFO";
1887 break;
1888 case VBAD:
1889 typestr = "VBAD";
1890 break;
1891 default:
1892 typestr = "????";
1893 break;
1894 }
1895
1896
1897 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1898 typestr, (int)attr->va_mode, (int)attr->va_uid,
1899 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1900
1901 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1902 (int)attr->va_fileid, (int)attr->va_nlink,
1903 (int)attr->va_size,
1904 (int)attr->va_blocksize,(int)attr->va_bytes));
1905 myprintf((" gen %ld flags %ld vaflags %d\n",
1906 attr->va_gen, attr->va_flags, attr->va_vaflags));
1907 myprintf((" atime sec %d nsec %d\n",
1908 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1909 myprintf((" mtime sec %d nsec %d\n",
1910 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1911 myprintf((" ctime sec %d nsec %d\n",
1912 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1913 }
1914
1915 /* How to print a ucred */
1916 void
1917 print_cred(kauth_cred_t cred)
1918 {
1919
1920 uint16_t ngroups;
1921 int i;
1922
1923 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1924 kauth_cred_geteuid(cred)));
1925
1926 ngroups = kauth_cred_ngroups(cred);
1927 for (i=0; i < ngroups; i++)
1928 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1929 myprintf(("\n"));
1930
1931 }
1932
1933 /*
1934 * Return a vnode for the given fid.
1935 * If no cnode exists for this fid create one and put it
1936 * in a table hashed by coda_f2i(). If the cnode for
1937  * this fid is already in the table, return it (the ref count is
1938  * incremented by coda_find).  The cnode will be flushed from the
1939 * table when coda_inactive calls coda_unsave.
1940 */
1941 struct cnode *
1942 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1943 {
1944 struct cnode *cp;
1945 int err;
1946
1947 if ((cp = coda_find(fid)) == NULL) {
1948 struct vnode *vp;
1949
1950 cp = coda_alloc();
1951 cp->c_fid = *fid;
1952
1953 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
1954 if (err) {
1955 panic("coda: getnewvnode returned error %d", err);
1956 }
1957 vp->v_data = cp;
1958 vp->v_type = type;
1959 cp->c_vnode = vp;
1960 uvm_vnp_setsize(vp, 0);
1961 coda_save(cp);
1962
1963 } else {
1964 vref(CTOV(cp));
1965 }
1966
1967 return cp;
1968 }
1969
1970 /*
1971 * coda_getpages may be called on a vnode which has not been opened,
1972 * e.g. to fault in pages to execute a program. In that case, we must
1973 * open the file to get the container. The vnode may or may not be
1974 * locked, and we must leave it in the same state.
1975 * XXX The protocol requires v_uobj.vmobjlock to be
1976 * held by caller, but this isn't documented in vnodeops(9) or vnode_if.src.
1977 */
1978 int
1979 coda_getpages(void *v)
1980 {
1981 struct vop_getpages_args /* {
1982 struct vnode *a_vp;
1983 voff_t a_offset;
1984 struct vm_page **a_m;
1985 int *a_count;
1986 int a_centeridx;
1987 vm_prot_t a_access_type;
1988 int a_advice;
1989 int a_flags;
1990 } */ *ap = v;
1991 struct vnode *vp = ap->a_vp;
1992 struct cnode *cp = VTOC(vp);
1993 struct lwp *l = curlwp;
1994 kauth_cred_t cred = l->l_cred;
1995 int error, cerror;
1996 int waslocked; /* 1 if vnode lock was held on entry */
1997 int didopen = 0; /* 1 if we opened container file */
1998
1999 /*
2000 * Handle a case that uvm_fault doesn't quite use yet.
2001 	 * See layer_vnops.c for inspiration.
2002 */
2003 if (ap->a_flags & PGO_LOCKED) {
2004 return EBUSY;
2005 }
2006
2007 /* Check for control object. */
2008 if (IS_CTL_VP(vp)) {
2009 printf("coda_getpages: control object %p\n", vp);
2010 mutex_exit(&vp->v_uobj.vmobjlock);
2011 return(EINVAL);
2012 }
2013
2014 /*
2015 * XXX It's really not ok to be releasing the lock we get,
2016 * because we could be overlapping with another call to
2017 * getpages and drop a lock they are relying on. We need to
2018 * figure out whether getpages ever is called holding the
2019 * lock, and if we should serialize getpages calls by some
2020 * mechanism.
2021 */
2022 waslocked = VOP_ISLOCKED(vp);
2023
2024 /* Drop the vmobject lock. */
2025 mutex_exit(&vp->v_uobj.vmobjlock);
2026
2027 /* Get container file if not already present. */
2028 if (cp->c_ovp == NULL) {
2029 /*
2030 * VOP_OPEN requires a locked vnode. We must avoid
2031 * locking the vnode if it is already locked, and
2032 * leave it in the same state on exit.
2033 */
2034 if (waslocked == 0) {
2035 cerror = vn_lock(vp, LK_EXCLUSIVE);
2036 if (cerror) {
2037 printf("coda_getpages: can't lock vnode %p\n",
2038 vp);
2039 return cerror;
2040 }
2041 #if 0
2042 printf("coda_getpages: locked vnode %p\n", vp);
2043 #endif
2044 }
2045
2046 /*
2047 * Open file (causes upcall to venus).
2048 * XXX Perhaps we should not fully open the file, but
2049 * simply obtain a container file.
2050 */
2051 /* XXX Is it ok to do this while holding the simplelock? */
2052 cerror = VOP_OPEN(vp, FREAD, cred);
2053
2054 if (cerror) {
2055 printf("coda_getpages: cannot open vnode %p => %d\n",
2056 vp, cerror);
2057 if (waslocked == 0)
2058 VOP_UNLOCK(vp, 0);
2059 return cerror;
2060 }
2061
2062 #if 0
2063 printf("coda_getpages: opened vnode %p\n", vp);
2064 #endif
2065 didopen = 1;
2066 }
2067 KASSERT(cp->c_ovp != NULL);
2068
2069 /* Munge the arg structure to refer to the container vnode. */
2070 ap->a_vp = cp->c_ovp;
2071
2072 /* Get the lock on the container vnode, and call getpages on it. */
2073 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2074 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2075
2076 /* If we opened the vnode, we must close it. */
2077 if (didopen) {
2078 /*
2079 * VOP_CLOSE requires a locked vnode, but we are still
2080 * holding the lock (or riding a caller's lock).
2081 */
2082 cerror = VOP_CLOSE(vp, FREAD, cred);
2083 if (cerror != 0)
2084 /* XXX How should we handle this? */
2085 printf("coda_getpages: closed vnode %p -> %d\n",
2086 vp, cerror);
2087
2088 /* If we obtained a lock, drop it. */
2089 if (waslocked == 0)
2090 VOP_UNLOCK(vp, 0);
2091 }
2092
2093 return error;
2094 }
2095
2096 /*
2097 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2098 * documented in vnodeops(9). XXX vnode_if.src doesn't say this.
2099 */
2100 int
2101 coda_putpages(void *v)
2102 {
2103 struct vop_putpages_args /* {
2104 struct vnode *a_vp;
2105 voff_t a_offlo;
2106 voff_t a_offhi;
2107 int a_flags;
2108 } */ *ap = v;
2109 struct vnode *vp = ap->a_vp;
2110 struct cnode *cp = VTOC(vp);
2111 int error;
2112
2113 /* Drop the vmobject lock. */
2114 mutex_exit(&vp->v_uobj.vmobjlock);
2115
2116 /* Check for control object. */
2117 if (IS_CTL_VP(vp)) {
2118 printf("coda_putpages: control object %p\n", vp);
2119 return(EINVAL);
2120 }
2121
2122 /*
2123 * If container object is not present, then there are no pages
2124 * to put; just return without error. This happens all the
2125 * time, apparently during discard of a closed vnode (which
2126 * trivially can't have dirty pages).
2127 */
2128 if (cp->c_ovp == NULL)
2129 return 0;
2130
2131 /* Munge the arg structure to refer to the container vnode. */
2132 ap->a_vp = cp->c_ovp;
2133
2134 /* Get the lock on the container vnode, and call putpages on it. */
2135 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2136 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2137
2138 return error;
2139 }
2140