1 /* $NetBSD: coda_vnops.c,v 1.58 2007/04/15 12:45:39 gdt Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.58 2007/04/15 12:45:39 gdt Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/user.h>
64 #include <sys/kauth.h>
65
66 #include <miscfs/genfs/genfs.h>
67
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76
77 /*
78 * These flags select various performance enhancements.
79 */
80 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
82 int coda_access_cache = 1; /* Set to handle some access checks directly */
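/*
 * coda_attr_cache is consulted in coda_getattr (and when caching the
 * results of create/mkdir), coda_symlink_cache in coda_readlink, and
 * coda_access_cache in coda_access.  Clearing a flag disables the
 * corresponding shortcut, so every such request goes to Venus.
 */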
83
84 /* structure to keep track of vnode operation calls */
85
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
87
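/*
 * The MARK_* macros below bump the per-operation counters in
 * coda_vnodeopstats: MARK_ENTRY counts calls, while MARK_INT_SAT,
 * MARK_INT_FAIL and MARK_INT_GEN bump the sat_intrn, unsat_intrn and
 * gen_intrn counters (the last is used for internally generated
 * operations, such as the internal opens in coda_rdwr and coda_readdir).
 */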
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
92
93 /* Delay used when printing debugging messages */
94 int coda_printf_delay = 0; /* in microseconds */
95 int coda_vnop_print_entry = 0;
96 static int coda_lockdebug = 0;
97
98 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
99
100 /* Definition of the vnode operation vector */
101
102 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
103 { &vop_default_desc, coda_vop_error },
104 { &vop_lookup_desc, coda_lookup }, /* lookup */
105 { &vop_create_desc, coda_create }, /* create */
106 { &vop_mknod_desc, coda_vop_error }, /* mknod */
107 { &vop_open_desc, coda_open }, /* open */
108 { &vop_close_desc, coda_close }, /* close */
109 { &vop_access_desc, coda_access }, /* access */
110 { &vop_getattr_desc, coda_getattr }, /* getattr */
111 { &vop_setattr_desc, coda_setattr }, /* setattr */
112 { &vop_read_desc, coda_read }, /* read */
113 { &vop_write_desc, coda_write }, /* write */
114 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
115 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
116 { &vop_mmap_desc, genfs_mmap }, /* mmap */
117 { &vop_fsync_desc, coda_fsync }, /* fsync */
118 { &vop_remove_desc, coda_remove }, /* remove */
119 { &vop_link_desc, coda_link }, /* link */
120 { &vop_rename_desc, coda_rename }, /* rename */
121 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
122 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
123 { &vop_symlink_desc, coda_symlink }, /* symlink */
124 { &vop_readdir_desc, coda_readdir }, /* readdir */
125 { &vop_readlink_desc, coda_readlink }, /* readlink */
126 { &vop_abortop_desc, coda_abortop }, /* abortop */
127 { &vop_inactive_desc, coda_inactive }, /* inactive */
128 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
129 { &vop_lock_desc, coda_lock }, /* lock */
130 { &vop_unlock_desc, coda_unlock }, /* unlock */
131 { &vop_bmap_desc, coda_bmap }, /* bmap */
132 { &vop_strategy_desc, coda_strategy }, /* strategy */
133 { &vop_print_desc, coda_vop_error }, /* print */
134 { &vop_islocked_desc, coda_islocked }, /* islocked */
135 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
136 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
137 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
138 { &vop_lease_desc, coda_vop_nop }, /* lease */
139 { &vop_seek_desc, genfs_seek }, /* seek */
140 { &vop_poll_desc, genfs_poll }, /* poll */
141 { &vop_getpages_desc, coda_getpages }, /* getpages */
142 { &vop_putpages_desc, coda_putpages }, /* putpages */
143 { NULL, NULL }
144 };
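/*
 * Operations without an explicit entry above fall through to
 * vop_default_desc and thus return EIO from coda_vop_error; advlock
 * and lease are deliberately mapped to the do-nothing coda_vop_nop.
 */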
145
146 const struct vnodeopv_desc coda_vnodeop_opv_desc =
147 { &coda_vnodeop_p, coda_vnodeop_entries };
148
149 /* Definitions of NetBSD vnodeop interfaces */
150
151 /*
152 * A generic error routine. Return EIO without looking at arguments.
153 */
154 int
155 coda_vop_error(void *anon) {
156 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
157
158 if (codadebug) {
159 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
160 (*desc)->vdesc_name));
161 }
162
163 return EIO;
164 }
165
166 /* A generic do-nothing. For lease_check, advlock */
167 int
168 coda_vop_nop(void *anon) {
169 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
170
171 if (codadebug) {
172 myprintf(("Vnode operation %s called, but unsupported\n",
173 (*desc)->vdesc_name));
174 }
175 return (0);
176 }
177
178 int
179 coda_vnodeopstats_init(void)
180 {
181 int i;
182
183 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
184 coda_vnodeopstats[i].opcode = i;
185 coda_vnodeopstats[i].entries = 0;
186 coda_vnodeopstats[i].sat_intrn = 0;
187 coda_vnodeopstats[i].unsat_intrn = 0;
188 coda_vnodeopstats[i].gen_intrn = 0;
189 }
190
191 return 0;
192 }
193
194 /*
195 * XXX The entire relationship between VOP_OPEN and having a container
196 * file (via venus_open) needs to be reexamined. In particular, it's
197 * valid to open/mmap/close and then reference. Instead of doing
198 * VOP_OPEN when getpages needs a container, we should do the
199 * venus_open part, and record that the vnode has opened the container
200 * for getpages, and do the matching logical close on coda_inactive.
201 * Further, coda_rdwr needs a container file, and sometimes needs to
202 * do the equivalent of open (core dumps).
203 */
204 /*
205 * coda_open calls Venus to return the device and inode of the
206 * container file, and then obtains a vnode for that file. The
207 * container vnode is stored in the coda vnode, and a reference is
208 * added for each open file.
209 */
210 int
211 coda_open(void *v)
212 {
213 /*
214 * NetBSD can pass the O_EXCL flag in mode, even though the check
215 * has already happened. Venus defensively assumes that if open
216 * is passed O_EXCL, it must be a bug. We strip the flag here.
217 */
218 /* true args */
219 struct vop_open_args *ap = v;
220 struct vnode *vp = ap->a_vp;
221 struct cnode *cp = VTOC(vp);
222 int flag = ap->a_mode & (~O_EXCL);
223 kauth_cred_t cred = ap->a_cred;
224 struct lwp *l = ap->a_l;
225 /* locals */
226 int error;
227 dev_t dev; /* container file device, inode, vnode */
228 ino_t inode;
229 struct vnode *container_vp;
230
231 MARK_ENTRY(CODA_OPEN_STATS);
232
233 /* Check for open of control file. */
234 if (IS_CTL_VP(vp)) {
235 /* if (WRITABLE(flag)) */
236 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
237 MARK_INT_FAIL(CODA_OPEN_STATS);
238 return(EACCES);
239 }
240 MARK_INT_SAT(CODA_OPEN_STATS);
241 return(0);
242 }
243
244 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, l, &dev, &inode);
245 if (error)
246 return (error);
247 if (!error) {
248 CODADEBUG( CODA_OPEN,myprintf(("open: dev %d inode %llu result %d\n",
249 dev, (unsigned long long)inode, error)); )
250 }
251
252 /*
253 * Obtain locked and referenced container vnode from container
254 * device/inode.
255 */
256 error = coda_grab_vnode(dev, inode, &container_vp);
257 if (error)
258 return (error);
259
260 /*
261 * Keep a reference to the coda vnode until the close comes in.
262 * XXX This does not make sense. Try without.
263 */
264 vref(vp);
265
266 /* Save the vnode pointer for the container file. */
267 if (cp->c_ovp == NULL) {
268 cp->c_ovp = container_vp;
269 } else {
270 if (cp->c_ovp != container_vp)
271 /*
272 * Perhaps venus returned a different container, or
273 * something else went wrong.
274 */
275 panic("coda_open: cp->c_ovp != container_vp");
276 }
277 cp->c_ocount++;
278
279 /* Flush the attribute cache if writing the file. */
280 if (flag & FWRITE) {
281 cp->c_owrite++;
282 cp->c_flags &= ~C_VATTR;
283 }
284
285 /* Save the <device, inode> pair for the cache file to speed
286 up subsequent page_read's. */
287 cp->c_device = dev;
288 cp->c_inode = inode;
289
290 /* Open the cache file. */
291 error = VOP_OPEN(container_vp, flag, cred, l);
292 /*
293 * Drop the lock on the container, after we have done VOP_OPEN
294 * (which requires a locked vnode).
295 */
296 VOP_UNLOCK(container_vp, 0);
297 return(error);
298 }
299
300 /*
301 * Close the cache file used for I/O and notify Venus.
302 */
303 int
304 coda_close(void *v)
305 {
306 /* true args */
307 struct vop_close_args *ap = v;
308 struct vnode *vp = ap->a_vp;
309 struct cnode *cp = VTOC(vp);
310 int flag = ap->a_fflag;
311 kauth_cred_t cred = ap->a_cred;
312 struct lwp *l = ap->a_l;
313 /* locals */
314 int error;
315
316 MARK_ENTRY(CODA_CLOSE_STATS);
317
318 /* Check for close of control file. */
319 if (IS_CTL_VP(vp)) {
320 MARK_INT_SAT(CODA_CLOSE_STATS);
321 return(0);
322 }
323
324 /*
325 * XXX The IS_UNMOUNTING part of this is very suspect.
326 */
327 if (IS_UNMOUNTING(cp)) {
328 if (cp->c_ovp) {
329 #ifdef CODA_VERBOSE
330 printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
331 vp->v_usecount, cp->c_ovp, vp, cp);
332 #endif
333 #ifdef hmm
334 vgone(cp->c_ovp);
335 #else
336 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
337 VOP_CLOSE(cp->c_ovp, flag, cred, l); /* Do errors matter here? */
338 vput(cp->c_ovp);
339 #endif
340 } else {
341 #ifdef CODA_VERBOSE
342 printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
343 #endif
344 }
345 return ENODEV;
346 }
347
348 /* Lock the container node, and VOP_CLOSE it. */
349 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
350 VOP_CLOSE(cp->c_ovp, flag, cred, l); /* Do errors matter here? */
351 /*
352 * Drop the lock we just obtained, and vrele the container vnode.
353 * Decrement reference counts, and clear container vnode pointer on
354 * last close.
355 */
356 vput(cp->c_ovp);
357 if (flag & FWRITE)
358 --cp->c_owrite;
359 if (--cp->c_ocount == 0)
360 cp->c_ovp = NULL;
361
362 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, l);
363
364 /* Release reference to coda vnode taken during open. */
365 vrele(CTOV(cp));
366
367 CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
368 return(error);
369 }
370
371 int
372 coda_read(void *v)
373 {
374 struct vop_read_args *ap = v;
375
376 ENTRY;
377 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
378 ap->a_ioflag, ap->a_cred, curlwp));
379 }
380
381 int
382 coda_write(void *v)
383 {
384 struct vop_write_args *ap = v;
385
386 ENTRY;
387 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
388 ap->a_ioflag, ap->a_cred, curlwp));
389 }
390
391 int
392 coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
393 kauth_cred_t cred, struct lwp *l)
394 {
395 /* upcall decl */
396 /* NOTE: container file operation!!! */
397 /* locals */
398 struct cnode *cp = VTOC(vp);
399 struct vnode *cfvp = cp->c_ovp;
400 struct proc *p = l->l_proc;
401 int opened_internally = 0;
402 int error = 0;
403
404 MARK_ENTRY(CODA_RDWR_STATS);
405
406 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
407 uiop->uio_iov->iov_base,
408 (unsigned long) uiop->uio_resid,
409 (long long) uiop->uio_offset)); )
410
411 /* Check for rdwr of control object. */
412 if (IS_CTL_VP(vp)) {
413 MARK_INT_FAIL(CODA_RDWR_STATS);
414 return(EINVAL);
415 }
416
417 /* Redirect the request to UFS. */
418
419 /*
420 * If file is not already open this must be a page
421 * {read,write} request. Iget the cache file's inode
422 * pointer if we still have its <device, inode> pair.
423 * Otherwise, we must do an internal open to derive the
424 * pair.
425 * XXX Integrate this into a coherent strategy for container
426 * file acquisition.
427 */
428 if (cfvp == NULL) {
429 /*
430 * If we're dumping core, do the internal open. Otherwise
431 * venus won't have the correct size of the core when
432 * it's completely written.
433 */
434 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
435 printf("coda_rdwr: grabbing container vnode, losing reference\n");
436 /* Get locked and refed vnode. */
437 error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
438 if (error) {
439 MARK_INT_FAIL(CODA_RDWR_STATS);
440 return(error);
441 }
442 /*
443 * Drop lock.
444 * XXX Where is the reference released?
445 */
446 VOP_UNLOCK(cfvp, 0);
447 }
448 else {
449 printf("coda_rdwr: internal VOP_OPEN\n");
450 opened_internally = 1;
451 MARK_INT_GEN(CODA_OPEN_STATS);
452 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE),
453 cred, l);
454 #ifdef CODA_VERBOSE
455 printf("coda_rdwr: Internally Opening %p\n", vp);
456 #endif
457 if (error) {
458 MARK_INT_FAIL(CODA_RDWR_STATS);
459 return(error);
460 }
461 cfvp = cp->c_ovp;
462 }
463 }
464
465 /* Have UFS handle the call. */
466 CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
467 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
468
469 if (rw == UIO_READ) {
470 error = VOP_READ(cfvp, uiop, ioflag, cred);
471 } else {
472 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
473 }
474
475 if (error)
476 MARK_INT_FAIL(CODA_RDWR_STATS);
477 else
478 MARK_INT_SAT(CODA_RDWR_STATS);
479
480 /* Do an internal close if necessary. */
481 if (opened_internally) {
482 MARK_INT_GEN(CODA_CLOSE_STATS);
483 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred, l);
484 }
485
486 /* Invalidate cached attributes if writing. */
487 if (rw == UIO_WRITE)
488 cp->c_flags &= ~C_VATTR;
489 return(error);
490 }
491
492 int
493 coda_ioctl(void *v)
494 {
495 /* true args */
496 struct vop_ioctl_args *ap = v;
497 struct vnode *vp = ap->a_vp;
498 int com = ap->a_command;
499 void *data = ap->a_data;
500 int flag = ap->a_fflag;
501 kauth_cred_t cred = ap->a_cred;
502 struct lwp *l = ap->a_l;
503 /* locals */
504 int error;
505 struct vnode *tvp;
506 struct nameidata ndp;
507 struct PioctlData *iap = (struct PioctlData *)data;
508
509 MARK_ENTRY(CODA_IOCTL_STATS);
510
511 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
512
513 /* Don't check for an operation on a dying object; for the ctlvp it
514 shouldn't matter. */
515
516 /* Must be control object to succeed. */
517 if (!IS_CTL_VP(vp)) {
518 MARK_INT_FAIL(CODA_IOCTL_STATS);
519 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
520 return (EOPNOTSUPP);
521 }
522 /* Look up the pathname. */
523
524 /* Should we use the name cache here? It would get it from
525 lookupname sooner or later anyway, right? */
526
527 NDINIT(&ndp, LOOKUP, (iap->follow ? FOLLOW : NOFOLLOW), UIO_USERSPACE,
528 iap->path, l);
529 error = namei(&ndp);
530 tvp = ndp.ni_vp;
531
532 if (error) {
533 MARK_INT_FAIL(CODA_IOCTL_STATS);
534 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
535 error));)
536 return(error);
537 }
538
539 /*
540 * Make sure this is a coda style cnode, but it may be a
541 * different vfsp
542 */
543 /* XXX: this totally violates the comment about vtagtype in vnode.h */
544 if (tvp->v_tag != VT_CODA) {
545 vrele(tvp);
546 MARK_INT_FAIL(CODA_IOCTL_STATS);
547 CODADEBUG(CODA_IOCTL,
548 myprintf(("coda_ioctl error: %s not a coda object\n",
549 iap->path));)
550 return(EINVAL);
551 }
552
553 if (iap->vi.in_size > VC_MAXDATASIZE) {
554 vrele(tvp);
555 return(EINVAL);
556 }
557 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data, cred, l);
558
559 if (error)
560 MARK_INT_FAIL(CODA_IOCTL_STATS);
561 else
562 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
563
564 vrele(tvp);
565 return(error);
566 }
567
568 /*
569 * To reduce the cost of a user-level venus, we cache attributes in
570 * the kernel. Each cnode has storage allocated for an attribute. If
571 * c_vattr is valid, return a reference to it. Otherwise, get the
572 * attributes from venus and store them in the cnode. There is some
573 * question if this method is a security leak. But I think that in
574 * order to make this call, the user must have done a lookup and
575 * opened the file, and therefore should already have access.
576 */
577 int
578 coda_getattr(void *v)
579 {
580 /* true args */
581 struct vop_getattr_args *ap = v;
582 struct vnode *vp = ap->a_vp;
583 struct cnode *cp = VTOC(vp);
584 struct vattr *vap = ap->a_vap;
585 kauth_cred_t cred = ap->a_cred;
586 struct lwp *l = ap->a_l;
587 /* locals */
588 int error;
589
590 MARK_ENTRY(CODA_GETATTR_STATS);
591
592 /* Check for getattr of control object. */
593 if (IS_CTL_VP(vp)) {
594 MARK_INT_FAIL(CODA_GETATTR_STATS);
595 return(ENOENT);
596 }
597
598 /* Check to see if the attributes have already been cached */
599 if (VALID_VATTR(cp)) {
600 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
601 coda_f2s(&cp->c_fid)));});
602 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
603 print_vattr(&cp->c_vattr); );
604
605 *vap = cp->c_vattr;
606 MARK_INT_SAT(CODA_GETATTR_STATS);
607 return(0);
608 }
609
610 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, l, vap);
611
612 if (!error) {
613 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
614 coda_f2s(&cp->c_fid), error)); )
615
616 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
617 print_vattr(vap); );
618
619 /* If not open for write, store attributes in cnode */
620 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
621 cp->c_vattr = *vap;
622 cp->c_flags |= C_VATTR;
623 }
624
625 }
626 return(error);
627 }
628
629 int
630 coda_setattr(void *v)
631 {
632 /* true args */
633 struct vop_setattr_args *ap = v;
634 struct vnode *vp = ap->a_vp;
635 struct cnode *cp = VTOC(vp);
636 struct vattr *vap = ap->a_vap;
637 kauth_cred_t cred = ap->a_cred;
638 struct lwp *l = ap->a_l;
639 /* locals */
640 int error;
641
642 MARK_ENTRY(CODA_SETATTR_STATS);
643
644 /* Check for setattr of control object. */
645 if (IS_CTL_VP(vp)) {
646 MARK_INT_FAIL(CODA_SETATTR_STATS);
647 return(ENOENT);
648 }
649
650 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
651 print_vattr(vap);
652 }
653 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, l);
654
655 if (!error)
656 cp->c_flags &= ~C_VATTR;
657
658 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
659 return(error);
660 }
661
662 int
663 coda_access(void *v)
664 {
665 /* true args */
666 struct vop_access_args *ap = v;
667 struct vnode *vp = ap->a_vp;
668 struct cnode *cp = VTOC(vp);
669 int mode = ap->a_mode;
670 kauth_cred_t cred = ap->a_cred;
671 struct lwp *l = ap->a_l;
672 /* locals */
673 int error;
674
675 MARK_ENTRY(CODA_ACCESS_STATS);
676
677 /* Check for access of control object. Only read access is
678 allowed on it. */
679 if (IS_CTL_VP(vp)) {
680 /* bogus hack - all will be marked as successes */
681 MARK_INT_SAT(CODA_ACCESS_STATS);
682 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
683 ? 0 : EACCES);
684 }
685
686 /*
687 * If the file is a directory, and we are checking exec (e.g. lookup)
688 * access, and the file is in the namecache, then the user must have
689 * lookup access to it.
690 */
691 if (coda_access_cache) {
692 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
693 if (coda_nc_lookup(cp, ".", 1, cred)) {
694 MARK_INT_SAT(CODA_ACCESS_STATS);
695 return(0); /* it was in the cache */
696 }
697 }
698 }
699
700 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, l);
701
702 return(error);
703 }
704
705 /*
706 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
707 * done. If a buffer has been saved in anticipation of a coda_create or
708 * a coda_remove, delete it.
709 */
710 /* ARGSUSED */
711 int
712 coda_abortop(void *v)
713 {
714 /* true args */
715 struct vop_abortop_args /* {
716 struct vnode *a_dvp;
717 struct componentname *a_cnp;
718 } */ *ap = v;
719 /* upcall decl */
720 /* locals */
721
722 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
723 PNBUF_PUT(ap->a_cnp->cn_pnbuf);
724 return (0);
725 }
726
727 int
728 coda_readlink(void *v)
729 {
730 /* true args */
731 struct vop_readlink_args *ap = v;
732 struct vnode *vp = ap->a_vp;
733 struct cnode *cp = VTOC(vp);
734 struct uio *uiop = ap->a_uio;
735 kauth_cred_t cred = ap->a_cred;
736 /* locals */
737 struct lwp *l = curlwp;
738 int error;
739 char *str;
740 int len;
741
742 MARK_ENTRY(CODA_READLINK_STATS);
743
744 /* Check for readlink of control object. */
745 if (IS_CTL_VP(vp)) {
746 MARK_INT_FAIL(CODA_READLINK_STATS);
747 return(ENOENT);
748 }
749
750 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
751 uiop->uio_rw = UIO_READ;
752 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
753 if (error)
754 MARK_INT_FAIL(CODA_READLINK_STATS);
755 else
756 MARK_INT_SAT(CODA_READLINK_STATS);
757 return(error);
758 }
759
760 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
761
762 if (!error) {
763 uiop->uio_rw = UIO_READ;
764 error = uiomove(str, len, uiop);
765
766 if (coda_symlink_cache) {
767 cp->c_symlink = str;
768 cp->c_symlen = len;
769 cp->c_flags |= C_SYMLINK;
770 } else
771 CODA_FREE(str, len);
772 }
773
774 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
775 return(error);
776 }
777
778 int
779 coda_fsync(void *v)
780 {
781 /* true args */
782 struct vop_fsync_args *ap = v;
783 struct vnode *vp = ap->a_vp;
784 struct cnode *cp = VTOC(vp);
785 kauth_cred_t cred = ap->a_cred;
786 struct lwp *l = ap->a_l;
787 /* locals */
788 struct vnode *convp = cp->c_ovp;
789 int error;
790
791 MARK_ENTRY(CODA_FSYNC_STATS);
792
793 /* Check for fsync on an unmounting object */
794 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
795 * after an unmount has been initiated. This is a Bad Thing,
796 * which we have to avoid. Not a legitimate failure for stats.
797 */
798 if (IS_UNMOUNTING(cp)) {
799 return(ENODEV);
800 }
801
802 /* Check for fsync of control object. */
803 if (IS_CTL_VP(vp)) {
804 MARK_INT_SAT(CODA_FSYNC_STATS);
805 return(0);
806 }
807
808 if (convp)
809 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0, l);
810
811 /*
812 * We can expect fsync on any vnode at all if venus is purging it.
813 * Venus can't very well answer the fsync request, now can it?
814 * Hopefully, it won't have to, because hopefully, venus preserves
815 * the (possibly untrue) invariant that it never purges an open
816 * vnode. Hopefully.
817 */
818 if (cp->c_flags & C_PURGING) {
819 return(0);
820 }
821
822 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, l);
823
824 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
825 return(error);
826 }
827
828 int
829 coda_inactive(void *v)
830 {
831 /* XXX - at the moment, inactive doesn't look at cred, and doesn't
832 have a proc pointer. Oops. */
833 /* true args */
834 struct vop_inactive_args *ap = v;
835 struct vnode *vp = ap->a_vp;
836 struct cnode *cp = VTOC(vp);
837 kauth_cred_t cred __attribute__((unused)) = NULL;
838 struct lwp *l __attribute__((unused)) = curlwp;
839 /* upcall decl */
840 /* locals */
841
842 /* We don't need to send inactive to venus - DCS */
843 MARK_ENTRY(CODA_INACTIVE_STATS);
844
845 if (IS_CTL_VP(vp)) {
846 MARK_INT_SAT(CODA_INACTIVE_STATS);
847 return 0;
848 }
849
850 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
851 coda_f2s(&cp->c_fid), vp->v_mount));)
852
853 /* If an array has been allocated to hold the symlink, deallocate it */
854 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
855 if (cp->c_symlink == NULL)
856 panic("coda_inactive: null symlink pointer in cnode");
857
858 CODA_FREE(cp->c_symlink, cp->c_symlen);
859 cp->c_flags &= ~C_SYMLINK;
860 cp->c_symlen = 0;
861 }
862
863 /* Remove it from the table so it can't be found. */
864 coda_unsave(cp);
865 if (vp->v_mount->mnt_data == NULL) {
866 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
867 panic("badness in coda_inactive");
868 }
869
870 if (IS_UNMOUNTING(cp)) {
871 #ifdef DEBUG
872 printf("coda_inactive: IS_UNMOUNTING use %d: vp %p, cp %p\n", vp->v_usecount, vp, cp);
873 if (cp->c_ovp != NULL)
874 printf("coda_inactive: cp->ovp != NULL use %d: vp %p, cp %p\n",
875 vp->v_usecount, vp, cp);
876 #endif
877 lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
878 } else {
879 #ifdef OLD_DIAGNOSTIC
880 if (CTOV(cp)->v_usecount) {
881 panic("coda_inactive: nonzero reference count");
882 }
883 if (cp->c_ovp != NULL) {
884 panic("coda_inactive: cp->ovp != NULL");
885 }
886 #endif
887 VOP_UNLOCK(vp, 0);
888 vgone(vp);
889 }
890
891 MARK_INT_SAT(CODA_INACTIVE_STATS);
892 return(0);
893 }
894
895 /*
896 * Remote file system operations having to do with directory manipulation.
897 */
898
899 /*
900 * It appears that in NetBSD, lookup is supposed to return the vnode locked.
901 */
902 int
903 coda_lookup(void *v)
904 {
905 /* true args */
906 struct vop_lookup_args *ap = v;
907 /* (locked) vnode of dir in which to do lookup */
908 struct vnode *dvp = ap->a_dvp;
909 struct cnode *dcp = VTOC(dvp);
910 /* output variable for result */
911 struct vnode **vpp = ap->a_vpp;
912 /* name to lookup */
913 struct componentname *cnp = ap->a_cnp;
914 kauth_cred_t cred = cnp->cn_cred;
915 struct lwp *l = cnp->cn_lwp;
916 /* locals */
917 struct cnode *cp;
918 const char *nm = cnp->cn_nameptr;
919 int len = cnp->cn_namelen;
920 int flags = cnp->cn_flags;
921 int isdot;
922 CodaFid VFid;
923 int vtype;
924 int error = 0;
925
926 MARK_ENTRY(CODA_LOOKUP_STATS);
927
928 CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
929 nm, coda_f2s(&dcp->c_fid))););
930
931 /*
932 * XXX componentname flags in MODMASK are not handled at all
933 */
934
935 /*
936 * The overall strategy is to switch on the lookup type and get a
937 * result vnode that is vref'd but not locked. Then, the code at
938 * exit: switches on ., .., and regular lookups and does the right
939 * locking.
940 */
941
942 /* Check for lookup of control object. */
943 if (IS_CTL_NAME(dvp, nm, len)) {
944 *vpp = coda_ctlvp;
945 vref(*vpp);
946 MARK_INT_SAT(CODA_LOOKUP_STATS);
947 goto exit;
948 }
949
950 /* Avoid trying to hand venus an unreasonably long name. */
951 if (len+1 > CODA_MAXNAMLEN) {
952 MARK_INT_FAIL(CODA_LOOKUP_STATS);
953 CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
954 coda_f2s(&dcp->c_fid), nm)););
955 *vpp = (struct vnode *)0;
956 error = EINVAL;
957 goto exit;
958 }
959
960 /*
961 * XXX check for DOT lookups, and short circuit all the caches,
962 * just doing an extra vref. (venus guarantees that lookup of
963 * . returns self.)
964 */
965 isdot = (len == 1 && nm[0] == '.');
966
967 /*
968 * Try to resolve the lookup in the minicache. If that fails, ask
969 * venus to do the lookup. XXX The interaction between vnode
970 * locking and any locking that coda does is not clear.
971 */
972 cp = coda_nc_lookup(dcp, nm, len, cred);
973 if (cp) {
974 *vpp = CTOV(cp);
975 vref(*vpp);
976 CODADEBUG(CODA_LOOKUP,
977 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
978 } else {
979 /* The name wasn't cached, so ask Venus. */
980 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);
981
982 if (error) {
983 MARK_INT_FAIL(CODA_LOOKUP_STATS);
984 CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
985 coda_f2s(&dcp->c_fid), nm, error));)
986 *vpp = (struct vnode *)0;
987 } else {
988 MARK_INT_SAT(CODA_LOOKUP_STATS);
989 CODADEBUG(CODA_LOOKUP,
990 myprintf(("lookup: %s type %o result %d\n",
991 coda_f2s(&VFid), vtype, error)); )
992
993 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
994 *vpp = CTOV(cp);
995 /* vpp is now vrefed. */
996
997 /*
998 * Unless this vnode is marked CODA_NOCACHE, enter it into
999 * the coda name cache to avoid a future venus round-trip.
1000 * XXX Interaction with componentname NOCACHE is unclear.
1001 */
1002 if (!(vtype & CODA_NOCACHE))
1003 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1004 }
1005 }
1006
1007 exit:
1008 /*
1009 * If we are creating, and this was the last name to be looked up,
1010 * and the error was ENOENT, then make the leaf NULL and return
1011 * success.
1012 * XXX Check against new lookup rules.
1013 */
1014 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
1015 && (cnp->cn_flags & ISLASTCN)
1016 && (error == ENOENT))
1017 {
1018 error = EJUSTRETURN;
1019 cnp->cn_flags |= SAVENAME;
1020 *ap->a_vpp = NULL;
1021 }
1022
1023 /*
1024 * If we are removing, and we are at the last element, and we
1025 * found it, then we need to keep the name around so that the
1026 * removal will go ahead as planned.
1027 * XXX Check against new lookup rules.
1028 */
1029 if ((cnp->cn_nameiop == DELETE)
1030 && (cnp->cn_flags & ISLASTCN)
1031 && !error)
1032 {
1033 cnp->cn_flags |= SAVENAME;
1034 }
1035
1036 /*
1037 * If the lookup succeeded, we must generally lock the returned
1038 * vnode. This could be a ., .., or normal lookup. See
1039 * vnodeops(9) for the details.
1040 */
1041 /*
1042 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1043 * somehow, and remove LK_RETRY.
1044 */
1045 if (!error || (error == EJUSTRETURN)) {
1046 /* Lookup has a value and it isn't "."? */
1047 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1048 if (flags & ISDOTDOT)
1049 /* ..: unlock parent */
1050 VOP_UNLOCK(dvp, 0);
1051 /* all but .: lock child */
1052 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1053 if (flags & ISDOTDOT)
1054 /* ..: relock parent */
1055 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1056 }
1057 /* else .: leave dvp locked */
1058 } else {
1059 /* The lookup failed, so return NULL. Leave dvp locked. */
1060 *ap->a_vpp = NULL;
1061 }
1062 return(error);
1063 }
1064
1065 /*ARGSUSED*/
1066 int
1067 coda_create(void *v)
1068 {
1069 /* true args */
1070 struct vop_create_args *ap = v;
1071 struct vnode *dvp = ap->a_dvp;
1072 struct cnode *dcp = VTOC(dvp);
1073 struct vattr *va = ap->a_vap;
1074 int exclusive = 1;
1075 int mode = ap->a_vap->va_mode;
1076 struct vnode **vpp = ap->a_vpp;
1077 struct componentname *cnp = ap->a_cnp;
1078 kauth_cred_t cred = cnp->cn_cred;
1079 struct lwp *l = cnp->cn_lwp;
1080 /* locals */
1081 int error;
1082 struct cnode *cp;
1083 const char *nm = cnp->cn_nameptr;
1084 int len = cnp->cn_namelen;
1085 CodaFid VFid;
1086 struct vattr attr;
1087
1088 MARK_ENTRY(CODA_CREATE_STATS);
1089
1090 /* All creates are exclusive XXX */
1091 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1092
1093 /* Check for create of control object. */
1094 if (IS_CTL_NAME(dvp, nm, len)) {
1095 *vpp = (struct vnode *)0;
1096 MARK_INT_FAIL(CODA_CREATE_STATS);
1097 return(EACCES);
1098 }
1099
1100 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1101
1102 if (!error) {
1103
1104 /*
1105 * XXX Violation of venus/kernel invariants is a difficult case,
1106 * but venus should not be able to cause a panic.
1107 */
1108 /* If this is an exclusive create, panic if the file already exists. */
1109 /* Venus should have detected the file and reported EEXIST. */
1110
1111 if ((exclusive == 1) &&
1112 (coda_find(&VFid) != NULL))
1113 panic("cnode existed for newly created file!");
1114
1115 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1116 *vpp = CTOV(cp);
1117
1118 /* XXX vnodeops doesn't say this argument can be changed. */
1119 /* Update va to reflect the new attributes. */
1120 (*va) = attr;
1121
1122 /* Update the attribute cache and mark it as valid */
1123 if (coda_attr_cache) {
1124 VTOC(*vpp)->c_vattr = attr;
1125 VTOC(*vpp)->c_flags |= C_VATTR;
1126 }
1127
1128 /* Invalidate parent's attr cache (modification time has changed). */
1129 VTOC(dvp)->c_flags &= ~C_VATTR;
1130
1131 /* enter the new vnode in the Name Cache */
1132 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1133
1134 CODADEBUG(CODA_CREATE,
1135 myprintf(("create: %s, result %d\n",
1136 coda_f2s(&VFid), error)); )
1137 } else {
1138 *vpp = (struct vnode *)0;
1139 CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
1140 }
1141
1142 /*
1143 * vnodeops(9) says that we must unlock the parent and lock the child.
1144 * XXX Should we lock the child first?
1145 */
1146 vput(dvp);
1147 if (!error) {
1148 if ((cnp->cn_flags & LOCKLEAF) == 0) {
1149 /* This should not happen; flags are for lookup only. */
1150 printf("coda_create: LOCKLEAF not set!\n");
1151 }
1152
1153 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1154 /* XXX Perhaps avoid this panic. */
1155 panic("coda_create: couldn't lock child");
1156 }
1157 }
1158
1159 /* Per vnodeops(9), free name except on success and SAVESTART. */
1160 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1161 PNBUF_PUT(cnp->cn_pnbuf);
1162 }
1163 return(error);
1164 }
1165
1166 int
1167 coda_remove(void *v)
1168 {
1169 /* true args */
1170 struct vop_remove_args *ap = v;
1171 struct vnode *dvp = ap->a_dvp;
1172 struct cnode *cp = VTOC(dvp);
1173 struct vnode *vp = ap->a_vp;
1174 struct componentname *cnp = ap->a_cnp;
1175 kauth_cred_t cred = cnp->cn_cred;
1176 struct lwp *l = cnp->cn_lwp;
1177 /* locals */
1178 int error;
1179 const char *nm = cnp->cn_nameptr;
1180 int len = cnp->cn_namelen;
1181 struct cnode *tp;
1182
1183 MARK_ENTRY(CODA_REMOVE_STATS);
1184
1185 CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
1186 nm, coda_f2s(&cp->c_fid))););
1187
1188 /* Remove the file's entry from the CODA Name Cache */
1189 /* We're being conservative here; it might be that this person
1190 * doesn't really have sufficient access to delete the file,
1191 * but we feel zapping the entry won't really hurt anyone -- dcs
1192 */
1193 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1194 * exist, and one is removed, the link count on the other will be
1195 * off by 1. We could either invalidate the attrs if cached, or
1196 * fix them. I'll try to fix them. DCS 11/8/94
1197 */
1198 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1199 if (tp) {
1200 if (VALID_VATTR(tp)) { /* If attrs are cached */
1201 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1202 tp->c_vattr.va_nlink--;
1203 }
1204 }
1205
1206 coda_nc_zapfile(VTOC(dvp), nm, len);
1207 /* No need to flush it if it doesn't exist! */
1208 }
1209 /* Invalidate the parent's attr cache, the modification time has changed */
1210 VTOC(dvp)->c_flags &= ~C_VATTR;
1211
1212 /* Check for remove of control object. */
1213 if (IS_CTL_NAME(dvp, nm, len)) {
1214 MARK_INT_FAIL(CODA_REMOVE_STATS);
1215 return(ENOENT);
1216 }
1217
1218 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1219
1220 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1221
1222 /*
1223 * Unlock parent and child (avoiding double if ".").
1224 */
1225 if (dvp == vp) {
1226 vrele(vp);
1227 } else {
1228 vput(vp);
1229 }
1230 vput(dvp);
1231
1232 return(error);
1233 }
1234
1235 /*
1236 * dvp is the directory where the link is to go, and is locked.
1237 * vp is the object to be linked to, and is unlocked.
1238 * At exit, we must unlock dvp, and vput dvp.
1239 */
1240 int
1241 coda_link(void *v)
1242 {
1243 /* true args */
1244 struct vop_link_args *ap = v;
1245 struct vnode *vp = ap->a_vp;
1246 struct cnode *cp = VTOC(vp);
1247 struct vnode *dvp = ap->a_dvp;
1248 struct cnode *dcp = VTOC(dvp);
1249 struct componentname *cnp = ap->a_cnp;
1250 kauth_cred_t cred = cnp->cn_cred;
1251 struct lwp *l = cnp->cn_lwp;
1252 /* locals */
1253 int error;
1254 const char *nm = cnp->cn_nameptr;
1255 int len = cnp->cn_namelen;
1256
1257 MARK_ENTRY(CODA_LINK_STATS);
1258
1259 if (codadebug & CODADBGMSK(CODA_LINK)) {
1260
1261 myprintf(("nb_link: vp fid: %s\n",
1262 coda_f2s(&cp->c_fid)));
1263 myprintf(("nb_link: dvp fid: %s)\n",
1264 coda_f2s(&dcp->c_fid)));
1265
1266 }
1267 if (codadebug & CODADBGMSK(CODA_LINK)) {
1268 myprintf(("link: vp fid: %s\n",
1269 coda_f2s(&cp->c_fid)));
1270 myprintf(("link: dvp fid: %s\n",
1271 coda_f2s(&dcp->c_fid)));
1272
1273 }
1274
1275 /* Check for link to/from control object. */
1276 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1277 MARK_INT_FAIL(CODA_LINK_STATS);
1278 return(EACCES);
1279 }
1280
1281 /* If linking . to a name, error out earlier. */
1282 if (vp == dvp) {
1283 printf("coda_link vp==dvp\n");
1284 error = EISDIR;
1285 goto exit;
1286 }
1287
1288 /* XXX Why does venus_link need the vnode to be locked?*/
1289 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1290 printf("coda_link: couldn't lock vnode %p\n", vp);
1291 error = EFAULT; /* XXX better value */
1292 goto exit;
1293 }
1294 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1295 VOP_UNLOCK(vp, 0);
1296
1297 /* Invalidate parent's attr cache (the modification time has changed). */
1298 VTOC(dvp)->c_flags &= ~C_VATTR;
1299 /* Invalidate child's attr cache (XXX why). */
1300 VTOC(vp)->c_flags &= ~C_VATTR;
1301
1302 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1303
1304 exit:
1305 vput(dvp);
1306 return(error);
1307 }
1308
1309 int
1310 coda_rename(void *v)
1311 {
1312 /* true args */
1313 struct vop_rename_args *ap = v;
1314 struct vnode *odvp = ap->a_fdvp;
1315 struct cnode *odcp = VTOC(odvp);
1316 struct componentname *fcnp = ap->a_fcnp;
1317 struct vnode *ndvp = ap->a_tdvp;
1318 struct cnode *ndcp = VTOC(ndvp);
1319 struct componentname *tcnp = ap->a_tcnp;
1320 kauth_cred_t cred = fcnp->cn_cred;
1321 struct lwp *l = fcnp->cn_lwp;
1322 /* true args */
1323 int error;
1324 const char *fnm = fcnp->cn_nameptr;
1325 int flen = fcnp->cn_namelen;
1326 const char *tnm = tcnp->cn_nameptr;
1327 int tlen = tcnp->cn_namelen;
1328
1329 MARK_ENTRY(CODA_RENAME_STATS);
1330
1331 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1332 This could be Bad. XXX */
1333 #ifdef OLD_DIAGNOSTIC
1334 if ((fcnp->cn_cred != tcnp->cn_cred)
1335 || (fcnp->cn_lwp != tcnp->cn_lwp))
1336 {
1337 panic("coda_rename: component names don't agree");
1338 }
1339 #endif
1340
1341 /* Check for rename involving control object. */
1342 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1343 MARK_INT_FAIL(CODA_RENAME_STATS);
1344 return(EACCES);
1345 }
1346
1347 /* Problem with moving directories -- need to flush entry for .. */
1348 if (odvp != ndvp) {
1349 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1350 if (ovcp) {
1351 struct vnode *ovp = CTOV(ovcp);
1352 if ((ovp) &&
1353 (ovp->v_type == VDIR)) /* If it's a directory */
1354 coda_nc_zapfile(VTOC(ovp),"..", 2);
1355 }
1356 }
1357
1358 /* Remove the entries for both source and target files */
1359 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1360 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1361
1362 /* Invalidate the parent's attr cache, the modification time has changed */
1363 VTOC(odvp)->c_flags &= ~C_VATTR;
1364 VTOC(ndvp)->c_flags &= ~C_VATTR;
1365
1366 if (flen+1 > CODA_MAXNAMLEN) {
1367 MARK_INT_FAIL(CODA_RENAME_STATS);
1368 error = EINVAL;
1369 goto exit;
1370 }
1371
1372 if (tlen+1 > CODA_MAXNAMLEN) {
1373 MARK_INT_FAIL(CODA_RENAME_STATS);
1374 error = EINVAL;
1375 goto exit;
1376 }
1377
1378 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1379
1380 exit:
1381 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1382 /* XXX - do we need to call cache_purge on the moved vnode? */
1383 cache_purge(ap->a_fvp);
1384
1385 /* It seems to be incumbent on us to drop locks on all four vnodes */
1386 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1387
1388 vrele(ap->a_fvp);
1389 vrele(odvp);
1390
1391 if (ap->a_tvp) {
1392 if (ap->a_tvp == ndvp) {
1393 vrele(ap->a_tvp);
1394 } else {
1395 vput(ap->a_tvp);
1396 }
1397 }
1398
1399 vput(ndvp);
1400 return(error);
1401 }
1402
1403 int
1404 coda_mkdir(void *v)
1405 {
1406 /* true args */
1407 struct vop_mkdir_args *ap = v;
1408 struct vnode *dvp = ap->a_dvp;
1409 struct cnode *dcp = VTOC(dvp);
1410 struct componentname *cnp = ap->a_cnp;
1411 struct vattr *va = ap->a_vap;
1412 struct vnode **vpp = ap->a_vpp;
1413 kauth_cred_t cred = cnp->cn_cred;
1414 struct lwp *l = cnp->cn_lwp;
1415 /* locals */
1416 int error;
1417 const char *nm = cnp->cn_nameptr;
1418 int len = cnp->cn_namelen;
1419 struct cnode *cp;
1420 CodaFid VFid;
1421 struct vattr ova;
1422
1423 MARK_ENTRY(CODA_MKDIR_STATS);
1424
1425 /* Check for mkdir of control object. */
1426 if (IS_CTL_NAME(dvp, nm, len)) {
1427 *vpp = (struct vnode *)0;
1428 MARK_INT_FAIL(CODA_MKDIR_STATS);
1429 return(EACCES);
1430 }
1431
1432 if (len+1 > CODA_MAXNAMLEN) {
1433 *vpp = (struct vnode *)0;
1434 MARK_INT_FAIL(CODA_MKDIR_STATS);
1435 return(EACCES);
1436 }
1437
1438 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1439
1440 if (!error) {
1441 if (coda_find(&VFid) != NULL)
1442 panic("cnode existed for newly created directory!");
1443
1444
1445 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1446 *vpp = CTOV(cp);
1447
1448 /* enter the new vnode in the Name Cache */
1449 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1450
1451 /* as a side effect, enter "." and ".." for the directory */
1452 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1453 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1454
1455 if (coda_attr_cache) {
1456 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1457 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1458 }
1459
1460 /* Invalidate the parent's attr cache, the modification time has changed */
1461 VTOC(dvp)->c_flags &= ~C_VATTR;
1462
1463 CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
1464 coda_f2s(&VFid), error)); )
1465 } else {
1466 *vpp = (struct vnode *)0;
1467 CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
1468 }
1469
1470 /*
1471 * Currently, all mkdirs explicitly vput their dvp's.
1472 * It also appears that we *must* lock the vpp, since
1473 * lockleaf isn't set, but someone down the road is going
1474 * to try to unlock the new directory.
1475 */
1476 vput(dvp);
1477 if (!error) {
1478 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1479 panic("coda_mkdir: couldn't lock child");
1480 }
1481 }
1482
1483 /* Per vnodeops(9), free name except on success and SAVESTART. */
1484 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1485 PNBUF_PUT(cnp->cn_pnbuf);
1486 }
1487 return(error);
1488 }
1489
1490 int
1491 coda_rmdir(void *v)
1492 {
1493 /* true args */
1494 struct vop_rmdir_args *ap = v;
1495 struct vnode *dvp = ap->a_dvp;
1496 struct cnode *dcp = VTOC(dvp);
1497 struct vnode *vp = ap->a_vp;
1498 struct componentname *cnp = ap->a_cnp;
1499 kauth_cred_t cred = cnp->cn_cred;
1500 struct lwp *l = cnp->cn_lwp;
1501 /* true args */
1502 int error;
1503 const char *nm = cnp->cn_nameptr;
1504 int len = cnp->cn_namelen;
1505 struct cnode *cp;
1506
1507 MARK_ENTRY(CODA_RMDIR_STATS);
1508
1509 /* Check for rmdir of control object. */
1510 if (IS_CTL_NAME(dvp, nm, len)) {
1511 MARK_INT_FAIL(CODA_RMDIR_STATS);
1512 return(ENOENT);
1513 }
1514
1515 /* Can't remove . in self. */
1516 if (dvp == vp) {
1517 printf("coda_rmdir: dvp == vp\n");
1518 error = EINVAL;
1519 goto exit;
1520 }
1521
1522 /*
1523 * The caller may not have adequate permissions, and the venus
1524 * operation may fail, but it doesn't hurt from a correctness
1525 * viewpoint to invalidate cache entries.
1526 * XXX Why isn't this done after the venus_rmdir call?
1527 */
1528 /* Look up child in name cache (by name, from parent). */
1529 cp = coda_nc_lookup(dcp, nm, len, cred);
1530 /* If found, remove all children of the child (., ..). */
1531 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1532
1533 /* Remove child's own entry. */
1534 coda_nc_zapfile(dcp, nm, len);
1535
1536 /* Invalidate parent's attr cache (the modification time has changed). */
1537 dcp->c_flags &= ~C_VATTR;
1538
1539 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1540
1541 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1542
1543 exit:
1544 /* vput both vnodes */
1545 vput(dvp);
1546 if (dvp == vp) {
1547 vrele(vp);
1548 } else {
1549 vput(vp);
1550 }
1551
1552 return(error);
1553 }
1554
1555 int
1556 coda_symlink(void *v)
1557 {
1558 /* true args */
1559 struct vop_symlink_args *ap = v;
1560 struct vnode *dvp = ap->a_dvp;
1561 struct cnode *dcp = VTOC(dvp);
1562 /* a_vpp is used in place below */
1563 struct componentname *cnp = ap->a_cnp;
1564 struct vattr *tva = ap->a_vap;
1565 char *path = ap->a_target;
1566 kauth_cred_t cred = cnp->cn_cred;
1567 struct lwp *l = cnp->cn_lwp;
1568 /* locals */
1569 int error;
1570 u_long saved_cn_flags;
1571 const char *nm = cnp->cn_nameptr;
1572 int len = cnp->cn_namelen;
1573 int plen = strlen(path);
1574
1575 /*
1576 * Here's the strategy for the moment: perform the symlink, then
1577 * do a lookup to grab the resulting vnode. I know this requires
1578 * two communications with Venus for a new symbolic link, but
1579 * that's the way the ball bounces. I don't yet want to change
1580 * the way the Mach symlink works. When Mach support is
1581 * deprecated, we should change symlink so that the common case
1582 * returns the resultant vnode in a vpp argument.
1583 */
1584
1585 MARK_ENTRY(CODA_SYMLINK_STATS);
1586
1587 /* Check for symlink of control object. */
1588 if (IS_CTL_NAME(dvp, nm, len)) {
1589 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1590 error = EACCES;
1591 goto exit;
1592 }
1593
1594 if (plen+1 > CODA_MAXPATHLEN) {
1595 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1596 error = EINVAL;
1597 goto exit;
1598 }
1599
1600 if (len+1 > CODA_MAXNAMLEN) {
1601 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1602 error = EINVAL;
1603 goto exit;
1604 }
1605
1606 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1607
1608 /* Invalidate the parent's attr cache (modification time has changed). */
1609 dcp->c_flags &= ~C_VATTR;
1610
1611 if (!error) {
1612 /*
1613 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1614 * these are defined only for VOP_LOOKUP. We desire to reuse
1615 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1616 * stray flags passed to us. Such stray flags can occur because
1617 * sys_symlink makes a namei call and then reuses the
1618 * componentname structure.
1619 */
1620 /*
1621 * XXX Arguably we should create our own componentname structure
1622 * and not reuse the one that was passed in.
1623 */
1624 saved_cn_flags = cnp->cn_flags;
1625 cnp->cn_flags &= ~(MODMASK | OPMASK);
1626 cnp->cn_flags |= LOOKUP;
1627 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1628 cnp->cn_flags = saved_cn_flags;
1629 /* Either an error occurs, or ap->a_vpp is locked. */
1630 }
1631
1632 exit:
1633 /* unlock and release the parent */
1634 vput(dvp);
1635
1636 /* Per vnodeops(9), free name except on success and SAVESTART. */
1637 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1638 PNBUF_PUT(cnp->cn_pnbuf);
1639 }
1640
1641 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1642 return(error);
1643 }
1644
1645 /*
1646 * Read directory entries.
1647 */
1648 int
1649 coda_readdir(void *v)
1650 {
1651 /* true args */
1652 struct vop_readdir_args *ap = v;
1653 struct vnode *vp = ap->a_vp;
1654 struct cnode *cp = VTOC(vp);
1655 struct uio *uiop = ap->a_uio;
1656 kauth_cred_t cred = ap->a_cred;
1657 int *eofflag = ap->a_eofflag;
1658 off_t **cookies = ap->a_cookies;
1659 int *ncookies = ap->a_ncookies;
1660 /* upcall decl */
1661 /* locals */
1662 struct lwp *l = curlwp;
1663 int error = 0;
1664
1665 MARK_ENTRY(CODA_READDIR_STATS);
1666
1667 CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )
1668
1669 /* Check for readdir of control object. */
1670 if (IS_CTL_VP(vp)) {
1671 MARK_INT_FAIL(CODA_READDIR_STATS);
1672 return(ENOENT);
1673 }
1674
1675 {
1676 /* Redirect the request to UFS. */
1677
1678 /* If directory is not already open do an "internal open" on it. */
1679 int opened_internally = 0;
1680 if (cp->c_ovp == NULL) {
1681 opened_internally = 1;
1682 MARK_INT_GEN(CODA_OPEN_STATS);
1683 error = VOP_OPEN(vp, FREAD, cred, l);
1684 #ifdef CODA_VERBOSE
1685 printf("coda_readdir: Internally Opening %p\n", vp);
1686 #endif
1687 if (error) return(error);
1688 } else
1689 vp = cp->c_ovp;
1690
1691 /* Have UFS handle the call. */
1692 CODADEBUG(CODA_READDIR, myprintf((
1693 "indirect readdir: fid = %s, refcnt = %d\n",
1694 coda_f2s(&cp->c_fid), vp->v_usecount)); )
1695 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1696 if (error)
1697 MARK_INT_FAIL(CODA_READDIR_STATS);
1698 else
1699 MARK_INT_SAT(CODA_READDIR_STATS);
1700
1701 /* Do an "internal close" if necessary. */
1702 if (opened_internally) {
1703 MARK_INT_GEN(CODA_CLOSE_STATS);
1704 (void)VOP_CLOSE(vp, FREAD, cred, l);
1705 }
1706 }
1707
1708 return(error);
1709 }
1710
1711 /*
1712 * Convert from file system blocks to device blocks
1713 */
1714 int
1715 coda_bmap(void *v)
1716 {
1717 /* XXX on the global proc */
1718 /* true args */
1719 struct vop_bmap_args *ap = v;
1720 struct vnode *vp __attribute__((unused)) = ap->a_vp; /* file's vnode */
1721 daddr_t bn __attribute__((unused)) = ap->a_bn; /* fs block number */
1722 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1723 daddr_t *bnp __attribute__((unused)) = ap->a_bnp; /* RETURN device block number */
1724 struct lwp *l __attribute__((unused)) = curlwp;
1725 /* upcall decl */
1726 /* locals */
1727
1728 *vpp = (struct vnode *)0;
1729 myprintf(("coda_bmap called!\n"));
1730 return(EINVAL);
1731 }
1732
1733 /*
1734 * I don't think the following two things are used anywhere, so I've
1735 * commented them out
1736 *
1737 * struct buf *async_bufhead;
1738 * int async_daemon_count;
1739 */
1740 int
1741 coda_strategy(void *v)
1742 {
1743 /* true args */
1744 struct vop_strategy_args *ap = v;
1745 struct buf *bp __attribute__((unused)) = ap->a_bp;
1746 struct lwp *l __attribute__((unused)) = curlwp;
1747 /* upcall decl */
1748 /* locals */
1749
1750 myprintf(("coda_strategy called! "));
1751 return(EINVAL);
1752 }
1753
1754 int
1755 coda_reclaim(void *v)
1756 {
1757 /* true args */
1758 struct vop_reclaim_args *ap = v;
1759 struct vnode *vp = ap->a_vp;
1760 struct cnode *cp = VTOC(vp);
1761 /* upcall decl */
1762 /* locals */
1763
1764 /*
1765 * Forced unmount/flush will let vnodes with non zero use be destroyed!
1766 */
1767 ENTRY;
1768
1769 if (IS_UNMOUNTING(cp)) {
1770 #ifdef DEBUG
1771 if (VTOC(vp)->c_ovp) {
1772 if (IS_UNMOUNTING(cp))
1773 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1774 }
1775 #endif
1776 } else {
1777 #ifdef OLD_DIAGNOSTIC
1778 if (vp->v_usecount != 0)
1779 printf("coda_reclaim: pushing active %p\n", vp);
1780 if (VTOC(vp)->c_ovp) {
1781 panic("coda_reclaim: c_ovp not void");
1782 }
1783 #endif
1784 }
1785 cache_purge(vp);
1786 coda_free(VTOC(vp));
1787 SET_VTOC(vp) = NULL;
1788 return (0);
1789 }
1790
1791 int
1792 coda_lock(void *v)
1793 {
1794 /* true args */
1795 struct vop_lock_args *ap = v;
1796 struct vnode *vp = ap->a_vp;
1797 struct cnode *cp = VTOC(vp);
1798 /* upcall decl */
1799 /* locals */
1800
1801 ENTRY;
1802
1803 if (coda_lockdebug) {
1804 myprintf(("Attempting lock on %s\n",
1805 coda_f2s(&cp->c_fid)));
1806 }
1807
1808 return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
1809 }
1810
1811 int
1812 coda_unlock(void *v)
1813 {
1814 /* true args */
1815 struct vop_unlock_args *ap = v;
1816 struct vnode *vp = ap->a_vp;
1817 struct cnode *cp = VTOC(vp);
1818 /* upcall decl */
1819 /* locals */
1820
1821 ENTRY;
1822 if (coda_lockdebug) {
1823 myprintf(("Attempting unlock on %s\n",
1824 coda_f2s(&cp->c_fid)));
1825 }
1826
1827 return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock));
1828 }
1829
1830 int
1831 coda_islocked(void *v)
1832 {
1833 /* true args */
1834 struct vop_islocked_args *ap = v;
1835 ENTRY;
1836
1837 return (lockstatus(&ap->a_vp->v_lock));
1838 }
1839
1840 /*
1841 * Given a device and inode, obtain a locked vnode. One reference is
1842 * obtained and passed back to the caller.
1843 */
1844 int
1845 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1846 {
1847 int error;
1848 struct mount *mp;
1849
1850 /* Obtain mount point structure from device. */
1851 if (!(mp = devtomp(dev))) {
1852 myprintf(("coda_grab_vnode: devtomp(%d) returns NULL\n", dev));
1853 return(ENXIO);
1854 }
1855
1856 /*
1857 * Obtain vnode from mount point and inode.
1858 * XXX VFS_VGET does not clearly define locked/referenced state of
1859 * returned vnode.
1860 */
1861 error = VFS_VGET(mp, ino, vpp);
1862 if (error) {
1863 myprintf(("coda_grab_vnode: iget/vget(%d, %llu) returns %p, err %d\n",
1864 dev, (unsigned long long)ino, *vpp, error));
1865 return(ENOENT);
1866 }
1867 return(0);
1868 }
1869
1870 void
1871 print_vattr(struct vattr *attr)
1872 {
1873 const char *typestr;
1874
1875 switch (attr->va_type) {
1876 case VNON:
1877 typestr = "VNON";
1878 break;
1879 case VREG:
1880 typestr = "VREG";
1881 break;
1882 case VDIR:
1883 typestr = "VDIR";
1884 break;
1885 case VBLK:
1886 typestr = "VBLK";
1887 break;
1888 case VCHR:
1889 typestr = "VCHR";
1890 break;
1891 case VLNK:
1892 typestr = "VLNK";
1893 break;
1894 case VSOCK:
1895 typestr = "VSCK";
1896 break;
1897 case VFIFO:
1898 typestr = "VFFO";
1899 break;
1900 case VBAD:
1901 typestr = "VBAD";
1902 break;
1903 default:
1904 typestr = "????";
1905 break;
1906 }
1907
1908
1909 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1910 typestr, (int)attr->va_mode, (int)attr->va_uid,
1911 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1912
1913 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1914 (int)attr->va_fileid, (int)attr->va_nlink,
1915 (int)attr->va_size,
1916 (int)attr->va_blocksize,(int)attr->va_bytes));
1917 myprintf((" gen %ld flags %ld vaflags %d\n",
1918 attr->va_gen, attr->va_flags, attr->va_vaflags));
1919 myprintf((" atime sec %d nsec %d\n",
1920 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1921 myprintf((" mtime sec %d nsec %d\n",
1922 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1923 myprintf((" ctime sec %d nsec %d\n",
1924 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1925 }
1926
1927 /* How to print a ucred */
1928 void
1929 print_cred(kauth_cred_t cred)
1930 {
1931
1932 uint16_t ngroups;
1933 int i;
1934
1935 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1936 kauth_cred_geteuid(cred)));
1937
1938 ngroups = kauth_cred_ngroups(cred);
1939 for (i=0; i < ngroups; i++)
1940 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1941 myprintf(("\n"));
1942
1943 }
1944
1945 /*
1946 * Return a vnode for the given fid.
1947 * If no cnode exists for this fid, create one and put it
1948 * in a table hashed by coda_f2i(). If the cnode for
1949 * this fid is already in the table, return it (ref count is
1950 * incremented by coda_find). The cnode will be flushed from the
1951 * table when coda_inactive calls coda_unsave.
1952 */
1953 struct cnode *
1954 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1955 {
1956 struct cnode *cp;
1957 int err;
1958
1959 if ((cp = coda_find(fid)) == NULL) {
1960 struct vnode *vp;
1961
1962 cp = coda_alloc();
1963 cp->c_fid = *fid;
1964
1965 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
1966 if (err) {
1967 panic("coda: getnewvnode returned error %d", err);
1968 }
1969 vp->v_data = cp;
1970 vp->v_type = type;
1971 cp->c_vnode = vp;
1972 coda_save(cp);
1973
1974 } else {
1975 vref(CTOV(cp));
1976 }
1977
1978 return cp;
1979 }
1980
1981 /*
1982 * coda_getpages may be called on a vnode which has not been opened,
1983 * e.g. to fault in pages to execute a program. In that case, we must
1984 * open the file to get the container. The vnode may or may not be
1985 * locked, and we must leave it in the same state.
1986 * XXX The protocol apparently requires v_uobj.vmobjlock to be
1987 * held by caller, but this isn't documented.
1988 * XXX Most code uses v_interlock instead, which is really the same
1989 * variable.
1990 */
1991 int
1992 coda_getpages(void *v)
1993 {
1994 struct vop_getpages_args /* {
1995 struct vnode *a_vp;
1996 voff_t a_offset;
1997 struct vm_page **a_m;
1998 int *a_count;
1999 int a_centeridx;
2000 vm_prot_t a_access_type;
2001 int a_advice;
2002 int a_flags;
2003 } */ *ap = v;
2004 struct vnode *vp = ap->a_vp;
2005 struct cnode *cp = VTOC(vp);
2006 struct lwp *l = curlwp;
2007 kauth_cred_t cred = l->l_cred;
2008 int error, cerror;
2009 int waslocked; /* 1 if vnode lock was held on entry */
2010 int didopen = 0; /* 1 if we opened container file */
2011
2012 /*
2013 * Handle a case that uvm_fault doesn't quite use yet.
2014 * See layer_vnops.c for inspiration.
2015 */
2016 if (ap->a_flags & PGO_LOCKED) {
2017 return EBUSY;
2018 }
2019
2020 /* Check for control object. */
2021 if (IS_CTL_VP(vp)) {
2022 printf("coda_getpages: control object %p\n", vp);
2023 simple_unlock(&vp->v_interlock);
2024 return(EINVAL);
2025 }
2026
2027 /*
2028 * XXX It's really not ok to be releasing the lock we get,
2029 * because we could be overlapping with another call to
2030 * getpages and drop a lock they are relying on. We need to
2031 * figure out whether getpages ever is called holding the
2032 * lock, and if we should serialize getpages calls by some
2033 * mechanism.
2034 */
2035 waslocked = VOP_ISLOCKED(vp);
2036
2037 /* Drop vmobj lock. */
2038 simple_unlock(&vp->v_interlock);
2039
2040 /* Get container file if not already present. */
2041 if (cp->c_ovp == NULL) {
2042 /*
2043 * VOP_OPEN requires a locked vnode. We must avoid
2044 * locking the vnode if it is already locked, and
2045 * leave it in the same state on exit.
2046 */
2047 if (waslocked == 0) {
2048 cerror = vn_lock(vp, LK_EXCLUSIVE);
2049 if (cerror) {
2050 printf("coda_getpages: can't lock vnode %p\n",
2051 vp);
2052 simple_unlock(&vp->v_interlock);
2053 return cerror;
2054 }
2055 #if 0
2056 printf("coda_getpages: locked vnode %p\n", vp);
2057 #endif
2058 }
2059
2060 /*
2061 * Open file (causes upcall to venus).
2062 * XXX Perhaps we should not fully open the file, but
2063 * simply obtain a container file.
2064 */
2065 /* XXX Is it ok to do this while holding the simplelock? */
2066 cerror = VOP_OPEN(vp, FREAD, cred, l);
2067
2068 if (cerror) {
2069 printf("coda_getpages: cannot open vnode %p => %d\n",
2070 vp, cerror);
2071 if (waslocked == 0)
2072 VOP_UNLOCK(vp, 0);
2073 simple_unlock(&vp->v_interlock);
2074 return cerror;
2075 }
2076
2077 #if 0
2078 printf("coda_getpages: opened vnode %p\n", vp);
2079 #endif
2080 didopen = 1;
2081 }
2082 KASSERT(cp->c_ovp != NULL);
2083
2084 /* Like LAYERVPTOLOWERVP, but coda doesn't use the layer struct. */
2085 ap->a_vp = cp->c_ovp;
2086
2087 /* Get the lock on the container vnode, and call getpages on it. */
2088 simple_lock(&ap->a_vp->v_interlock);
2089 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2090
2091 /* If we opened the vnode, we must close it. */
2092 if (didopen) {
2093 /*
2094 * VOP_CLOSE requires a locked vnode, but we are still
2095 * holding the lock (or riding a caller's lock).
2096 */
2097 cerror = VOP_CLOSE(vp, FREAD, cred, l);
2098 if (cerror != 0)
2099 /* XXX How should we handle this? */
2100 printf("coda_getpages: closed vnode %p -> %d\n",
2101 vp, cerror);
2102
2103 /* If we obtained a lock, drop it. */
2104 if (waslocked == 0)
2105 VOP_UNLOCK(vp, 0);
2106 }
2107
2108 return error;
2109 }
2110
2111 /*
2112 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2113 * documented in vnodeops(9).
2114 * XXX vnode_if.src doesn't say this.
2115 * XXX Most code uses v_interlock instead, which is really the same
2116 * variable.
2117 */
2118 int
2119 coda_putpages(void *v)
2120 {
2121 struct vop_putpages_args /* {
2122 struct vnode *a_vp;
2123 voff_t a_offlo;
2124 voff_t a_offhi;
2125 int a_flags;
2126 } */ *ap = v;
2127 struct vnode *vp = ap->a_vp;
2128 struct cnode *cp = VTOC(vp);
2129 int error;
2130
2131 /* Check for control object. */
2132 if (IS_CTL_VP(vp)) {
2133 printf("coda_putpages: control object %p\n", vp);
2134 simple_unlock(&vp->v_interlock);
2135 return(EINVAL);
2136 }
2137
2138 /* Drop the vmobj lock. */
2139 simple_unlock(&vp->v_interlock);
2140
2141 /*
2142 * If container object is not present, then there are no pages
2143 * to put; just return without error. This happens all the
2144 * time, apparently during discard of a closed vnode (which
2145 * trivially can't have dirty pages).
2146 */
2147 if (cp->c_ovp == NULL)
2148 return 0;
2149
2150 /* Like LAYERVPTOLOWERVP, but coda doesn't use the layer struct. */
2151 ap->a_vp = cp->c_ovp;
2152
2153 /* Get the lock on the container vnode, and call putpages on it. */
2154 simple_lock(&ap->a_vp->v_interlock);
2155 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2156
2157 return error;
2158 }
2159