1 /* $NetBSD: coda_vnops.c,v 1.78.2.1 2011/06/06 09:07:13 jruoho Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.78.2.1 2011/06/06 09:07:13 jruoho Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65
66 #include <miscfs/genfs/genfs.h>
67
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76
77 /*
78 * These flags select various performance enhancements.
79 */
80 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
82 int coda_access_cache = 1; /* Set to handle some access checks directly */
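
/*
 * Illustrative sketch (not compiled): the three knobs above are plain
 * ints, so they can be flipped at run time (e.g. from a debugger or a
 * temporary local patch) to take the kernel-side caches out of the
 * picture when chasing a cache-coherence problem.  Nothing beyond the
 * variables defined above is assumed.
 */
#if 0
	coda_attr_cache = 0;	/* always ask venus for attributes */
	coda_symlink_cache = 0;	/* always ask venus for symlink contents */
	coda_access_cache = 0;	/* never answer access checks locally */
#endif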
83
84 /* structure to keep track of vfs calls */
85
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
87
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
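
/*
 * Minimal usage sketch (not compiled; the pattern is taken from the vnops
 * below): each operation marks its entry once, then marks the outcome on
 * its internal return paths.  MARK_INT_GEN is used for operations this
 * module generates on its own behalf, e.g. the internal open in coda_rdwr().
 */
#if 0
	MARK_ENTRY(CODA_GETATTR_STATS);
	if (VALID_VATTR(cp)) {			/* attribute cache hit */
		MARK_INT_SAT(CODA_GETATTR_STATS);
		return 0;
	}
	error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
	if (error)
		MARK_INT_FAIL(CODA_GETATTR_STATS);
	return error;
#endif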
92
93 /* Delay applied in myprintf, for debugging */
94 int coda_printf_delay = 0; /* in microseconds */
95 int coda_vnop_print_entry = 0;
96 static int coda_lockdebug = 0;
97
98 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
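
/*
 * Illustrative sketch (not compiled): the knobs above, together with the
 * codadebug bit mask tested by CODADEBUG()/CODADBGMSK() throughout this
 * file, can be set at run time to trace individual operations.
 */
#if 0
	coda_vnop_print_entry = 1;		/* log entry to every vnop */
	codadebug |= CODADBGMSK(CODA_LOOKUP) |	/* trace lookups ... */
	    CODADBGMSK(CODA_OPEN);		/* ... and opens */
#endif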
99
100 /* Definition of the vnode operation vector */
101
102 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
103 { &vop_default_desc, coda_vop_error },
104 { &vop_lookup_desc, coda_lookup }, /* lookup */
105 { &vop_create_desc, coda_create }, /* create */
106 { &vop_mknod_desc, coda_vop_error }, /* mknod */
107 { &vop_open_desc, coda_open }, /* open */
108 { &vop_close_desc, coda_close }, /* close */
109 { &vop_access_desc, coda_access }, /* access */
110 { &vop_getattr_desc, coda_getattr }, /* getattr */
111 { &vop_setattr_desc, coda_setattr }, /* setattr */
112 { &vop_read_desc, coda_read }, /* read */
113 { &vop_write_desc, coda_write }, /* write */
114 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
115 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
116 { &vop_mmap_desc, genfs_mmap }, /* mmap */
117 { &vop_fsync_desc, coda_fsync }, /* fsync */
118 { &vop_remove_desc, coda_remove }, /* remove */
119 { &vop_link_desc, coda_link }, /* link */
120 { &vop_rename_desc, coda_rename }, /* rename */
121 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
122 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
123 { &vop_symlink_desc, coda_symlink }, /* symlink */
124 { &vop_readdir_desc, coda_readdir }, /* readdir */
125 { &vop_readlink_desc, coda_readlink }, /* readlink */
126 { &vop_abortop_desc, coda_abortop }, /* abortop */
127 { &vop_inactive_desc, coda_inactive }, /* inactive */
128 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
129 { &vop_lock_desc, coda_lock }, /* lock */
130 { &vop_unlock_desc, coda_unlock }, /* unlock */
131 { &vop_bmap_desc, coda_bmap }, /* bmap */
132 { &vop_strategy_desc, coda_strategy }, /* strategy */
133 { &vop_print_desc, coda_vop_error }, /* print */
134 { &vop_islocked_desc, coda_islocked }, /* islocked */
135 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
136 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
137 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
138 { &vop_seek_desc, genfs_seek }, /* seek */
139 { &vop_poll_desc, genfs_poll }, /* poll */
140 { &vop_getpages_desc, coda_getpages }, /* getpages */
141 { &vop_putpages_desc, coda_putpages }, /* putpages */
142 { NULL, NULL }
143 };
144
145 const struct vnodeopv_desc coda_vnodeop_opv_desc =
146 { &coda_vnodeop_p, coda_vnodeop_entries };
147
148 /* Definitions of NetBSD vnodeop interfaces */
149
150 /*
151 * A generic error routine. Log the operation name if debugging, then return EIO.
152 */
153 int
154 coda_vop_error(void *anon) {
155 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
156
157 if (codadebug) {
158 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
159 (*desc)->vdesc_name));
160 }
161
162 return EIO;
163 }
164
165 /* A generic do-nothing. */
166 int
167 coda_vop_nop(void *anon) {
168 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
169
170 if (codadebug) {
171 myprintf(("Vnode operation %s called, but unsupported\n",
172 (*desc)->vdesc_name));
173 }
174 return (0);
175 }
176
177 int
178 coda_vnodeopstats_init(void)
179 {
180 int i;
181
182 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
183 coda_vnodeopstats[i].opcode = i;
184 coda_vnodeopstats[i].entries = 0;
185 coda_vnodeopstats[i].sat_intrn = 0;
186 coda_vnodeopstats[i].unsat_intrn = 0;
187 coda_vnodeopstats[i].gen_intrn = 0;
188 }
189
190 return 0;
191 }
192
193 /*
194 * XXX The entire relationship between VOP_OPEN and having a container
195 * file (via venus_open) needs to be reexamined. In particular, it's
196 * valid to open/mmap/close and then reference. Instead of doing
197 * VOP_OPEN when getpages needs a container, we should do the
198 * venus_open part, and record that the vnode has opened the container
199 * for getpages, and do the matching logical close on coda_inactive.
200 * Further, coda_rdwr needs a container file, and sometimes needs to
201 * do the equivalent of open (core dumps).
202 */
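
/*
 * One possible shape of the change proposed above, as a sketch only.
 * The c_getpages_open field and the placement of the calls are
 * hypothetical; the helpers called (venus_open, venus_close,
 * coda_grab_vnode) are the ones used elsewhere in this file.
 */
#if 0
	/* In coda_getpages(), instead of a full VOP_OPEN: */
	if (cp->c_ovp == NULL) {
		error = venus_open(vtomi(vp), &cp->c_fid, FREAD, cred,
		    curlwp, &dev, &inode);
		if (error == 0) {
			error = coda_grab_vnode(dev, inode, &cp->c_ovp);
			if (error == 0)
				VOP_UNLOCK(cp->c_ovp);	/* keep only the ref */
		}
		if (error)
			return error;
		cp->c_getpages_open = 1;	/* remember the logical open */
	}

	/* In coda_inactive(), the matching logical close: */
	if (cp->c_getpages_open) {
		(void)venus_close(vtomi(vp), &cp->c_fid, FREAD, cred, curlwp);
		cp->c_getpages_open = 0;
	}
#endif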
203 /*
204 * coda_open calls Venus to return the device and inode of the
205 * container file, and then obtains a vnode for that file. The
206 * container vnode is stored in the coda vnode, and a reference is
207 * added for each open file.
208 */
209 int
210 coda_open(void *v)
211 {
212 /*
213 * NetBSD can pass the O_EXCL flag in mode, even though the check
214 * has already happened. Venus defensively assumes that if open
215 * is passed O_EXCL, it must be a bug. We strip the flag here.
216 */
217 /* true args */
218 struct vop_open_args *ap = v;
219 struct vnode *vp = ap->a_vp;
220 struct cnode *cp = VTOC(vp);
221 int flag = ap->a_mode & (~O_EXCL);
222 kauth_cred_t cred = ap->a_cred;
223 /* locals */
224 int error;
225 dev_t dev; /* container file device, inode, vnode */
226 ino_t inode;
227 struct vnode *container_vp;
228
229 MARK_ENTRY(CODA_OPEN_STATS);
230
231 /* Check for open of control file. */
232 if (IS_CTL_VP(vp)) {
233 /* if (WRITABLE(flag)) */
234 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
235 MARK_INT_FAIL(CODA_OPEN_STATS);
236 return(EACCES);
237 }
238 MARK_INT_SAT(CODA_OPEN_STATS);
239 return(0);
240 }
241
242 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
243 if (error)
244 return (error);
245 if (!error) {
246 CODADEBUG(CODA_OPEN,
247 myprintf(("open: dev 0x%llx inode %llu result %d\n",
248 (unsigned long long)dev, (unsigned long long)inode, error));)
249 }
250
251 /*
252 * Obtain locked and referenced container vnode from container
253 * device/inode.
254 */
255 error = coda_grab_vnode(dev, inode, &container_vp);
256 if (error)
257 return (error);
258
259 /* Save the vnode pointer for the container file. */
260 if (cp->c_ovp == NULL) {
261 cp->c_ovp = container_vp;
262 } else {
263 if (cp->c_ovp != container_vp)
264 /*
265 * Perhaps venus returned a different container, or
266 * something else went wrong.
267 */
268 panic("coda_open: cp->c_ovp != container_vp");
269 }
270 cp->c_ocount++;
271
272 /* Flush the attribute cache if writing the file. */
273 if (flag & FWRITE) {
274 cp->c_owrite++;
275 cp->c_flags &= ~C_VATTR;
276 }
277
278 /*
279 * Save the <device, inode> pair for the container file to speed
280 * up subsequent reads while closed (mmap, program execution).
281 * This is perhaps safe because venus will invalidate the node
282 * before changing the container file mapping.
283 */
284 cp->c_device = dev;
285 cp->c_inode = inode;
286
287 /* Open the container file. */
288 error = VOP_OPEN(container_vp, flag, cred);
289 /*
290 * Drop the lock on the container, after we have done VOP_OPEN
291 * (which requires a locked vnode).
292 */
293 VOP_UNLOCK(container_vp);
294 return(error);
295 }
296
297 /*
298 * Close the cache file used for I/O and notify Venus.
299 */
300 int
301 coda_close(void *v)
302 {
303 /* true args */
304 struct vop_close_args *ap = v;
305 struct vnode *vp = ap->a_vp;
306 struct cnode *cp = VTOC(vp);
307 int flag = ap->a_fflag;
308 kauth_cred_t cred = ap->a_cred;
309 /* locals */
310 int error;
311
312 MARK_ENTRY(CODA_CLOSE_STATS);
313
314 /* Check for close of control file. */
315 if (IS_CTL_VP(vp)) {
316 MARK_INT_SAT(CODA_CLOSE_STATS);
317 return(0);
318 }
319
320 /*
321 * XXX The IS_UNMOUNTING part of this is very suspect.
322 */
323 if (IS_UNMOUNTING(cp)) {
324 if (cp->c_ovp) {
325 #ifdef CODA_VERBOSE
326 printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
327 vp->v_usecount, cp->c_ovp, vp, cp);
328 #endif
329 #ifdef hmm
330 vgone(cp->c_ovp);
331 #else
332 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
333 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
334 vput(cp->c_ovp);
335 #endif
336 } else {
337 #ifdef CODA_VERBOSE
338 printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
339 #endif
340 }
341 return ENODEV;
342 }
343
344 /* Lock the container node, and VOP_CLOSE it. */
345 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
346 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
347 /*
348 * Drop the lock we just obtained, and vrele the container vnode.
349 * Decrement reference counts, and clear container vnode pointer on
350 * last close.
351 */
352 vput(cp->c_ovp);
353 if (flag & FWRITE)
354 --cp->c_owrite;
355 if (--cp->c_ocount == 0)
356 cp->c_ovp = NULL;
357
358 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
359
360 CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
361 return(error);
362 }
363
364 int
365 coda_read(void *v)
366 {
367 struct vop_read_args *ap = v;
368
369 ENTRY;
370 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
371 ap->a_ioflag, ap->a_cred, curlwp));
372 }
373
374 int
375 coda_write(void *v)
376 {
377 struct vop_write_args *ap = v;
378
379 ENTRY;
380 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
381 ap->a_ioflag, ap->a_cred, curlwp));
382 }
383
384 int
385 coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
386 kauth_cred_t cred, struct lwp *l)
387 {
388 /* upcall decl */
389 /* NOTE: container file operation!!! */
390 /* locals */
391 struct cnode *cp = VTOC(vp);
392 struct vnode *cfvp = cp->c_ovp;
393 struct proc *p = l->l_proc;
394 int opened_internally = 0;
395 int error = 0;
396
397 MARK_ENTRY(CODA_RDWR_STATS);
398
399 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
400 uiop->uio_iov->iov_base,
401 (unsigned long) uiop->uio_resid,
402 (long long) uiop->uio_offset)); )
403
404 /* Check for rdwr of control object. */
405 if (IS_CTL_VP(vp)) {
406 MARK_INT_FAIL(CODA_RDWR_STATS);
407 return(EINVAL);
408 }
409
410 /* Redirect the request to UFS. */
411
412 /*
413 * If file is not already open this must be a page
414 * {read,write} request. Iget the cache file's inode
415 * pointer if we still have its <device, inode> pair.
416 * Otherwise, we must do an internal open to derive the
417 * pair.
418 * XXX Integrate this into a coherent strategy for container
419 * file acquisition.
420 */
421 if (cfvp == NULL) {
422 /*
423 * If we're dumping core, do the internal open. Otherwise
424 * venus won't have the correct size of the core when
425 * it's completely written.
426 */
427 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
428 printf("coda_rdwr: grabbing container vnode, losing reference\n");
429 /* Get locked and refed vnode. */
430 error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
431 if (error) {
432 MARK_INT_FAIL(CODA_RDWR_STATS);
433 return(error);
434 }
435 /*
436 * Drop lock.
437 * XXX Where is the reference released?
438 */
439 VOP_UNLOCK(cfvp);
440 }
441 else {
442 printf("coda_rdwr: internal VOP_OPEN\n");
443 opened_internally = 1;
444 MARK_INT_GEN(CODA_OPEN_STATS);
445 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
446 #ifdef CODA_VERBOSE
447 printf("coda_rdwr: Internally Opening %p\n", vp);
448 #endif
449 if (error) {
450 MARK_INT_FAIL(CODA_RDWR_STATS);
451 return(error);
452 }
453 cfvp = cp->c_ovp;
454 }
455 }
456
457 /* Have UFS handle the call. */
458 CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
459 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
460
461 if (rw == UIO_READ) {
462 error = VOP_READ(cfvp, uiop, ioflag, cred);
463 } else {
464 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
465 }
466
467 if (error)
468 MARK_INT_FAIL(CODA_RDWR_STATS);
469 else
470 MARK_INT_SAT(CODA_RDWR_STATS);
471
472 /* Do an internal close if necessary. */
473 if (opened_internally) {
474 MARK_INT_GEN(CODA_CLOSE_STATS);
475 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
476 }
477
478 /* Invalidate cached attributes if writing. */
479 if (rw == UIO_WRITE)
480 cp->c_flags &= ~C_VATTR;
481 return(error);
482 }
483
484 int
485 coda_ioctl(void *v)
486 {
487 /* true args */
488 struct vop_ioctl_args *ap = v;
489 struct vnode *vp = ap->a_vp;
490 int com = ap->a_command;
491 void *data = ap->a_data;
492 int flag = ap->a_fflag;
493 kauth_cred_t cred = ap->a_cred;
494 /* locals */
495 int error;
496 struct vnode *tvp;
497 struct PioctlData *iap = (struct PioctlData *)data;
498 namei_simple_flags_t sflags;
499
500 MARK_ENTRY(CODA_IOCTL_STATS);
501
502 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
503
504 /* Don't check for operation on a dying object; for the ctlvp it
505 shouldn't matter. */
506
507 /* Must be control object to succeed. */
508 if (!IS_CTL_VP(vp)) {
509 MARK_INT_FAIL(CODA_IOCTL_STATS);
510 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
511 return (EOPNOTSUPP);
512 }
513 /* Look up the pathname. */
514
515 /* Should we use the name cache here? It would get it from
516 lookupname sooner or later anyway, right? */
517
518 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
519 error = namei_simple_user(iap->path, sflags, &tvp);
520
521 if (error) {
522 MARK_INT_FAIL(CODA_IOCTL_STATS);
523 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
524 error));)
525 return(error);
526 }
527
528 /*
529 * Make sure this is a coda style cnode, but it may be a
530 * different vfsp
531 */
532 /* XXX: this totally violates the comment about vtagtype in vnode.h */
533 if (tvp->v_tag != VT_CODA) {
534 vrele(tvp);
535 MARK_INT_FAIL(CODA_IOCTL_STATS);
536 CODADEBUG(CODA_IOCTL,
537 myprintf(("coda_ioctl error: %s not a coda object\n",
538 iap->path));)
539 return(EINVAL);
540 }
541
542 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
543 vrele(tvp);
544 return(EINVAL);
545 }
546 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
547 cred, curlwp);
548
549 if (error)
550 MARK_INT_FAIL(CODA_IOCTL_STATS);
551 else
552 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
553
554 vrele(tvp);
555 return(error);
556 }
557
558 /*
559 * To reduce the cost of a user-level venus, we cache attributes in
560 * the kernel. Each cnode has storage allocated for an attribute. If
561 * c_vattr is valid, return a reference to it. Otherwise, get the
562 * attributes from venus and store them in the cnode. There is some
563 * question if this method is a security leak. But I think that in
564 * order to make this call, the user must have done a lookup and
565 * opened the file, and therefore should already have access.
566 */
567 int
568 coda_getattr(void *v)
569 {
570 /* true args */
571 struct vop_getattr_args *ap = v;
572 struct vnode *vp = ap->a_vp;
573 struct cnode *cp = VTOC(vp);
574 struct vattr *vap = ap->a_vap;
575 kauth_cred_t cred = ap->a_cred;
576 /* locals */
577 int error;
578
579 MARK_ENTRY(CODA_GETATTR_STATS);
580
581 /* Check for getattr of control object. */
582 if (IS_CTL_VP(vp)) {
583 MARK_INT_FAIL(CODA_GETATTR_STATS);
584 return(ENOENT);
585 }
586
587 /* Check to see if the attributes have already been cached */
588 if (VALID_VATTR(cp)) {
589 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
590 coda_f2s(&cp->c_fid)));});
591 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
592 print_vattr(&cp->c_vattr); );
593
594 *vap = cp->c_vattr;
595 MARK_INT_SAT(CODA_GETATTR_STATS);
596 return(0);
597 }
598
599 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
600
601 if (!error) {
602 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
603 coda_f2s(&cp->c_fid), error)); )
604
605 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
606 print_vattr(vap); );
607
608 /* If not open for write, store attributes in cnode */
609 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
610 cp->c_vattr = *vap;
611 cp->c_flags |= C_VATTR;
612 }
613
614 }
615 return(error);
616 }
617
618 int
619 coda_setattr(void *v)
620 {
621 /* true args */
622 struct vop_setattr_args *ap = v;
623 struct vnode *vp = ap->a_vp;
624 struct cnode *cp = VTOC(vp);
625 struct vattr *vap = ap->a_vap;
626 kauth_cred_t cred = ap->a_cred;
627 /* locals */
628 int error;
629
630 MARK_ENTRY(CODA_SETATTR_STATS);
631
632 /* Check for setattr of control object. */
633 if (IS_CTL_VP(vp)) {
634 MARK_INT_FAIL(CODA_SETATTR_STATS);
635 return(ENOENT);
636 }
637
638 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
639 print_vattr(vap);
640 }
641 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
642
643 if (!error)
644 cp->c_flags &= ~C_VATTR;
645
646 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
647 return(error);
648 }
649
650 int
651 coda_access(void *v)
652 {
653 /* true args */
654 struct vop_access_args *ap = v;
655 struct vnode *vp = ap->a_vp;
656 struct cnode *cp = VTOC(vp);
657 int mode = ap->a_mode;
658 kauth_cred_t cred = ap->a_cred;
659 /* locals */
660 int error;
661
662 MARK_ENTRY(CODA_ACCESS_STATS);
663
664 /* Check for access of control object. Only read access is
665 allowed on it. */
666 if (IS_CTL_VP(vp)) {
667 /* bogus hack - all will be marked as successes */
668 MARK_INT_SAT(CODA_ACCESS_STATS);
669 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
670 ? 0 : EACCES);
671 }
672
673 /*
674 * if the file is a directory, and we are checking exec (e.g. lookup)
675 * access, and the file is in the namecache, then the user must have
676 * lookup access to it.
677 */
678 if (coda_access_cache) {
679 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
680 if (coda_nc_lookup(cp, ".", 1, cred)) {
681 MARK_INT_SAT(CODA_ACCESS_STATS);
682 return(0); /* it was in the cache */
683 }
684 }
685 }
686
687 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
688
689 return(error);
690 }
691
692 /*
693 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
694 * done. If a buffer has been saved in anticipation of a coda_create or
695 * a coda_remove, delete it.
696 */
697 /* ARGSUSED */
698 int
699 coda_abortop(void *v)
700 {
701 /* true args */
702 struct vop_abortop_args /* {
703 struct vnode *a_dvp;
704 struct componentname *a_cnp;
705 } */ *ap = v;
706
707 (void)ap;
708 /* upcall decl */
709 /* locals */
710
711 return (0);
712 }
713
714 int
715 coda_readlink(void *v)
716 {
717 /* true args */
718 struct vop_readlink_args *ap = v;
719 struct vnode *vp = ap->a_vp;
720 struct cnode *cp = VTOC(vp);
721 struct uio *uiop = ap->a_uio;
722 kauth_cred_t cred = ap->a_cred;
723 /* locals */
724 struct lwp *l = curlwp;
725 int error;
726 char *str;
727 int len;
728
729 MARK_ENTRY(CODA_READLINK_STATS);
730
731 /* Check for readlink of control object. */
732 if (IS_CTL_VP(vp)) {
733 MARK_INT_FAIL(CODA_READLINK_STATS);
734 return(ENOENT);
735 }
736
737 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
738 uiop->uio_rw = UIO_READ;
739 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
740 if (error)
741 MARK_INT_FAIL(CODA_READLINK_STATS);
742 else
743 MARK_INT_SAT(CODA_READLINK_STATS);
744 return(error);
745 }
746
747 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
748
749 if (!error) {
750 uiop->uio_rw = UIO_READ;
751 error = uiomove(str, len, uiop);
752
753 if (coda_symlink_cache) {
754 cp->c_symlink = str;
755 cp->c_symlen = len;
756 cp->c_flags |= C_SYMLINK;
757 } else
758 CODA_FREE(str, len);
759 }
760
761 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
762 return(error);
763 }
764
765 int
766 coda_fsync(void *v)
767 {
768 /* true args */
769 struct vop_fsync_args *ap = v;
770 struct vnode *vp = ap->a_vp;
771 struct cnode *cp = VTOC(vp);
772 kauth_cred_t cred = ap->a_cred;
773 /* locals */
774 struct vnode *convp = cp->c_ovp;
775 int error;
776
777 MARK_ENTRY(CODA_FSYNC_STATS);
778
779 /* Check for fsync on an unmounting object */
780 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
781 * after an unmount has been initiated. This is a Bad Thing,
782 * which we have to avoid. Not a legitimate failure for stats.
783 */
784 if (IS_UNMOUNTING(cp)) {
785 return(ENODEV);
786 }
787
788 /* Check for fsync of control object. */
789 if (IS_CTL_VP(vp)) {
790 MARK_INT_SAT(CODA_FSYNC_STATS);
791 return(0);
792 }
793
794 if (convp)
795 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
796
797 /*
798 * We can expect fsync on any vnode at all if venus is purging it.
799 * Venus can't very well answer the fsync request, now can it?
800 * Hopefully, it won't have to, because hopefully, venus preserves
801 * the (possibly untrue) invariant that it never purges an open
802 * vnode. Hopefully.
803 */
804 if (cp->c_flags & C_PURGING) {
805 return(0);
806 }
807
808 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
809
810 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
811 return(error);
812 }
813
814 /*
815 * vp is locked on entry, and we must unlock it.
816 * XXX This routine is suspect and probably needs rewriting.
817 */
818 int
819 coda_inactive(void *v)
820 {
821 /* true args */
822 struct vop_inactive_args *ap = v;
823 struct vnode *vp = ap->a_vp;
824 struct cnode *cp = VTOC(vp);
825 kauth_cred_t cred __unused = NULL;
826
827 /* We don't need to send inactive to venus - DCS */
828 MARK_ENTRY(CODA_INACTIVE_STATS);
829
830 if (IS_CTL_VP(vp)) {
831 MARK_INT_SAT(CODA_INACTIVE_STATS);
832 return 0;
833 }
834
835 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
836 coda_f2s(&cp->c_fid), vp->v_mount));)
837
838 /* If an array has been allocated to hold the symlink, deallocate it */
839 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
840 if (cp->c_symlink == NULL)
841 panic("coda_inactive: null symlink pointer in cnode");
842
843 CODA_FREE(cp->c_symlink, cp->c_symlen);
844 cp->c_flags &= ~C_SYMLINK;
845 cp->c_symlen = 0;
846 }
847
848 /* Remove it from the table so it can't be found. */
849 coda_unsave(cp);
850 if (vp->v_mount->mnt_data == NULL) {
851 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
852 panic("badness in coda_inactive");
853 }
854
855 if (IS_UNMOUNTING(cp)) {
856 /* XXX Do we need to VOP_CLOSE container vnodes? */
857 if (vp->v_usecount > 0)
858 printf("coda_inactive: IS_UNMOUNTING %p usecount %d\n",
859 vp, vp->v_usecount);
860 if (cp->c_ovp != NULL)
861 printf("coda_inactive: %p ovp != NULL\n", vp);
862 VOP_UNLOCK(vp);
863 } else {
864 /* Sanity checks that perhaps should be panic. */
865 if (vp->v_usecount) {
866 printf("coda_inactive: %p usecount %d\n", vp, vp->v_usecount);
867 }
868 if (cp->c_ovp != NULL) {
869 printf("coda_inactive: %p ovp != NULL\n", vp);
870 }
871 VOP_UNLOCK(vp);
872 *ap->a_recycle = true;
873 }
874
875 MARK_INT_SAT(CODA_INACTIVE_STATS);
876 return(0);
877 }
878
879 /*
880 * Coda does not use the normal namecache, but a private version.
881 * Consider how to use the standard facility instead.
882 */
883 int
884 coda_lookup(void *v)
885 {
886 /* true args */
887 struct vop_lookup_args *ap = v;
888 /* (locked) vnode of dir in which to do lookup */
889 struct vnode *dvp = ap->a_dvp;
890 struct cnode *dcp = VTOC(dvp);
891 /* output variable for result */
892 struct vnode **vpp = ap->a_vpp;
893 /* name to lookup */
894 struct componentname *cnp = ap->a_cnp;
895 kauth_cred_t cred = cnp->cn_cred;
896 struct lwp *l = curlwp;
897 /* locals */
898 struct cnode *cp;
899 const char *nm = cnp->cn_nameptr;
900 int len = cnp->cn_namelen;
901 int flags = cnp->cn_flags;
902 int isdot;
903 CodaFid VFid;
904 int vtype;
905 int error = 0;
906
907 MARK_ENTRY(CODA_LOOKUP_STATS);
908
909 CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
910 nm, coda_f2s(&dcp->c_fid))););
911
912 /*
913 * XXX componentname flags in MODMASK are not handled at all
914 */
915
916 /*
917 * The overall strategy is to switch on the lookup type and get a
918 * result vnode that is vref'd but not locked. Then, the code at
919 * exit: switches on ., .., and regular lookups and does the right
920 * locking.
921 */
922
923 /* Check for lookup of control object. */
924 if (IS_CTL_NAME(dvp, nm, len)) {
925 *vpp = coda_ctlvp;
926 vref(*vpp);
927 MARK_INT_SAT(CODA_LOOKUP_STATS);
928 goto exit;
929 }
930
931 /* Avoid trying to hand venus an unreasonably long name. */
932 if (len+1 > CODA_MAXNAMLEN) {
933 MARK_INT_FAIL(CODA_LOOKUP_STATS);
934 CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
935 coda_f2s(&dcp->c_fid), nm)););
936 *vpp = (struct vnode *)0;
937 error = EINVAL;
938 goto exit;
939 }
940
941 /*
942 * XXX Check for DOT lookups, and short circuit all the caches,
943 * just doing an extra vref. (venus guarantees that lookup of
944 * . returns self.)
945 */
946 isdot = (len == 1 && nm[0] == '.');
947
948 /*
949 * Try to resolve the lookup in the minicache. If that fails, ask
950 * venus to do the lookup. XXX The interaction between vnode
951 * locking and any locking that coda does is not clear.
952 */
953 cp = coda_nc_lookup(dcp, nm, len, cred);
954 if (cp) {
955 *vpp = CTOV(cp);
956 vref(*vpp);
957 CODADEBUG(CODA_LOOKUP,
958 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
959 } else {
960 /* The name wasn't cached, so ask Venus. */
961 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);
962
963 if (error) {
964 MARK_INT_FAIL(CODA_LOOKUP_STATS);
965 CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
966 coda_f2s(&dcp->c_fid), nm, error));)
967 *vpp = (struct vnode *)0;
968 } else {
969 MARK_INT_SAT(CODA_LOOKUP_STATS);
970 CODADEBUG(CODA_LOOKUP,
971 myprintf(("lookup: %s type %o result %d\n",
972 coda_f2s(&VFid), vtype, error)); )
973
974 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
975 *vpp = CTOV(cp);
976 /* vpp is now vrefed. */
977
978 /*
979 * Unless this vnode is marked CODA_NOCACHE, enter it into
980 * the coda name cache to avoid a future venus round-trip.
981 * XXX Interaction with componentname NOCACHE is unclear.
982 */
983 if (!(vtype & CODA_NOCACHE))
984 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
985 }
986 }
987
988 exit:
989 /*
990 * If we are creating, and this was the last name to be looked up,
991 * and the error was ENOENT, then make the leaf NULL and return
992 * success.
993 * XXX Check against new lookup rules.
994 */
995 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
996 && (cnp->cn_flags & ISLASTCN)
997 && (error == ENOENT))
998 {
999 error = EJUSTRETURN;
1000 *ap->a_vpp = NULL;
1001 }
1002
1003 /*
1004 * If the lookup succeeded, we must generally lock the returned
1005 * vnode. This could be a ., .., or normal lookup. See
1006 * vnodeops(9) for the details.
1007 */
1008 /*
1009 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1010 * somehow, and remove LK_RETRY.
1011 */
1012 if (!error || (error == EJUSTRETURN)) {
1013 /* Lookup has a value and it isn't "."? */
1014 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1015 if (flags & ISDOTDOT)
1016 /* ..: unlock parent */
1017 VOP_UNLOCK(dvp);
1018 /* all but .: lock child */
1019 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1020 if (flags & ISDOTDOT)
1021 /* ..: relock parent */
1022 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1023 }
1024 /* else .: leave dvp locked */
1025 } else {
1026 /* The lookup failed, so return NULL. Leave dvp locked. */
1027 *ap->a_vpp = NULL;
1028 }
1029 return(error);
1030 }
1031
1032 /*ARGSUSED*/
1033 int
1034 coda_create(void *v)
1035 {
1036 /* true args */
1037 struct vop_create_args *ap = v;
1038 struct vnode *dvp = ap->a_dvp;
1039 struct cnode *dcp = VTOC(dvp);
1040 struct vattr *va = ap->a_vap;
1041 int exclusive = 1;
1042 int mode = ap->a_vap->va_mode;
1043 struct vnode **vpp = ap->a_vpp;
1044 struct componentname *cnp = ap->a_cnp;
1045 kauth_cred_t cred = cnp->cn_cred;
1046 struct lwp *l = curlwp;
1047 /* locals */
1048 int error;
1049 struct cnode *cp;
1050 const char *nm = cnp->cn_nameptr;
1051 int len = cnp->cn_namelen;
1052 CodaFid VFid;
1053 struct vattr attr;
1054
1055 MARK_ENTRY(CODA_CREATE_STATS);
1056
1057 /* All creates are exclusive XXX */
1058 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1059
1060 /* Check for create of control object. */
1061 if (IS_CTL_NAME(dvp, nm, len)) {
1062 *vpp = (struct vnode *)0;
1063 MARK_INT_FAIL(CODA_CREATE_STATS);
1064 return(EACCES);
1065 }
1066
1067 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1068
1069 if (!error) {
1070
1071 /*
1072 * XXX Violation of venus/kernel invariants is a difficult case,
1073 * but venus should not be able to cause a panic.
1074 */
1075 /* If this is an exclusive create, panic if the file already exists. */
1076 /* Venus should have detected the file and reported EEXIST. */
1077
1078 if ((exclusive == 1) &&
1079 (coda_find(&VFid) != NULL))
1080 panic("cnode existed for newly created file!");
1081
1082 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1083 *vpp = CTOV(cp);
1084
1085 /* XXX vnodeops doesn't say this argument can be changed. */
1086 /* Update va to reflect the new attributes. */
1087 (*va) = attr;
1088
1089 /* Update the attribute cache and mark it as valid */
1090 if (coda_attr_cache) {
1091 VTOC(*vpp)->c_vattr = attr;
1092 VTOC(*vpp)->c_flags |= C_VATTR;
1093 }
1094
1095 /* Invalidate parent's attr cache (modification time has changed). */
1096 VTOC(dvp)->c_flags &= ~C_VATTR;
1097
1098 /* enter the new vnode in the Name Cache */
1099 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1100
1101 CODADEBUG(CODA_CREATE,
1102 myprintf(("create: %s, result %d\n",
1103 coda_f2s(&VFid), error)); )
1104 } else {
1105 *vpp = (struct vnode *)0;
1106 CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
1107 }
1108
1109 /*
1110 * vnodeops(9) says that we must unlock the parent and lock the child.
1111 * XXX Should we lock the child first?
1112 */
1113 vput(dvp);
1114 if (!error) {
1115 if ((cnp->cn_flags & LOCKLEAF) == 0) {
1116 /* This should not happen; flags are for lookup only. */
1117 printf("coda_create: LOCKLEAF not set!\n");
1118 }
1119
1120 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1121 /* XXX Perhaps avoid this panic. */
1122 panic("coda_create: couldn't lock child");
1123 }
1124 }
1125
1126 return(error);
1127 }
1128
1129 int
1130 coda_remove(void *v)
1131 {
1132 /* true args */
1133 struct vop_remove_args *ap = v;
1134 struct vnode *dvp = ap->a_dvp;
1135 struct cnode *cp = VTOC(dvp);
1136 struct vnode *vp = ap->a_vp;
1137 struct componentname *cnp = ap->a_cnp;
1138 kauth_cred_t cred = cnp->cn_cred;
1139 struct lwp *l = curlwp;
1140 /* locals */
1141 int error;
1142 const char *nm = cnp->cn_nameptr;
1143 int len = cnp->cn_namelen;
1144 struct cnode *tp;
1145
1146 MARK_ENTRY(CODA_REMOVE_STATS);
1147
1148 CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
1149 nm, coda_f2s(&cp->c_fid))););
1150
1151 /* Remove the file's entry from the CODA Name Cache */
1152 /* We're being conservative here; it might be that this person
1153 * doesn't really have sufficient access to delete the file,
1154 * but we feel zapping the entry won't really hurt anyone. -- dcs
1155 */
1156 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1157 * exist, and one is removed, the link count on the other will be
1158 * off by 1. We could either invalidate the attrs if cached, or
1159 * fix them. I'll try to fix them. DCS 11/8/94
1160 */
1161 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1162 if (tp) {
1163 if (VALID_VATTR(tp)) { /* If attrs are cached */
1164 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1165 tp->c_vattr.va_nlink--;
1166 }
1167 }
1168
1169 coda_nc_zapfile(VTOC(dvp), nm, len);
1170 /* No need to flush it if it doesn't exist! */
1171 }
1172 /* Invalidate the parent's attr cache, the modification time has changed */
1173 VTOC(dvp)->c_flags &= ~C_VATTR;
1174
1175 /* Check for remove of control object. */
1176 if (IS_CTL_NAME(dvp, nm, len)) {
1177 MARK_INT_FAIL(CODA_REMOVE_STATS);
1178 return(ENOENT);
1179 }
1180
1181 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1182
1183 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1184
1185 /*
1186 * Unlock parent and child (avoiding double if ".").
1187 */
1188 if (dvp == vp) {
1189 vrele(vp);
1190 } else {
1191 vput(vp);
1192 }
1193 vput(dvp);
1194
1195 return(error);
1196 }
1197
1198 /*
1199 * dvp is the directory where the link is to go, and is locked.
1200 * vp is the object to be linked to, and is unlocked.
1201 * At exit, we must vput dvp (which unlocks and releases it).
1202 */
1203 int
1204 coda_link(void *v)
1205 {
1206 /* true args */
1207 struct vop_link_args *ap = v;
1208 struct vnode *vp = ap->a_vp;
1209 struct cnode *cp = VTOC(vp);
1210 struct vnode *dvp = ap->a_dvp;
1211 struct cnode *dcp = VTOC(dvp);
1212 struct componentname *cnp = ap->a_cnp;
1213 kauth_cred_t cred = cnp->cn_cred;
1214 struct lwp *l = curlwp;
1215 /* locals */
1216 int error;
1217 const char *nm = cnp->cn_nameptr;
1218 int len = cnp->cn_namelen;
1219
1220 MARK_ENTRY(CODA_LINK_STATS);
1221
1222 if (codadebug & CODADBGMSK(CODA_LINK)) {
1223 myprintf(("link: vp fid: %s\n",
1224 coda_f2s(&cp->c_fid)));
1225 myprintf(("link: dvp fid: %s\n",
1226 coda_f2s(&dcp->c_fid)));
1227 }
1237
1238 /* Check for link to/from control object. */
1239 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1240 MARK_INT_FAIL(CODA_LINK_STATS);
1241 return(EACCES);
1242 }
1243
1244 /* If linking . to a name, error out earlier. */
1245 if (vp == dvp) {
1246 printf("coda_link vp==dvp\n");
1247 error = EISDIR;
1248 goto exit;
1249 }
1250
1251 /* XXX Why does venus_link need the vnode to be locked? */
1252 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1253 printf("coda_link: couldn't lock vnode %p\n", vp);
1254 error = EFAULT; /* XXX better value */
1255 goto exit;
1256 }
1257 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1258 VOP_UNLOCK(vp);
1259
1260 /* Invalidate parent's attr cache (the modification time has changed). */
1261 VTOC(dvp)->c_flags &= ~C_VATTR;
1262 /* Invalidate child's attr cache (XXX why). */
1263 VTOC(vp)->c_flags &= ~C_VATTR;
1264
1265 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1266
1267 exit:
1268 vput(dvp);
1269 return(error);
1270 }
1271
1272 int
1273 coda_rename(void *v)
1274 {
1275 /* true args */
1276 struct vop_rename_args *ap = v;
1277 struct vnode *odvp = ap->a_fdvp;
1278 struct cnode *odcp = VTOC(odvp);
1279 struct componentname *fcnp = ap->a_fcnp;
1280 struct vnode *ndvp = ap->a_tdvp;
1281 struct cnode *ndcp = VTOC(ndvp);
1282 struct componentname *tcnp = ap->a_tcnp;
1283 kauth_cred_t cred = fcnp->cn_cred;
1284 struct lwp *l = curlwp;
1285 /* true args */
1286 int error;
1287 const char *fnm = fcnp->cn_nameptr;
1288 int flen = fcnp->cn_namelen;
1289 const char *tnm = tcnp->cn_nameptr;
1290 int tlen = tcnp->cn_namelen;
1291
1292 MARK_ENTRY(CODA_RENAME_STATS);
1293
1294 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1295 This could be Bad. XXX */
1296 #ifdef OLD_DIAGNOSTIC
1297 if ((fcnp->cn_cred != tcnp->cn_cred)
1298 || (fcnp->cn_lwp != tcnp->cn_lwp))
1299 {
1300 panic("coda_rename: component names don't agree");
1301 }
1302 #endif
1303
1304 /* Check for rename involving control object. */
1305 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1306 MARK_INT_FAIL(CODA_RENAME_STATS);
1307 return(EACCES);
1308 }
1309
1310 /* Problem with moving directories -- need to flush entry for .. */
1311 if (odvp != ndvp) {
1312 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1313 if (ovcp) {
1314 struct vnode *ovp = CTOV(ovcp);
1315 if ((ovp) &&
1316 (ovp->v_type == VDIR)) /* If it's a directory */
1317 coda_nc_zapfile(VTOC(ovp),"..", 2);
1318 }
1319 }
1320
1321 /* Remove the entries for both source and target files */
1322 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1323 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1324
1325 /* Invalidate the parent's attr cache, the modification time has changed */
1326 VTOC(odvp)->c_flags &= ~C_VATTR;
1327 VTOC(ndvp)->c_flags &= ~C_VATTR;
1328
1329 if (flen+1 > CODA_MAXNAMLEN) {
1330 MARK_INT_FAIL(CODA_RENAME_STATS);
1331 error = EINVAL;
1332 goto exit;
1333 }
1334
1335 if (tlen+1 > CODA_MAXNAMLEN) {
1336 MARK_INT_FAIL(CODA_RENAME_STATS);
1337 error = EINVAL;
1338 goto exit;
1339 }
1340
1341 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1342
1343 exit:
1344 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1345 /* XXX - do we need to call cache_purge on the moved vnode? */
1346 cache_purge(ap->a_fvp);
1347
1348 /* It seems to be incumbent on us to drop locks on all four vnodes */
1349 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1350
1351 vrele(ap->a_fvp);
1352 vrele(odvp);
1353
1354 if (ap->a_tvp) {
1355 if (ap->a_tvp == ndvp) {
1356 vrele(ap->a_tvp);
1357 } else {
1358 vput(ap->a_tvp);
1359 }
1360 }
1361
1362 vput(ndvp);
1363 return(error);
1364 }
1365
1366 int
1367 coda_mkdir(void *v)
1368 {
1369 /* true args */
1370 struct vop_mkdir_args *ap = v;
1371 struct vnode *dvp = ap->a_dvp;
1372 struct cnode *dcp = VTOC(dvp);
1373 struct componentname *cnp = ap->a_cnp;
1374 struct vattr *va = ap->a_vap;
1375 struct vnode **vpp = ap->a_vpp;
1376 kauth_cred_t cred = cnp->cn_cred;
1377 struct lwp *l = curlwp;
1378 /* locals */
1379 int error;
1380 const char *nm = cnp->cn_nameptr;
1381 int len = cnp->cn_namelen;
1382 struct cnode *cp;
1383 CodaFid VFid;
1384 struct vattr ova;
1385
1386 MARK_ENTRY(CODA_MKDIR_STATS);
1387
1388 /* Check for mkdir of control object. */
1389 if (IS_CTL_NAME(dvp, nm, len)) {
1390 *vpp = (struct vnode *)0;
1391 MARK_INT_FAIL(CODA_MKDIR_STATS);
1392 return(EACCES);
1393 }
1394
1395 if (len+1 > CODA_MAXNAMLEN) {
1396 *vpp = (struct vnode *)0;
1397 MARK_INT_FAIL(CODA_MKDIR_STATS);
1398 return(EACCES);
1399 }
1400
1401 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1402
1403 if (!error) {
1404 if (coda_find(&VFid) != NULL)
1405 panic("cnode existed for newly created directory!");
1406
1407
1408 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1409 *vpp = CTOV(cp);
1410
1411 /* enter the new vnode in the Name Cache */
1412 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1413
1414 /* as a side effect, enter "." and ".." for the directory */
1415 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1416 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1417
1418 if (coda_attr_cache) {
1419 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1420 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1421 }
1422
1423 /* Invalidate the parent's attr cache, the modification time has changed */
1424 VTOC(dvp)->c_flags &= ~C_VATTR;
1425
1426 CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
1427 coda_f2s(&VFid), error)); )
1428 } else {
1429 *vpp = (struct vnode *)0;
1430 CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
1431 }
1432
1433 /*
1434 * Currently, all mkdirs explicitly vput their dvp's.
1435 * It also appears that we *must* lock the vpp, since
1436 * lockleaf isn't set, but someone down the road is going
1437 * to try to unlock the new directory.
1438 */
1439 vput(dvp);
1440 if (!error) {
1441 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1442 panic("coda_mkdir: couldn't lock child");
1443 }
1444 }
1445
1446 return(error);
1447 }
1448
1449 int
1450 coda_rmdir(void *v)
1451 {
1452 /* true args */
1453 struct vop_rmdir_args *ap = v;
1454 struct vnode *dvp = ap->a_dvp;
1455 struct cnode *dcp = VTOC(dvp);
1456 struct vnode *vp = ap->a_vp;
1457 struct componentname *cnp = ap->a_cnp;
1458 kauth_cred_t cred = cnp->cn_cred;
1459 struct lwp *l = curlwp;
1460 /* true args */
1461 int error;
1462 const char *nm = cnp->cn_nameptr;
1463 int len = cnp->cn_namelen;
1464 struct cnode *cp;
1465
1466 MARK_ENTRY(CODA_RMDIR_STATS);
1467
1468 /* Check for rmdir of control object. */
1469 if (IS_CTL_NAME(dvp, nm, len)) {
1470 MARK_INT_FAIL(CODA_RMDIR_STATS);
1471 return(ENOENT);
1472 }
1473
1474 /* Can't remove . in self. */
1475 if (dvp == vp) {
1476 printf("coda_rmdir: dvp == vp\n");
1477 error = EINVAL;
1478 goto exit;
1479 }
1480
1481 /*
1482 * The caller may not have adequate permissions, and the venus
1483 * operation may fail, but it doesn't hurt from a correctness
1484 * viewpoint to invalidate cache entries.
1485 * XXX Why isn't this done after the venus_rmdir call?
1486 */
1487 /* Look up child in name cache (by name, from parent). */
1488 cp = coda_nc_lookup(dcp, nm, len, cred);
1489 /* If found, remove all children of the child (., ..). */
1490 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1491
1492 /* Remove child's own entry. */
1493 coda_nc_zapfile(dcp, nm, len);
1494
1495 /* Invalidate parent's attr cache (the modification time has changed). */
1496 dcp->c_flags &= ~C_VATTR;
1497
1498 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1499
1500 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1501
1502 exit:
1503 /* vput both vnodes */
1504 vput(dvp);
1505 if (dvp == vp) {
1506 vrele(vp);
1507 } else {
1508 vput(vp);
1509 }
1510
1511 return(error);
1512 }
1513
1514 int
1515 coda_symlink(void *v)
1516 {
1517 /* true args */
1518 struct vop_symlink_args *ap = v;
1519 struct vnode *dvp = ap->a_dvp;
1520 struct cnode *dcp = VTOC(dvp);
1521 /* a_vpp is used in place below */
1522 struct componentname *cnp = ap->a_cnp;
1523 struct vattr *tva = ap->a_vap;
1524 char *path = ap->a_target;
1525 kauth_cred_t cred = cnp->cn_cred;
1526 struct lwp *l = curlwp;
1527 /* locals */
1528 int error;
1529 u_long saved_cn_flags;
1530 const char *nm = cnp->cn_nameptr;
1531 int len = cnp->cn_namelen;
1532 int plen = strlen(path);
1533
1534 /*
1535 * Here's the strategy for the moment: perform the symlink, then
1536 * do a lookup to grab the resulting vnode. I know this requires
1537 * two communications with Venus for a new symbolic link, but
1538 * that's the way the ball bounces. I don't yet want to change
1539 * the way the Mach symlink works. When Mach support is
1540 * deprecated, we should change symlink so that the common case
1541 * returns the resultant vnode in a vpp argument.
1542 */
1543
1544 MARK_ENTRY(CODA_SYMLINK_STATS);
1545
1546 /* Check for symlink of control object. */
1547 if (IS_CTL_NAME(dvp, nm, len)) {
1548 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1549 error = EACCES;
1550 goto exit;
1551 }
1552
1553 if (plen+1 > CODA_MAXPATHLEN) {
1554 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1555 error = EINVAL;
1556 goto exit;
1557 }
1558
1559 if (len+1 > CODA_MAXNAMLEN) {
1560 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1561 error = EINVAL;
1562 goto exit;
1563 }
1564
1565 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1566
1567 /* Invalidate the parent's attr cache (modification time has changed). */
1568 dcp->c_flags &= ~C_VATTR;
1569
1570 if (!error) {
1571 /*
1572 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1573 * these are defined only for VOP_LOOKUP. We desire to reuse
1574 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1575 * stray flags passed to us. Such stray flags can occur because
1576 * sys_symlink makes a namei call and then reuses the
1577 * componentname structure.
1578 */
1579 /*
1580 * XXX Arguably we should create our own componentname structure
1581 * and not reuse the one that was passed in.
1582 */
1583 saved_cn_flags = cnp->cn_flags;
1584 cnp->cn_flags &= ~(MODMASK | OPMASK);
1585 cnp->cn_flags |= LOOKUP;
1586 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1587 cnp->cn_flags = saved_cn_flags;
1588 /* Either an error occurs, or ap->a_vpp is locked. */
1589 }
1590
1591 exit:
1592 /* unlock and dereference parent */
1593 vput(dvp);
1594
1595 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1596 return(error);
1597 }
1598
1599 /*
1600 * Read directory entries.
1601 */
1602 int
1603 coda_readdir(void *v)
1604 {
1605 /* true args */
1606 struct vop_readdir_args *ap = v;
1607 struct vnode *vp = ap->a_vp;
1608 struct cnode *cp = VTOC(vp);
1609 struct uio *uiop = ap->a_uio;
1610 kauth_cred_t cred = ap->a_cred;
1611 int *eofflag = ap->a_eofflag;
1612 off_t **cookies = ap->a_cookies;
1613 int *ncookies = ap->a_ncookies;
1614 /* upcall decl */
1615 /* locals */
1616 int error = 0;
1617
1618 MARK_ENTRY(CODA_READDIR_STATS);
1619
1620 CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )
1621
1622 /* Check for readdir of control object. */
1623 if (IS_CTL_VP(vp)) {
1624 MARK_INT_FAIL(CODA_READDIR_STATS);
1625 return(ENOENT);
1626 }
1627
1628 {
1629 /* Redirect the request to UFS. */
1630
1631 /* If directory is not already open do an "internal open" on it. */
1632 int opened_internally = 0;
1633 if (cp->c_ovp == NULL) {
1634 opened_internally = 1;
1635 MARK_INT_GEN(CODA_OPEN_STATS);
1636 error = VOP_OPEN(vp, FREAD, cred);
1637 #ifdef CODA_VERBOSE
1638 printf("coda_readdir: Internally Opening %p\n", vp);
1639 #endif
1640 if (error) return(error);
1641 } else
1642 vp = cp->c_ovp;
1643
1644 /* Have UFS handle the call. */
1645 CODADEBUG(CODA_READDIR, myprintf((
1646 "indirect readdir: fid = %s, refcnt = %d\n",
1647 coda_f2s(&cp->c_fid), vp->v_usecount)); )
1648 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1649 if (error)
1650 MARK_INT_FAIL(CODA_READDIR_STATS);
1651 else
1652 MARK_INT_SAT(CODA_READDIR_STATS);
1653
1654 /* Do an "internal close" if necessary. */
1655 if (opened_internally) {
1656 MARK_INT_GEN(CODA_CLOSE_STATS);
1657 (void)VOP_CLOSE(vp, FREAD, cred);
1658 }
1659 }
1660
1661 return(error);
1662 }
1663
1664 /*
1665 * Convert from file system blocks to device blocks
1666 */
1667 int
1668 coda_bmap(void *v)
1669 {
1670 /* XXX on the global proc */
1671 /* true args */
1672 struct vop_bmap_args *ap = v;
1673 struct vnode *vp __unused = ap->a_vp; /* file's vnode */
1674 daddr_t bn __unused = ap->a_bn; /* fs block number */
1675 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1676 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1677 struct lwp *l __unused = curlwp;
1678 /* upcall decl */
1679 /* locals */
1680
1681 *vpp = (struct vnode *)0;
1682 myprintf(("coda_bmap called!\n"));
1683 return(EINVAL);
1684 }
1685
1686 /*
1687 * I don't think the following two things are used anywhere, so I've
1688 * commented them out
1689 *
1690 * struct buf *async_bufhead;
1691 * int async_daemon_count;
1692 */
1693 int
1694 coda_strategy(void *v)
1695 {
1696 /* true args */
1697 struct vop_strategy_args *ap = v;
1698 struct buf *bp __unused = ap->a_bp;
1699 struct lwp *l __unused = curlwp;
1700 /* upcall decl */
1701 /* locals */
1702
1703 myprintf(("coda_strategy called! "));
1704 return(EINVAL);
1705 }
1706
1707 int
1708 coda_reclaim(void *v)
1709 {
1710 /* true args */
1711 struct vop_reclaim_args *ap = v;
1712 struct vnode *vp = ap->a_vp;
1713 struct cnode *cp = VTOC(vp);
1714 /* upcall decl */
1715 /* locals */
1716
1717 /*
1718 * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1719 */
1720 ENTRY;
1721
1722 if (IS_UNMOUNTING(cp)) {
1723 #ifdef DEBUG
1724 if (VTOC(vp)->c_ovp) {
1725 if (IS_UNMOUNTING(cp))
1726 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1727 }
1728 #endif
1729 } else {
1730 #ifdef OLD_DIAGNOSTIC
1731 if (vp->v_usecount != 0)
1732 printf("coda_reclaim: pushing active %p\n", vp);
1733 if (VTOC(vp)->c_ovp) {
1734 panic("coda_reclaim: c_ovp not void");
1735 }
1736 #endif
1737 }
1738 coda_free(VTOC(vp));
1739 SET_VTOC(vp) = NULL;
1740 return (0);
1741 }
1742
1743 int
1744 coda_lock(void *v)
1745 {
1746 /* true args */
1747 struct vop_lock_args *ap = v;
1748 struct vnode *vp = ap->a_vp;
1749 struct cnode *cp = VTOC(vp);
1750 /* upcall decl */
1751 /* locals */
1752
1753 ENTRY;
1754
1755 if (coda_lockdebug) {
1756 myprintf(("Attempting lock on %s\n",
1757 coda_f2s(&cp->c_fid)));
1758 }
1759
1760 return genfs_lock(v);
1761 }
1762
1763 int
1764 coda_unlock(void *v)
1765 {
1766 /* true args */
1767 struct vop_unlock_args *ap = v;
1768 struct vnode *vp = ap->a_vp;
1769 struct cnode *cp = VTOC(vp);
1770 /* upcall decl */
1771 /* locals */
1772
1773 ENTRY;
1774 if (coda_lockdebug) {
1775 myprintf(("Attempting unlock on %s\n",
1776 coda_f2s(&cp->c_fid)));
1777 }
1778
1779 return genfs_unlock(v);
1780 }
1781
1782 int
1783 coda_islocked(void *v)
1784 {
1785 /* true args */
1786 ENTRY;
1787
1788 return genfs_islocked(v);
1789 }
1790
1791 /*
1792 * Given a device and inode, obtain a locked vnode. One reference is
1793 * obtained and passed back to the caller.
1794 */
1795 int
1796 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1797 {
1798 int error;
1799 struct mount *mp;
1800
1801 /* Obtain mount point structure from device. */
1802 if (!(mp = devtomp(dev))) {
1803 myprintf(("coda_grab_vnode: devtomp(0x%llx) returns NULL\n",
1804 (unsigned long long)dev));
1805 return(ENXIO);
1806 }
1807
1808 /*
1809 * Obtain vnode from mount point and inode.
1810 * XXX VFS_VGET does not clearly define locked/referenced state of
1811 * returned vnode.
1812 */
1813 error = VFS_VGET(mp, ino, vpp);
1814 if (error) {
1815 myprintf(("coda_grab_vnode: iget/vget(0x%llx, %llu) returns %p, err %d\n",
1816 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1817 return(ENOENT);
1818 }
1819 return(0);
1820 }
1821
1822 void
1823 print_vattr(struct vattr *attr)
1824 {
1825 const char *typestr;
1826
1827 switch (attr->va_type) {
1828 case VNON:
1829 typestr = "VNON";
1830 break;
1831 case VREG:
1832 typestr = "VREG";
1833 break;
1834 case VDIR:
1835 typestr = "VDIR";
1836 break;
1837 case VBLK:
1838 typestr = "VBLK";
1839 break;
1840 case VCHR:
1841 typestr = "VCHR";
1842 break;
1843 case VLNK:
1844 typestr = "VLNK";
1845 break;
1846 case VSOCK:
1847 typestr = "VSCK";
1848 break;
1849 case VFIFO:
1850 typestr = "VFFO";
1851 break;
1852 case VBAD:
1853 typestr = "VBAD";
1854 break;
1855 default:
1856 typestr = "????";
1857 break;
1858 }
1859
1860
1861 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1862 typestr, (int)attr->va_mode, (int)attr->va_uid,
1863 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1864
1865 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1866 (int)attr->va_fileid, (int)attr->va_nlink,
1867 (int)attr->va_size,
1868 (int)attr->va_blocksize,(int)attr->va_bytes));
1869 myprintf((" gen %ld flags %ld vaflags %d\n",
1870 attr->va_gen, attr->va_flags, attr->va_vaflags));
1871 myprintf((" atime sec %d nsec %d\n",
1872 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1873 myprintf((" mtime sec %d nsec %d\n",
1874 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1875 myprintf((" ctime sec %d nsec %d\n",
1876 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1877 }
1878
1879 /* How to print a ucred */
1880 void
1881 print_cred(kauth_cred_t cred)
1882 {
1883
1884 uint16_t ngroups;
1885 int i;
1886
1887 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1888 kauth_cred_geteuid(cred)));
1889
1890 ngroups = kauth_cred_ngroups(cred);
1891 for (i=0; i < ngroups; i++)
1892 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1893 myprintf(("\n"));
1894
1895 }
1896
1897 /*
1898 * Return a vnode for the given fid.
1899 * If no cnode exists for this fid create one and put it
1900 * in a table hashed by coda_f2i(). If the cnode for
1901 * this fid is already in the table, return it (the reference count is
1902 * incremented by coda_find). The cnode will be flushed from the
1903 * table when coda_inactive calls coda_unsave.
1904 */
1905 struct cnode *
1906 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1907 {
1908 struct cnode *cp;
1909 int err;
1910
1911 if ((cp = coda_find(fid)) == NULL) {
1912 struct vnode *vp;
1913
1914 cp = coda_alloc();
1915 cp->c_fid = *fid;
1916
1917 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
1918 if (err) {
1919 panic("coda: getnewvnode returned error %d", err);
1920 }
1921 vp->v_data = cp;
1922 vp->v_type = type;
1923 cp->c_vnode = vp;
1924 uvm_vnp_setsize(vp, 0);
1925 coda_save(cp);
1926
1927 } else {
1928 vref(CTOV(cp));
1929 }
1930
1931 return cp;
1932 }
1933
1934 /*
1935 * coda_getpages may be called on a vnode which has not been opened,
1936 * e.g. to fault in pages to execute a program. In that case, we must
1937 * open the file to get the container. The vnode may or may not be
1938 * locked, and we must leave it in the same state.
1939 * XXX The protocol requires v_uobj.vmobjlock to be
1940 * held by caller, but this isn't documented in vnodeops(9) or vnode_if.src.
1941 */
1942 int
1943 coda_getpages(void *v)
1944 {
1945 struct vop_getpages_args /* {
1946 struct vnode *a_vp;
1947 voff_t a_offset;
1948 struct vm_page **a_m;
1949 int *a_count;
1950 int a_centeridx;
1951 vm_prot_t a_access_type;
1952 int a_advice;
1953 int a_flags;
1954 } */ *ap = v;
1955 struct vnode *vp = ap->a_vp;
1956 struct cnode *cp = VTOC(vp);
1957 struct lwp *l = curlwp;
1958 kauth_cred_t cred = l->l_cred;
1959 int error, cerror;
1960 int waslocked; /* 1 if vnode lock was held on entry */
1961 int didopen = 0; /* 1 if we opened container file */
1962
1963 /*
1964 * Handle a case that uvm_fault doesn't quite use yet.
1965 * See layer_vnops.c for inspiration.
1966 */
1967 if (ap->a_flags & PGO_LOCKED) {
1968 return EBUSY;
1969 }
1970
1971 /* Check for control object. */
1972 if (IS_CTL_VP(vp)) {
1973 printf("coda_getpages: control object %p\n", vp);
1974 mutex_exit(&vp->v_uobj.vmobjlock);
1975 return(EINVAL);
1976 }
1977
1978 /*
1979 * XXX It's really not ok to be releasing the lock we get,
1980 * because we could be overlapping with another call to
1981 * getpages and drop a lock they are relying on. We need to
1982 * figure out whether getpages ever is called holding the
1983 * lock, and if we should serialize getpages calls by some
1984 * mechanism.
1985 */
1986 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1987 waslocked = VOP_ISLOCKED(vp);
1988
1989 /* Drop the vmobject lock. */
1990 mutex_exit(&vp->v_uobj.vmobjlock);
1991
1992 /* Get container file if not already present. */
1993 if (cp->c_ovp == NULL) {
1994 /*
1995 * VOP_OPEN requires a locked vnode. We must avoid
1996 * locking the vnode if it is already locked, and
1997 * leave it in the same state on exit.
1998 */
1999 if (waslocked == 0) {
2000 cerror = vn_lock(vp, LK_EXCLUSIVE);
2001 if (cerror) {
2002 printf("coda_getpages: can't lock vnode %p\n",
2003 vp);
2004 return cerror;
2005 }
2006 #if 0
2007 printf("coda_getpages: locked vnode %p\n", vp);
2008 #endif
2009 }
2010
2011 /*
2012 * Open file (causes upcall to venus).
2013 * XXX Perhaps we should not fully open the file, but
2014 * simply obtain a container file.
2015 */
2016 /* XXX Is it ok to do this while holding the simplelock? */
2017 cerror = VOP_OPEN(vp, FREAD, cred);
2018
2019 if (cerror) {
2020 printf("coda_getpages: cannot open vnode %p => %d\n",
2021 vp, cerror);
2022 if (waslocked == 0)
2023 VOP_UNLOCK(vp);
2024 return cerror;
2025 }
2026
2027 #if 0
2028 printf("coda_getpages: opened vnode %p\n", vp);
2029 #endif
2030 didopen = 1;
2031 }
2032 KASSERT(cp->c_ovp != NULL);
2033
2034 /* Munge the arg structure to refer to the container vnode. */
2035 ap->a_vp = cp->c_ovp;
2036
2037 /* Get the lock on the container vnode, and call getpages on it. */
2038 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2039 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2040
2041 /* If we opened the vnode, we must close it. */
2042 if (didopen) {
2043 /*
2044 * VOP_CLOSE requires a locked vnode, but we are still
2045 * holding the lock (or riding a caller's lock).
2046 */
2047 cerror = VOP_CLOSE(vp, FREAD, cred);
2048 if (cerror != 0)
2049 /* XXX How should we handle this? */
2050 printf("coda_getpages: closed vnode %p -> %d\n",
2051 vp, cerror);
2052
2053 /* If we obtained a lock, drop it. */
2054 if (waslocked == 0)
2055 VOP_UNLOCK(vp);
2056 }
2057
2058 return error;
2059 }
2060
2061 /*
2062 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2063 * documented in vnodeops(9). XXX vnode_if.src doesn't say this.
2064 */
2065 int
2066 coda_putpages(void *v)
2067 {
2068 struct vop_putpages_args /* {
2069 struct vnode *a_vp;
2070 voff_t a_offlo;
2071 voff_t a_offhi;
2072 int a_flags;
2073 } */ *ap = v;
2074 struct vnode *vp = ap->a_vp;
2075 struct cnode *cp = VTOC(vp);
2076 int error;
2077
2078 /* Drop the vmobject lock. */
2079 mutex_exit(&vp->v_uobj.vmobjlock);
2080
2081 /* Check for control object. */
2082 if (IS_CTL_VP(vp)) {
2083 printf("coda_putpages: control object %p\n", vp);
2084 return(EINVAL);
2085 }
2086
2087 /*
2088 * If container object is not present, then there are no pages
2089 * to put; just return without error. This happens all the
2090 * time, apparently during discard of a closed vnode (which
2091 * trivially can't have dirty pages).
2092 */
2093 if (cp->c_ovp == NULL)
2094 return 0;
2095
2096 /* Munge the arg structure to refer to the container vnode. */
2097 ap->a_vp = cp->c_ovp;
2098
2099 /* Get the lock on the container vnode, and call putpages on it. */
2100 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2101 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2102
2103 return error;
2104 }
2105