1 /* $NetBSD: coda_vnops.c,v 1.85 2012/05/02 16:51:01 christos Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.85 2012/05/02 16:51:01 christos Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65
66 #include <miscfs/genfs/genfs.h>
67
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76
77 /*
78 * These flags select various performance enhancements.
79 */
80 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
82 int coda_access_cache = 1; /* Set to handle some access checks directly */
83
84 /* structure to keep track of vfs calls */
85
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
87
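/*
 * Per-operation counters kept in coda_vnodeopstats: MARK_ENTRY counts
 * every call, MARK_INT_SAT/MARK_INT_FAIL count requests satisfied or
 * failed internally, and MARK_INT_GEN counts operations we generate
 * ourselves (e.g. the implicit open/close done by coda_rdwr and
 * coda_readdir).
 */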
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
92
93 /* Set to log vnode lock/unlock attempts */
94 static int coda_lockdebug = 0;
95
96 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
97
98 /* Definition of the vnode operation vector */
99
100 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
101 { &vop_default_desc, coda_vop_error },
102 { &vop_lookup_desc, coda_lookup }, /* lookup */
103 { &vop_create_desc, coda_create }, /* create */
104 { &vop_mknod_desc, coda_vop_error }, /* mknod */
105 { &vop_open_desc, coda_open }, /* open */
106 { &vop_close_desc, coda_close }, /* close */
107 { &vop_access_desc, coda_access }, /* access */
108 { &vop_getattr_desc, coda_getattr }, /* getattr */
109 { &vop_setattr_desc, coda_setattr }, /* setattr */
110 { &vop_read_desc, coda_read }, /* read */
111 { &vop_write_desc, coda_write }, /* write */
112 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
113 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
114 { &vop_mmap_desc, genfs_mmap }, /* mmap */
115 { &vop_fsync_desc, coda_fsync }, /* fsync */
116 { &vop_remove_desc, coda_remove }, /* remove */
117 { &vop_link_desc, coda_link }, /* link */
118 { &vop_rename_desc, coda_rename }, /* rename */
119 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
120 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
121 { &vop_symlink_desc, coda_symlink }, /* symlink */
122 { &vop_readdir_desc, coda_readdir }, /* readdir */
123 { &vop_readlink_desc, coda_readlink }, /* readlink */
124 { &vop_abortop_desc, coda_abortop }, /* abortop */
125 { &vop_inactive_desc, coda_inactive }, /* inactive */
126 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
127 { &vop_lock_desc, coda_lock }, /* lock */
128 { &vop_unlock_desc, coda_unlock }, /* unlock */
129 { &vop_bmap_desc, coda_bmap }, /* bmap */
130 { &vop_strategy_desc, coda_strategy }, /* strategy */
131 { &vop_print_desc, coda_vop_error }, /* print */
132 { &vop_islocked_desc, coda_islocked }, /* islocked */
133 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
134 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
135 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
136 { &vop_seek_desc, genfs_seek }, /* seek */
137 { &vop_poll_desc, genfs_poll }, /* poll */
138 { &vop_getpages_desc, coda_getpages }, /* getpages */
139 { &vop_putpages_desc, coda_putpages }, /* putpages */
140 { NULL, NULL }
141 };
142
143 const struct vnodeopv_desc coda_vnodeop_opv_desc =
144 { &coda_vnodeop_p, coda_vnodeop_entries };
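/*
 * Any operation not listed above falls through to vop_default_desc and
 * thus to coda_vop_error, which logs the call (when codadebug is set)
 * and returns EIO. The opv_desc itself is hooked up with the rest of
 * the coda vfsops at attach time (see coda_vfsops.c).
 */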
145
146 /* Definitions of NetBSD vnodeop interfaces */
147
148 /*
149 * A generic error routine. Return EIO without looking at arguments.
150 */
151 int
152 coda_vop_error(void *anon) {
153 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
154
155 if (codadebug) {
156 myprintf(("%s: Vnode operation %s called (error).\n",
157 __func__, (*desc)->vdesc_name));
158 }
159
160 return EIO;
161 }
162
163 /* A generic do-nothing. */
164 int
165 coda_vop_nop(void *anon) {
166 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
167
168 if (codadebug) {
169 myprintf(("Vnode operation %s called, but unsupported\n",
170 (*desc)->vdesc_name));
171 }
172 return (0);
173 }
174
175 int
176 coda_vnodeopstats_init(void)
177 {
178 int i;
179
180 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
181 coda_vnodeopstats[i].opcode = i;
182 coda_vnodeopstats[i].entries = 0;
183 coda_vnodeopstats[i].sat_intrn = 0;
184 coda_vnodeopstats[i].unsat_intrn = 0;
185 coda_vnodeopstats[i].gen_intrn = 0;
186 }
187
188 return 0;
189 }
190
191 /*
192 * XXX The entire relationship between VOP_OPEN and having a container
193 * file (via venus_open) needs to be reexamined. In particular, it's
194 * valid to open/mmap/close and then reference. Instead of doing
195 * VOP_OPEN when getpages needs a container, we should do the
196 * venus_open part, and record that the vnode has opened the container
197 * for getpages, and do the matching logical close on coda_inactive.
198 * Further, coda_rdwr needs a container file, and sometimes needs to
199 * do the equivalent of open (core dumps).
200 */
201 /*
202 * coda_open calls Venus to return the device and inode of the
203 * container file, and then obtains a vnode for that file. The
204 * container vnode is stored in the coda vnode, and a reference is
205 * added for each open file.
206 */
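/*
 * Locking: vp arrives locked (as for any VOP_OPEN) and is returned
 * locked. The container vnode comes back from coda_grab_vnode locked
 * and referenced; the reference is kept in cp->c_ovp, but its lock is
 * dropped before returning.
 */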
207 int
208 coda_open(void *v)
209 {
210 /*
211 * NetBSD can pass the O_EXCL flag in mode, even though the check
212 * has already happened. Venus defensively assumes that if open
213 * is passed the EXCL flag, it must be a bug. We strip the flag here.
214 */
215 /* true args */
216 struct vop_open_args *ap = v;
217 vnode_t *vp = ap->a_vp;
218 struct cnode *cp = VTOC(vp);
219 int flag = ap->a_mode & (~O_EXCL);
220 kauth_cred_t cred = ap->a_cred;
221 /* locals */
222 int error;
223 dev_t dev; /* container file device, inode, vnode */
224 ino_t inode;
225 vnode_t *container_vp;
226
227 MARK_ENTRY(CODA_OPEN_STATS);
228
229 /* Check for open of control file. */
230 if (IS_CTL_VP(vp)) {
231 /* if (WRITABLE(flag)) */
232 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
233 MARK_INT_FAIL(CODA_OPEN_STATS);
234 return(EACCES);
235 }
236 MARK_INT_SAT(CODA_OPEN_STATS);
237 return(0);
238 }
239
240 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
241 if (error)
242 return (error);
244 CODADEBUG(CODA_OPEN, myprintf((
245 "%s: dev 0x%llx inode %llu result %d\n", __func__,
246 (unsigned long long)dev, (unsigned long long)inode, error));)
248
249 /*
250 * Obtain locked and referenced container vnode from container
251 * device/inode.
252 */
253 error = coda_grab_vnode(vp, dev, inode, &container_vp);
254 if (error)
255 return (error);
256
257 /* Save the vnode pointer for the container file. */
258 if (cp->c_ovp == NULL) {
259 cp->c_ovp = container_vp;
260 } else {
261 if (cp->c_ovp != container_vp)
262 /*
263 * Perhaps venus returned a different container, or
264 * something else went wrong.
265 */
266 panic("%s: cp->c_ovp != container_vp", __func__);
267 }
268 cp->c_ocount++;
269
270 /* Flush the attribute cache if writing the file. */
271 if (flag & FWRITE) {
272 cp->c_owrite++;
273 cp->c_flags &= ~C_VATTR;
274 }
275
276 /*
277 * Save the <device, inode> pair for the container file to speed
278 * up subsequent reads while closed (mmap, program execution).
279 * This is perhaps safe because venus will invalidate the node
280 * before changing the container file mapping.
281 */
282 cp->c_device = dev;
283 cp->c_inode = inode;
284
285 /* Open the container file. */
286 error = VOP_OPEN(container_vp, flag, cred);
287 /*
288 * Drop the lock on the container, after we have done VOP_OPEN
289 * (which requires a locked vnode).
290 */
291 VOP_UNLOCK(container_vp);
292 return(error);
293 }
294
295 /*
296 * Close the cache file used for I/O and notify Venus.
297 */
298 int
299 coda_close(void *v)
300 {
301 /* true args */
302 struct vop_close_args *ap = v;
303 vnode_t *vp = ap->a_vp;
304 struct cnode *cp = VTOC(vp);
305 int flag = ap->a_fflag;
306 kauth_cred_t cred = ap->a_cred;
307 /* locals */
308 int error;
309
310 MARK_ENTRY(CODA_CLOSE_STATS);
311
312 /* Check for close of control file. */
313 if (IS_CTL_VP(vp)) {
314 MARK_INT_SAT(CODA_CLOSE_STATS);
315 return(0);
316 }
317
318 /*
319 * XXX The IS_UNMOUNTING part of this is very suspect.
320 */
321 if (IS_UNMOUNTING(cp)) {
322 if (cp->c_ovp) {
323 #ifdef CODA_VERBOSE
324 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
325 __func__, vp->v_usecount, cp->c_ovp, vp, cp);
326 #endif
327 #ifdef hmm
328 vgone(cp->c_ovp);
329 #else
330 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
331 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
332 vput(cp->c_ovp);
333 #endif
334 } else {
335 #ifdef CODA_VERBOSE
336 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
337 #endif
338 }
339 return ENODEV;
340 }
341
342 /* Lock the container node, and VOP_CLOSE it. */
343 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
344 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
345 /*
346 * Drop the lock we just obtained, and vrele the container vnode.
347 * Decrement reference counts, and clear container vnode pointer on
348 * last close.
349 */
350 vput(cp->c_ovp);
351 if (flag & FWRITE)
352 --cp->c_owrite;
353 if (--cp->c_ocount == 0)
354 cp->c_ovp = NULL;
355
356 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
357
358 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
359 return(error);
360 }
361
362 int
363 coda_read(void *v)
364 {
365 struct vop_read_args *ap = v;
366
367 ENTRY;
368 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
369 ap->a_ioflag, ap->a_cred, curlwp));
370 }
371
372 int
373 coda_write(void *v)
374 {
375 struct vop_write_args *ap = v;
376
377 ENTRY;
378 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
379 ap->a_ioflag, ap->a_cred, curlwp));
380 }
381
382 int
383 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
384 kauth_cred_t cred, struct lwp *l)
385 {
386 /* upcall decl */
387 /* NOTE: container file operation!!! */
388 /* locals */
389 struct cnode *cp = VTOC(vp);
390 vnode_t *cfvp = cp->c_ovp;
391 struct proc *p = l->l_proc;
392 int opened_internally = 0;
393 int error = 0;
394
395 MARK_ENTRY(CODA_RDWR_STATS);
396
397 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
398 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
399 (long long) uiop->uio_offset)); )
400
401 /* Check for rdwr of control object. */
402 if (IS_CTL_VP(vp)) {
403 MARK_INT_FAIL(CODA_RDWR_STATS);
404 return(EINVAL);
405 }
406
407 /* Redirect the request to UFS. */
408
409 /*
410 * If file is not already open this must be a page
411 * {read,write} request. Iget the cache file's inode
412 * pointer if we still have its <device, inode> pair.
413 * Otherwise, we must do an internal open to derive the
414 * pair.
415 * XXX Integrate this into a coherent strategy for container
416 * file acquisition.
417 */
418 if (cfvp == NULL) {
419 /*
420 * If we're dumping core, do the internal open. Otherwise
421 * venus won't have the correct size of the core when
422 * it's completely written.
423 */
424 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
425 printf("%s: grabbing container vnode, losing reference\n",
426 __func__);
427 /* Get locked and refed vnode. */
428 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
429 if (error) {
430 MARK_INT_FAIL(CODA_RDWR_STATS);
431 return(error);
432 }
433 /*
434 * Drop lock.
435 * XXX Where is the reference released?
436 */
437 VOP_UNLOCK(cfvp);
438 }
439 else {
440 printf("%s: internal VOP_OPEN\n", __func__);
441 opened_internally = 1;
442 MARK_INT_GEN(CODA_OPEN_STATS);
443 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
444 #ifdef CODA_VERBOSE
445 printf("%s: Internally Opening %p\n", __func__, vp);
446 #endif
447 if (error) {
448 MARK_INT_FAIL(CODA_RDWR_STATS);
449 return(error);
450 }
451 cfvp = cp->c_ovp;
452 }
453 }
454
455 /* Have UFS handle the call. */
456 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
457 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
458
459 if (rw == UIO_READ) {
460 error = VOP_READ(cfvp, uiop, ioflag, cred);
461 } else {
462 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
463 }
464
465 if (error)
466 MARK_INT_FAIL(CODA_RDWR_STATS);
467 else
468 MARK_INT_SAT(CODA_RDWR_STATS);
469
470 /* Do an internal close if necessary. */
471 if (opened_internally) {
472 MARK_INT_GEN(CODA_CLOSE_STATS);
473 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
474 }
475
476 /* Invalidate cached attributes if writing. */
477 if (rw == UIO_WRITE)
478 cp->c_flags &= ~C_VATTR;
479 return(error);
480 }
481
482 int
483 coda_ioctl(void *v)
484 {
485 /* true args */
486 struct vop_ioctl_args *ap = v;
487 vnode_t *vp = ap->a_vp;
488 int com = ap->a_command;
489 void *data = ap->a_data;
490 int flag = ap->a_fflag;
491 kauth_cred_t cred = ap->a_cred;
492 /* locals */
493 int error;
494 vnode_t *tvp;
495 struct PioctlData *iap = (struct PioctlData *)data;
496 namei_simple_flags_t sflags;
497
498 MARK_ENTRY(CODA_IOCTL_STATS);
499
500 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
501
502 /* Don't check for operation on a dying object, for ctlvp it
503 shouldn't matter */
504
505 /* Must be control object to succeed. */
506 if (!IS_CTL_VP(vp)) {
507 MARK_INT_FAIL(CODA_IOCTL_STATS);
508 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
509 return (EOPNOTSUPP);
510 }
511 /* Look up the pathname. */
512
513 /* Should we use the name cache here? It would get it from
514 lookupname sooner or later anyway, right? */
515
516 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
517 error = namei_simple_user(iap->path, sflags, &tvp);
518
519 if (error) {
520 MARK_INT_FAIL(CODA_IOCTL_STATS);
521 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
522 __func__, error));)
523 return(error);
524 }
525
526 /*
527 * Make sure this is a coda style cnode, but it may be a
528 * different vfsp
529 */
530 /* XXX: this totally violates the comment about vtagtype in vnode.h */
531 if (tvp->v_tag != VT_CODA) {
532 vrele(tvp);
533 MARK_INT_FAIL(CODA_IOCTL_STATS);
534 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
535 __func__, iap->path));)
536 return(EINVAL);
537 }
538
539 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
540 vrele(tvp);
541 return(EINVAL);
542 }
543 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
544 cred, curlwp);
545
546 if (error)
547 MARK_INT_FAIL(CODA_IOCTL_STATS);
548 else
549 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
550
551 vrele(tvp);
552 return(error);
553 }
554
555 /*
556 * To reduce the cost of a user-level venus, we cache attributes in
557 * the kernel. Each cnode has storage allocated for an attribute. If
558 * c_vattr is valid, return a reference to it. Otherwise, get the
559 * attributes from venus and store them in the cnode. There is some
560 * question whether this method is a security leak. But I think that in
561 * order to make this call, the user must have done a lookup and
562 * opened the file, and therefore should already have access.
563 */
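/*
 * The cache is a single struct vattr per cnode, valid while C_VATTR is
 * set in c_flags. It is invalidated wherever the attributes may have
 * changed behind our back: open-for-write, write, setattr, and the
 * directory-modifying operations below (which clear the parent's flag).
 */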
564 int
565 coda_getattr(void *v)
566 {
567 /* true args */
568 struct vop_getattr_args *ap = v;
569 vnode_t *vp = ap->a_vp;
570 struct cnode *cp = VTOC(vp);
571 struct vattr *vap = ap->a_vap;
572 kauth_cred_t cred = ap->a_cred;
573 /* locals */
574 int error;
575
576 MARK_ENTRY(CODA_GETATTR_STATS);
577
578 /* Check for getattr of control object. */
579 if (IS_CTL_VP(vp)) {
580 MARK_INT_FAIL(CODA_GETATTR_STATS);
581 return(ENOENT);
582 }
583
584 /* Check to see if the attributes have already been cached */
585 if (VALID_VATTR(cp)) {
586 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
587 __func__, coda_f2s(&cp->c_fid)));})
588 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
589 print_vattr(&cp->c_vattr); )
590
591 *vap = cp->c_vattr;
592 MARK_INT_SAT(CODA_GETATTR_STATS);
593 return(0);
594 }
595
596 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
597
598 if (!error) {
599 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
600 __func__, coda_f2s(&cp->c_fid), error)); )
601
602 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
603 print_vattr(vap); )
604
605 /* If not open for write, store attributes in cnode */
606 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
607 cp->c_vattr = *vap;
608 cp->c_flags |= C_VATTR;
609 }
610
611 }
612 return(error);
613 }
614
615 int
616 coda_setattr(void *v)
617 {
618 /* true args */
619 struct vop_setattr_args *ap = v;
620 vnode_t *vp = ap->a_vp;
621 struct cnode *cp = VTOC(vp);
622 struct vattr *vap = ap->a_vap;
623 kauth_cred_t cred = ap->a_cred;
624 /* locals */
625 int error;
626
627 MARK_ENTRY(CODA_SETATTR_STATS);
628
629 /* Check for setattr of control object. */
630 if (IS_CTL_VP(vp)) {
631 MARK_INT_FAIL(CODA_SETATTR_STATS);
632 return(ENOENT);
633 }
634
635 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
636 print_vattr(vap);
637 }
638 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
639
640 if (!error)
641 cp->c_flags &= ~C_VATTR;
642
643 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
644 return(error);
645 }
646
647 int
648 coda_access(void *v)
649 {
650 /* true args */
651 struct vop_access_args *ap = v;
652 vnode_t *vp = ap->a_vp;
653 struct cnode *cp = VTOC(vp);
654 int mode = ap->a_mode;
655 kauth_cred_t cred = ap->a_cred;
656 /* locals */
657 int error;
658
659 MARK_ENTRY(CODA_ACCESS_STATS);
660
661 /* Check for access of control object. Only read access is
662 allowed on it. */
663 if (IS_CTL_VP(vp)) {
664 /* bogus hack - all will be marked as successes */
665 MARK_INT_SAT(CODA_ACCESS_STATS);
666 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
667 ? 0 : EACCES);
668 }
669
670 /*
671 * if the file is a directory, and we are checking exec (eg lookup)
672 * access, and the file is in the namecache, then the user must have
673 * lookup access to it.
674 */
675 if (coda_access_cache) {
676 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
677 if (coda_nc_lookup(cp, ".", 1, cred)) {
678 MARK_INT_SAT(CODA_ACCESS_STATS);
679 return(0); /* it was in the cache */
680 }
681 }
682 }
683
684 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
685
686 return(error);
687 }
688
689 /*
690 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
691 * done. If a buffer has been saved in anticipation of a coda_create or
692 * a coda_remove, delete it.
693 */
694 /* ARGSUSED */
695 int
696 coda_abortop(void *v)
697 {
698 /* true args */
699 struct vop_abortop_args /* {
700 vnode_t *a_dvp;
701 struct componentname *a_cnp;
702 } */ *ap = v;
703
704 (void)ap;
705 /* upcall decl */
706 /* locals */
707
708 return (0);
709 }
710
711 int
712 coda_readlink(void *v)
713 {
714 /* true args */
715 struct vop_readlink_args *ap = v;
716 vnode_t *vp = ap->a_vp;
717 struct cnode *cp = VTOC(vp);
718 struct uio *uiop = ap->a_uio;
719 kauth_cred_t cred = ap->a_cred;
720 /* locals */
721 struct lwp *l = curlwp;
722 int error;
723 char *str;
724 int len;
725
726 MARK_ENTRY(CODA_READLINK_STATS);
727
728 /* Check for readlink of control object. */
729 if (IS_CTL_VP(vp)) {
730 MARK_INT_FAIL(CODA_READLINK_STATS);
731 return(ENOENT);
732 }
733
734 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
735 uiop->uio_rw = UIO_READ;
736 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
737 if (error)
738 MARK_INT_FAIL(CODA_READLINK_STATS);
739 else
740 MARK_INT_SAT(CODA_READLINK_STATS);
741 return(error);
742 }
743
744 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
745
746 if (!error) {
747 uiop->uio_rw = UIO_READ;
748 error = uiomove(str, len, uiop);
749
750 if (coda_symlink_cache) {
751 cp->c_symlink = str;
752 cp->c_symlen = len;
753 cp->c_flags |= C_SYMLINK;
754 } else
755 CODA_FREE(str, len);
756 }
757
758 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
759 return(error);
760 }
761
762 int
763 coda_fsync(void *v)
764 {
765 /* true args */
766 struct vop_fsync_args *ap = v;
767 vnode_t *vp = ap->a_vp;
768 struct cnode *cp = VTOC(vp);
769 kauth_cred_t cred = ap->a_cred;
770 /* locals */
771 vnode_t *convp = cp->c_ovp;
772 int error;
773
774 MARK_ENTRY(CODA_FSYNC_STATS);
775
776 /* Check for fsync on an unmounting object */
777 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
778 * after an unmount has been initiated. This is a Bad Thing,
779 * which we have to avoid. Not a legitimate failure for stats.
780 */
781 if (IS_UNMOUNTING(cp)) {
782 return(ENODEV);
783 }
784
785 /* Check for fsync of control object. */
786 if (IS_CTL_VP(vp)) {
787 MARK_INT_SAT(CODA_FSYNC_STATS);
788 return(0);
789 }
790
791 if (convp)
792 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
793
794 /*
795 * We can expect fsync on any vnode at all if venus is purging it.
796 * Venus can't very well answer the fsync request, now can it?
797 * Hopefully, it won't have to, because hopefully, venus preserves
798 * the (possibly untrue) invariant that it never purges an open
799 * vnode. Hopefully.
800 */
801 if (cp->c_flags & C_PURGING) {
802 return(0);
803 }
804
805 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
806
807 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
808 return(error);
809 }
810
811 /*
812 * vp is locked on entry, and we must unlock it.
813 * XXX This routine is suspect and probably needs rewriting.
814 */
815 int
816 coda_inactive(void *v)
817 {
818 /* true args */
819 struct vop_inactive_args *ap = v;
820 vnode_t *vp = ap->a_vp;
821 struct cnode *cp = VTOC(vp);
822 kauth_cred_t cred __unused = NULL;
823
824 /* We don't need to send inactive to venus - DCS */
825 MARK_ENTRY(CODA_INACTIVE_STATS);
826
827 if (IS_CTL_VP(vp)) {
828 MARK_INT_SAT(CODA_INACTIVE_STATS);
829 return 0;
830 }
831
832 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
833 coda_f2s(&cp->c_fid), vp->v_mount));)
834
835 /* If an array has been allocated to hold the symlink, deallocate it */
836 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
837 if (cp->c_symlink == NULL)
838 panic("%s: null symlink pointer in cnode", __func__);
839
840 CODA_FREE(cp->c_symlink, cp->c_symlen);
841 cp->c_flags &= ~C_SYMLINK;
842 cp->c_symlen = 0;
843 }
844
845 /* Remove it from the table so it can't be found. */
846 coda_unsave(cp);
847 if (vp->v_mount->mnt_data == NULL) {
848 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
849 panic("badness in coda_inactive");
850 }
851
852 if (IS_UNMOUNTING(cp)) {
853 /* XXX Do we need to VOP_CLOSE container vnodes? */
854 if (vp->v_usecount > 1)
855 printf("%s: IS_UNMOUNTING %p usecount %d\n",
856 __func__, vp, vp->v_usecount);
857 if (cp->c_ovp != NULL)
858 printf("%s: %p ovp != NULL\n", __func__, vp);
859 VOP_UNLOCK(vp);
860 } else {
861 /* Sanity checks that perhaps should be panic. */
862 if (vp->v_usecount > 1) {
863 printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
864 }
865 if (cp->c_ovp != NULL) {
866 printf("%s: %p ovp != NULL\n", __func__, vp);
867 }
868 VOP_UNLOCK(vp);
869 *ap->a_recycle = true;
870 }
871
872 MARK_INT_SAT(CODA_INACTIVE_STATS);
873 return(0);
874 }
875
876 /*
877 * Coda does not use the normal namecache, but a private version.
878 * Consider how to use the standard facility instead.
879 */
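/*
 * The private cache (coda_namecache.c) maps <parent cnode, name, cred>
 * to a cnode, so a hit can also stand in for a lookup-permission check
 * (see coda_access above).
 */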
880 int
881 coda_lookup(void *v)
882 {
883 /* true args */
884 struct vop_lookup_args *ap = v;
885 /* (locked) vnode of dir in which to do lookup */
886 vnode_t *dvp = ap->a_dvp;
887 struct cnode *dcp = VTOC(dvp);
888 /* output variable for result */
889 vnode_t **vpp = ap->a_vpp;
890 /* name to lookup */
891 struct componentname *cnp = ap->a_cnp;
892 kauth_cred_t cred = cnp->cn_cred;
893 struct lwp *l = curlwp;
894 /* locals */
895 struct cnode *cp;
896 const char *nm = cnp->cn_nameptr;
897 int len = cnp->cn_namelen;
898 int flags = cnp->cn_flags;
899 int isdot;
900 CodaFid VFid;
901 int vtype;
902 int error = 0;
903
904 MARK_ENTRY(CODA_LOOKUP_STATS);
905
906 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
907 nm, coda_f2s(&dcp->c_fid)));)
908
909 /*
910 * XXX componentname flags in MODMASK are not handled at all
911 */
912
913 /*
914 * The overall strategy is to switch on the lookup type and get a
915 * result vnode that is vref'd but not locked. Then, the code at
916 * exit: switches on ., .., and regular lookups and does the right
917 * locking.
918 */
919
920 /* Check for lookup of control object. */
921 if (IS_CTL_NAME(dvp, nm, len)) {
922 *vpp = coda_ctlvp;
923 vref(*vpp);
924 MARK_INT_SAT(CODA_LOOKUP_STATS);
925 goto exit;
926 }
927
928 /* Avoid trying to hand venus an unreasonably long name. */
929 if (len+1 > CODA_MAXNAMLEN) {
930 MARK_INT_FAIL(CODA_LOOKUP_STATS);
931 CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
932 __func__, coda_f2s(&dcp->c_fid), nm));)
933 *vpp = (vnode_t *)0;
934 error = EINVAL;
935 goto exit;
936 }
937
938 /*
939 * XXX Check for DOT lookups, and short circuit all the caches,
940 * just doing an extra vref. (venus guarantees that lookup of
941 * . returns self.)
942 */
943 isdot = (len == 1 && nm[0] == '.');
944
945 /*
946 * Try to resolve the lookup in the minicache. If that fails, ask
947 * venus to do the lookup. XXX The interaction between vnode
948 * locking and any locking that coda does is not clear.
949 */
950 cp = coda_nc_lookup(dcp, nm, len, cred);
951 if (cp) {
952 *vpp = CTOV(cp);
953 vref(*vpp);
954 CODADEBUG(CODA_LOOKUP,
955 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
956 } else {
957 /* The name wasn't cached, so ask Venus. */
958 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);
959
960 if (error) {
961 MARK_INT_FAIL(CODA_LOOKUP_STATS);
962 CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
963 coda_f2s(&dcp->c_fid), nm, error));)
964 *vpp = (vnode_t *)0;
965 } else {
966 MARK_INT_SAT(CODA_LOOKUP_STATS);
967 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
968 __func__, coda_f2s(&VFid), vtype, error)); )
969
970 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
971 *vpp = CTOV(cp);
972 /* vpp is now vrefed. */
973
974 /*
975 * Unless this vnode is marked CODA_NOCACHE, enter it into
976 * the coda name cache to avoid a future venus round-trip.
977 * XXX Interaction with componentname NOCACHE is unclear.
978 */
979 if (!(vtype & CODA_NOCACHE))
980 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
981 }
982 }
983
984 exit:
985 /*
986 * If we are creating, and this was the last name to be looked up,
987 * and the error was ENOENT, then make the leaf NULL and return
988 * success.
989 * XXX Check against new lookup rules.
990 */
991 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
992 && (cnp->cn_flags & ISLASTCN)
993 && (error == ENOENT))
994 {
995 error = EJUSTRETURN;
996 *ap->a_vpp = NULL;
997 }
998
999 /*
1000 * If the lookup succeeded, we must generally lock the returned
1001 * vnode. This could be a ., .., or normal lookup. See
1002 * vnodeops(9) for the details.
1003 */
1004 /*
1005 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1006 * somehow, and remove LK_RETRY.
1007 */
1008 if (!error || (error == EJUSTRETURN)) {
1009 /* Lookup has a value and it isn't "."? */
1010 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1011 if (flags & ISDOTDOT)
1012 /* ..: unlock parent */
1013 VOP_UNLOCK(dvp);
1014 /* all but .: lock child */
1015 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1016 if (flags & ISDOTDOT)
1017 /* ..: relock parent */
1018 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1019 }
1020 /* else .: leave dvp locked */
1021 } else {
1022 /* The lookup failed, so return NULL. Leave dvp locked. */
1023 *ap->a_vpp = NULL;
1024 }
1025 return(error);
1026 }
1027
1028 /*ARGSUSED*/
1029 int
1030 coda_create(void *v)
1031 {
1032 /* true args */
1033 struct vop_create_args *ap = v;
1034 vnode_t *dvp = ap->a_dvp;
1035 struct cnode *dcp = VTOC(dvp);
1036 struct vattr *va = ap->a_vap;
1037 int exclusive = 1;
1038 int mode = ap->a_vap->va_mode;
1039 vnode_t **vpp = ap->a_vpp;
1040 struct componentname *cnp = ap->a_cnp;
1041 kauth_cred_t cred = cnp->cn_cred;
1042 struct lwp *l = curlwp;
1043 /* locals */
1044 int error;
1045 struct cnode *cp;
1046 const char *nm = cnp->cn_nameptr;
1047 int len = cnp->cn_namelen;
1048 CodaFid VFid;
1049 struct vattr attr;
1050
1051 MARK_ENTRY(CODA_CREATE_STATS);
1052
1053 /* All creates are exclusive XXX */
1054 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1055
1056 /* Check for create of control object. */
1057 if (IS_CTL_NAME(dvp, nm, len)) {
1058 *vpp = (vnode_t *)0;
1059 MARK_INT_FAIL(CODA_CREATE_STATS);
1060 return(EACCES);
1061 }
1062
1063 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1064
1065 if (!error) {
1066
1067 /*
1068 * XXX Violation of venus/kernel invariants is a difficult case,
1069 * but venus should not be able to cause a panic.
1070 */
1071 /* If this is an exclusive create, panic if the file already exists. */
1072 /* Venus should have detected the file and reported EEXIST. */
1073
1074 if ((exclusive == 1) &&
1075 (coda_find(&VFid) != NULL))
1076 panic("cnode existed for newly created file!");
1077
1078 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1079 *vpp = CTOV(cp);
1080
1081 /* XXX vnodeops doesn't say this argument can be changed. */
1082 /* Update va to reflect the new attributes. */
1083 (*va) = attr;
1084
1085 /* Update the attribute cache and mark it as valid */
1086 if (coda_attr_cache) {
1087 VTOC(*vpp)->c_vattr = attr;
1088 VTOC(*vpp)->c_flags |= C_VATTR;
1089 }
1090
1091 /* Invalidate parent's attr cache (modification time has changed). */
1092 VTOC(dvp)->c_flags &= ~C_VATTR;
1093
1094 /* enter the new vnode in the Name Cache */
1095 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1096
1097 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1098 coda_f2s(&VFid), error)); )
1099 } else {
1100 *vpp = (vnode_t *)0;
1101 CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
1102 }
1103
1104 /*
1105 * vnodeops(9) says that we must unlock the parent and lock the child.
1106 * XXX Should we lock the child first?
1107 */
1108 vput(dvp);
1109 if (!error) {
1110 if ((cnp->cn_flags & LOCKLEAF) == 0) {
1111 /* This should not happen; flags are for lookup only. */
1112 printf("%s: LOCKLEAF not set!\n", __func__);
1113 }
1114
1115 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1116 /* XXX Perhaps avoid this panic. */
1117 panic("%s: couldn't lock child", __func__);
1118 }
1119 }
1120
1121 return(error);
1122 }
1123
1124 int
1125 coda_remove(void *v)
1126 {
1127 /* true args */
1128 struct vop_remove_args *ap = v;
1129 vnode_t *dvp = ap->a_dvp;
1130 struct cnode *cp = VTOC(dvp);
1131 vnode_t *vp = ap->a_vp;
1132 struct componentname *cnp = ap->a_cnp;
1133 kauth_cred_t cred = cnp->cn_cred;
1134 struct lwp *l = curlwp;
1135 /* locals */
1136 int error;
1137 const char *nm = cnp->cn_nameptr;
1138 int len = cnp->cn_namelen;
1139 struct cnode *tp;
1140
1141 MARK_ENTRY(CODA_REMOVE_STATS);
1142
1143 CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1144 nm, coda_f2s(&cp->c_fid)));)
1145
1146 /* Remove the file's entry from the CODA Name Cache */
1147 /* We're being conservative here, it might be that this person
1148 * doesn't really have sufficient access to delete the file
1149 * but we feel zapping the entry won't really hurt anyone -- dcs
1150 */
1151 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1152 * exist, and one is removed, the link count on the other will be
1153 * off by 1. We could either invalidate the attrs if cached, or
1154 * fix them. I'll try to fix them. DCS 11/8/94
1155 */
1156 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1157 if (tp) {
1158 if (VALID_VATTR(tp)) { /* If attrs are cached */
1159 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1160 tp->c_vattr.va_nlink--;
1161 }
1162 }
1163
1164 coda_nc_zapfile(VTOC(dvp), nm, len);
1165 /* No need to flush it if it doesn't exist! */
1166 }
1167 /* Invalidate the parent's attr cache, the modification time has changed */
1168 VTOC(dvp)->c_flags &= ~C_VATTR;
1169
1170 /* Check for remove of control object. */
1171 if (IS_CTL_NAME(dvp, nm, len)) {
1172 MARK_INT_FAIL(CODA_REMOVE_STATS);
1173 return(ENOENT);
1174 }
1175
1176 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1177
1178 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1179
1180 /*
1181 * Unlock parent and child (avoiding double if ".").
1182 */
1183 if (dvp == vp) {
1184 vrele(vp);
1185 } else {
1186 vput(vp);
1187 }
1188 vput(dvp);
1189
1190 return(error);
1191 }
1192
1193 /*
1194 * dvp is the directory where the link is to go, and is locked.
1195 * vp is the object to be linked to, and is unlocked.
1196 * At exit, we must unlock dvp, and vput dvp.
1197 */
1198 int
1199 coda_link(void *v)
1200 {
1201 /* true args */
1202 struct vop_link_args *ap = v;
1203 vnode_t *vp = ap->a_vp;
1204 struct cnode *cp = VTOC(vp);
1205 vnode_t *dvp = ap->a_dvp;
1206 struct cnode *dcp = VTOC(dvp);
1207 struct componentname *cnp = ap->a_cnp;
1208 kauth_cred_t cred = cnp->cn_cred;
1209 struct lwp *l = curlwp;
1210 /* locals */
1211 int error;
1212 const char *nm = cnp->cn_nameptr;
1213 int len = cnp->cn_namelen;
1214
1215 MARK_ENTRY(CODA_LINK_STATS);
1216
1217 if (codadebug & CODADBGMSK(CODA_LINK)) {
1219 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1220 myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1222 }
1228
1229 /* Check for link to/from control object. */
1230 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1231 MARK_INT_FAIL(CODA_LINK_STATS);
1232 return(EACCES);
1233 }
1234
1235 /* If linking . to a name, error out earlier. */
1236 if (vp == dvp) {
1237 printf("coda_link vp==dvp\n");
1238 error = EISDIR;
1239 goto exit;
1240 }
1241
1242 /* XXX Why does venus_link need the vnode to be locked?*/
1243 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1244 printf("%s: couldn't lock vnode %p\n", __func__, vp);
1245 error = EFAULT; /* XXX better value */
1246 goto exit;
1247 }
1248 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1249 VOP_UNLOCK(vp);
1250
1251 /* Invalidate parent's attr cache (the modification time has changed). */
1252 VTOC(dvp)->c_flags &= ~C_VATTR;
1253 /* Invalidate child's attr cache (XXX why). */
1254 VTOC(vp)->c_flags &= ~C_VATTR;
1255
1256 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1257
1258 exit:
1259 vput(dvp);
1260 return(error);
1261 }
1262
1263 int
1264 coda_rename(void *v)
1265 {
1266 /* true args */
1267 struct vop_rename_args *ap = v;
1268 vnode_t *odvp = ap->a_fdvp;
1269 struct cnode *odcp = VTOC(odvp);
1270 struct componentname *fcnp = ap->a_fcnp;
1271 vnode_t *ndvp = ap->a_tdvp;
1272 struct cnode *ndcp = VTOC(ndvp);
1273 struct componentname *tcnp = ap->a_tcnp;
1274 kauth_cred_t cred = fcnp->cn_cred;
1275 struct lwp *l = curlwp;
1276 /* true args */
1277 int error;
1278 const char *fnm = fcnp->cn_nameptr;
1279 int flen = fcnp->cn_namelen;
1280 const char *tnm = tcnp->cn_nameptr;
1281 int tlen = tcnp->cn_namelen;
1282
1283 MARK_ENTRY(CODA_RENAME_STATS);
1284
1285 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1286 This could be Bad. XXX */
1287 #ifdef OLD_DIAGNOSTIC
1288 if ((fcnp->cn_cred != tcnp->cn_cred)
1289 || (fcnp->cn_lwp != tcnp->cn_lwp))
1290 {
1291 panic("%s: component names don't agree", __func__);
1292 }
1293 #endif
1294
1295 /* Check for rename involving control object. */
1296 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1297 MARK_INT_FAIL(CODA_RENAME_STATS);
1298 return(EACCES);
1299 }
1300
1301 /* Problem with moving directories -- need to flush entry for .. */
1302 if (odvp != ndvp) {
1303 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1304 if (ovcp) {
1305 vnode_t *ovp = CTOV(ovcp);
1306 if ((ovp) &&
1307 (ovp->v_type == VDIR)) /* If it's a directory */
1308 coda_nc_zapfile(VTOC(ovp),"..", 2);
1309 }
1310 }
1311
1312 /* Remove the entries for both source and target files */
1313 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1314 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1315
1316 /* Invalidate the parent's attr cache, the modification time has changed */
1317 VTOC(odvp)->c_flags &= ~C_VATTR;
1318 VTOC(ndvp)->c_flags &= ~C_VATTR;
1319
1320 if (flen+1 > CODA_MAXNAMLEN) {
1321 MARK_INT_FAIL(CODA_RENAME_STATS);
1322 error = EINVAL;
1323 goto exit;
1324 }
1325
1326 if (tlen+1 > CODA_MAXNAMLEN) {
1327 MARK_INT_FAIL(CODA_RENAME_STATS);
1328 error = EINVAL;
1329 goto exit;
1330 }
1331
1332 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1333
1334 exit:
1335 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1336 /* XXX - do we need to call cache_purge on the moved vnode? */
1337 cache_purge(ap->a_fvp);
1338
1339 /* It seems to be incumbent on us to drop locks on all four vnodes */
1340 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1341
1342 vrele(ap->a_fvp);
1343 vrele(odvp);
1344
1345 if (ap->a_tvp) {
1346 if (ap->a_tvp == ndvp) {
1347 vrele(ap->a_tvp);
1348 } else {
1349 vput(ap->a_tvp);
1350 }
1351 }
1352
1353 vput(ndvp);
1354 return(error);
1355 }
1356
1357 int
1358 coda_mkdir(void *v)
1359 {
1360 /* true args */
1361 struct vop_mkdir_args *ap = v;
1362 vnode_t *dvp = ap->a_dvp;
1363 struct cnode *dcp = VTOC(dvp);
1364 struct componentname *cnp = ap->a_cnp;
1365 struct vattr *va = ap->a_vap;
1366 vnode_t **vpp = ap->a_vpp;
1367 kauth_cred_t cred = cnp->cn_cred;
1368 struct lwp *l = curlwp;
1369 /* locals */
1370 int error;
1371 const char *nm = cnp->cn_nameptr;
1372 int len = cnp->cn_namelen;
1373 struct cnode *cp;
1374 CodaFid VFid;
1375 struct vattr ova;
1376
1377 MARK_ENTRY(CODA_MKDIR_STATS);
1378
1379 /* Check for mkdir of target object. */
1380 if (IS_CTL_NAME(dvp, nm, len)) {
1381 *vpp = (vnode_t *)0;
1382 MARK_INT_FAIL(CODA_MKDIR_STATS);
1383 return(EACCES);
1384 }
1385
1386 if (len+1 > CODA_MAXNAMLEN) {
1387 *vpp = (vnode_t *)0;
1388 MARK_INT_FAIL(CODA_MKDIR_STATS);
1389 return(EACCES);
1390 }
1391
1392 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1393
1394 if (!error) {
1395 if (coda_find(&VFid) != NULL)
1396 panic("cnode existed for newly created directory!");
1397
1398
1399 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1400 *vpp = CTOV(cp);
1401
1402 /* enter the new vnode in the Name Cache */
1403 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1404
1405 /* as a side effect, enter "." and ".." for the directory */
1406 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1407 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1408
1409 if (coda_attr_cache) {
1410 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1411 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1412 }
1413
1414 /* Invalidate the parent's attr cache, the modification time has changed */
1415 VTOC(dvp)->c_flags &= ~C_VATTR;
1416
1417 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1418 coda_f2s(&VFid), error)); )
1419 } else {
1420 *vpp = (vnode_t *)0;
1421 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1422 }
1423
1424 /*
1425 * Currently, all mkdirs explicitly vput their dvp's.
1426 * It also appears that we *must* lock the vpp, since
1427 * lockleaf isn't set, but someone down the road is going
1428 * to try to unlock the new directory.
1429 */
1430 vput(dvp);
1431 if (!error) {
1432 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1433 panic("%s: couldn't lock child", __func__);
1434 }
1435 }
1436
1437 return(error);
1438 }
1439
1440 int
1441 coda_rmdir(void *v)
1442 {
1443 /* true args */
1444 struct vop_rmdir_args *ap = v;
1445 vnode_t *dvp = ap->a_dvp;
1446 struct cnode *dcp = VTOC(dvp);
1447 vnode_t *vp = ap->a_vp;
1448 struct componentname *cnp = ap->a_cnp;
1449 kauth_cred_t cred = cnp->cn_cred;
1450 struct lwp *l = curlwp;
1451 /* true args */
1452 int error;
1453 const char *nm = cnp->cn_nameptr;
1454 int len = cnp->cn_namelen;
1455 struct cnode *cp;
1456
1457 MARK_ENTRY(CODA_RMDIR_STATS);
1458
1459 /* Check for rmdir of control object. */
1460 if (IS_CTL_NAME(dvp, nm, len)) {
1461 MARK_INT_FAIL(CODA_RMDIR_STATS);
1462 return(ENOENT);
1463 }
1464
1465 /* Can't remove . in self. */
1466 if (dvp == vp) {
1467 printf("%s: dvp == vp\n", __func__);
1468 error = EINVAL;
1469 goto exit;
1470 }
1471
1472 /*
1473 * The caller may not have adequate permissions, and the venus
1474 * operation may fail, but it doesn't hurt from a correctness
1475 * viewpoint to invalidate cache entries.
1476 * XXX Why isn't this done after the venus_rmdir call?
1477 */
1478 /* Look up child in name cache (by name, from parent). */
1479 cp = coda_nc_lookup(dcp, nm, len, cred);
1480 /* If found, remove all children of the child (., ..). */
1481 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1482
1483 /* Remove child's own entry. */
1484 coda_nc_zapfile(dcp, nm, len);
1485
1486 /* Invalidate parent's attr cache (the modification time has changed). */
1487 dcp->c_flags &= ~C_VATTR;
1488
1489 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1490
1491 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1492
1493 exit:
1494 /* vput both vnodes */
1495 vput(dvp);
1496 if (dvp == vp) {
1497 vrele(vp);
1498 } else {
1499 vput(vp);
1500 }
1501
1502 return(error);
1503 }
1504
1505 int
1506 coda_symlink(void *v)
1507 {
1508 /* true args */
1509 struct vop_symlink_args *ap = v;
1510 vnode_t *dvp = ap->a_dvp;
1511 struct cnode *dcp = VTOC(dvp);
1512 /* a_vpp is used in place below */
1513 struct componentname *cnp = ap->a_cnp;
1514 struct vattr *tva = ap->a_vap;
1515 char *path = ap->a_target;
1516 kauth_cred_t cred = cnp->cn_cred;
1517 struct lwp *l = curlwp;
1518 /* locals */
1519 int error;
1520 u_long saved_cn_flags;
1521 const char *nm = cnp->cn_nameptr;
1522 int len = cnp->cn_namelen;
1523 int plen = strlen(path);
1524
1525 /*
1526 * Here's the strategy for the moment: perform the symlink, then
1527 * do a lookup to grab the resulting vnode. I know this requires
1528 * two communications with Venus for a new symbolic link, but
1529 * that's the way the ball bounces. I don't yet want to change
1530 * the way the Mach symlink works. When Mach support is
1531 * deprecated, we should change symlink so that the common case
1532 * returns the resultant vnode in a vpp argument.
1533 */
1534
1535 MARK_ENTRY(CODA_SYMLINK_STATS);
1536
1537 /* Check for symlink of control object. */
1538 if (IS_CTL_NAME(dvp, nm, len)) {
1539 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1540 error = EACCES;
1541 goto exit;
1542 }
1543
1544 if (plen+1 > CODA_MAXPATHLEN) {
1545 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1546 error = EINVAL;
1547 goto exit;
1548 }
1549
1550 if (len+1 > CODA_MAXNAMLEN) {
1551 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1552 error = EINVAL;
1553 goto exit;
1554 }
1555
1556 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1557
1558 /* Invalidate the parent's attr cache (modification time has changed). */
1559 dcp->c_flags &= ~C_VATTR;
1560
1561 if (!error) {
1562 /*
1563 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1564 * these are defined only for VOP_LOOKUP. We desire to reuse
1565 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1566 * stray flags passed to us. Such stray flags can occur because
1567 * sys_symlink makes a namei call and then reuses the
1568 * componentname structure.
1569 */
1570 /*
1571 * XXX Arguably we should create our own componentname structure
1572 * and not reuse the one that was passed in.
1573 */
1574 saved_cn_flags = cnp->cn_flags;
1575 cnp->cn_flags &= ~(MODMASK | OPMASK);
1576 cnp->cn_flags |= LOOKUP;
1577 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1578 cnp->cn_flags = saved_cn_flags;
1579 /* Either an error occurs, or ap->a_vpp is locked. */
1580 }
1581
1582 exit:
1583 /* unlock and release reference on parent */
1584 vput(dvp);
1585
1586 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1587 return(error);
1588 }
1589
1590 /*
1591 * Read directory entries.
1592 */
1593 int
1594 coda_readdir(void *v)
1595 {
1596 /* true args */
1597 struct vop_readdir_args *ap = v;
1598 vnode_t *vp = ap->a_vp;
1599 struct cnode *cp = VTOC(vp);
1600 struct uio *uiop = ap->a_uio;
1601 kauth_cred_t cred = ap->a_cred;
1602 int *eofflag = ap->a_eofflag;
1603 off_t **cookies = ap->a_cookies;
1604 int *ncookies = ap->a_ncookies;
1605 /* upcall decl */
1606 /* locals */
1607 int error = 0;
1608
1609 MARK_ENTRY(CODA_READDIR_STATS);
1610
1611 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1612 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1613 (long long) uiop->uio_offset)); )
1614
1615 /* Check for readdir of control object. */
1616 if (IS_CTL_VP(vp)) {
1617 MARK_INT_FAIL(CODA_READDIR_STATS);
1618 return(ENOENT);
1619 }
1620
1621 {
1622 /* Redirect the request to UFS. */
1623
1624 /* If directory is not already open do an "internal open" on it. */
1625 int opened_internally = 0;
1626 if (cp->c_ovp == NULL) {
1627 opened_internally = 1;
1628 MARK_INT_GEN(CODA_OPEN_STATS);
1629 error = VOP_OPEN(vp, FREAD, cred);
1630 #ifdef CODA_VERBOSE
1631 printf("%s: Internally Opening %p\n", __func__, vp);
1632 #endif
1633 if (error) return(error);
1634 } else
1635 vp = cp->c_ovp;
1636
1637 /* Have UFS handle the call. */
1638 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1639 __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); )
1640 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1641 if (error)
1642 MARK_INT_FAIL(CODA_READDIR_STATS);
1643 else
1644 MARK_INT_SAT(CODA_READDIR_STATS);
1645
1646 /* Do an "internal close" if necessary. */
1647 if (opened_internally) {
1648 MARK_INT_GEN(CODA_CLOSE_STATS);
1649 (void)VOP_CLOSE(vp, FREAD, cred);
1650 }
1651 }
1652
1653 return(error);
1654 }
1655
1656 /*
1657 * Convert from file system blocks to device blocks
1658 */
1659 int
1660 coda_bmap(void *v)
1661 {
1662 /* XXX on the global proc */
1663 /* true args */
1664 struct vop_bmap_args *ap = v;
1665 vnode_t *vp __unused = ap->a_vp; /* file's vnode */
1666 daddr_t bn __unused = ap->a_bn; /* fs block number */
1667 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */
1668 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1669 struct lwp *l __unused = curlwp;
1670 /* upcall decl */
1671 /* locals */
1672
1673 *vpp = (vnode_t *)0;
1674 myprintf(("coda_bmap called!\n"));
1675 return(EINVAL);
1676 }
1677
1678 /*
1679 * I don't think the following two things are used anywhere, so I've
1680 * commented them out
1681 *
1682 * struct buf *async_bufhead;
1683 * int async_daemon_count;
1684 */
1685 int
1686 coda_strategy(void *v)
1687 {
1688 /* true args */
1689 struct vop_strategy_args *ap = v;
1690 struct buf *bp __unused = ap->a_bp;
1691 struct lwp *l __unused = curlwp;
1692 /* upcall decl */
1693 /* locals */
1694
1695 myprintf(("coda_strategy called! "));
1696 return(EINVAL);
1697 }
1698
1699 int
1700 coda_reclaim(void *v)
1701 {
1702 /* true args */
1703 struct vop_reclaim_args *ap = v;
1704 vnode_t *vp = ap->a_vp;
1705 struct cnode *cp = VTOC(vp);
1706 /* upcall decl */
1707 /* locals */
1708
1709 /*
1710 * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1711 */
1712 ENTRY;
1713
1714 if (IS_UNMOUNTING(cp)) {
1715 #ifdef DEBUG
1716 if (VTOC(vp)->c_ovp) {
1717 if (IS_UNMOUNTING(cp))
1718 printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1719 }
1720 #endif
1721 } else {
1722 #ifdef OLD_DIAGNOSTIC
1723 if (vp->v_usecount != 0)
1724 printf("%s: pushing active %p\n", __func__, vp);
1725 if (VTOC(vp)->c_ovp) {
1726 panic("%s: c_ovp not void", __func__);
1727 }
1728 #endif
1729 }
1730 coda_free(VTOC(vp));
1731 SET_VTOC(vp) = NULL;
1732 return (0);
1733 }
1734
1735 int
1736 coda_lock(void *v)
1737 {
1738 /* true args */
1739 struct vop_lock_args *ap = v;
1740 vnode_t *vp = ap->a_vp;
1741 struct cnode *cp = VTOC(vp);
1742 /* upcall decl */
1743 /* locals */
1744
1745 ENTRY;
1746
1747 if (coda_lockdebug) {
1748 myprintf(("Attempting lock on %s\n",
1749 coda_f2s(&cp->c_fid)));
1750 }
1751
1752 return genfs_lock(v);
1753 }
1754
1755 int
1756 coda_unlock(void *v)
1757 {
1758 /* true args */
1759 struct vop_unlock_args *ap = v;
1760 vnode_t *vp = ap->a_vp;
1761 struct cnode *cp = VTOC(vp);
1762 /* upcall decl */
1763 /* locals */
1764
1765 ENTRY;
1766 if (coda_lockdebug) {
1767 myprintf(("Attempting unlock on %s\n",
1768 coda_f2s(&cp->c_fid)));
1769 }
1770
1771 return genfs_unlock(v);
1772 }
1773
1774 int
1775 coda_islocked(void *v)
1776 {
1777 /* true args */
1778 ENTRY;
1779
1780 return genfs_islocked(v);
1781 }
1782
1783 /*
1784 * Given a device and inode, obtain a locked vnode. One reference is
1785 * obtained and passed back to the caller.
1786 */
1787 int
1788 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1789 {
1790 int error;
1791 struct mount *mp;
1792
1793 /* Obtain mount point structure from device. */
1794 if (!(mp = devtomp(dev))) {
1795 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1796 (unsigned long long)dev));
1797 return(ENXIO);
1798 }
1799
1800 /*
1801 * Obtain vnode from mount point and inode.
1802 * XXX VFS_VGET does not clearly define locked/referenced state of
1803 * returned vnode.
1804 */
1805 error = VFS_VGET(mp, ino, vpp);
1806 if (error) {
1807 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1808 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1809 return(ENOENT);
1810 }
1811 /* share the underlying vnode lock with the coda vnode */
1812 mutex_obj_hold((*vpp)->v_interlock);
1813 uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock);
1814 return(0);
1815 }
1816
1817 void
1818 print_vattr(struct vattr *attr)
1819 {
1820 const char *typestr;
1821
1822 switch (attr->va_type) {
1823 case VNON:
1824 typestr = "VNON";
1825 break;
1826 case VREG:
1827 typestr = "VREG";
1828 break;
1829 case VDIR:
1830 typestr = "VDIR";
1831 break;
1832 case VBLK:
1833 typestr = "VBLK";
1834 break;
1835 case VCHR:
1836 typestr = "VCHR";
1837 break;
1838 case VLNK:
1839 typestr = "VLNK";
1840 break;
1841 case VSOCK:
1842 typestr = "VSCK";
1843 break;
1844 case VFIFO:
1845 typestr = "VFFO";
1846 break;
1847 case VBAD:
1848 typestr = "VBAD";
1849 break;
1850 default:
1851 typestr = "????";
1852 break;
1853 }
1854
1855
1856 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1857 typestr, (int)attr->va_mode, (int)attr->va_uid,
1858 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1859
1860 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1861 (int)attr->va_fileid, (int)attr->va_nlink,
1862 (int)attr->va_size,
1863 (int)attr->va_blocksize,(int)attr->va_bytes));
1864 myprintf((" gen %ld flags %ld vaflags %d\n",
1865 attr->va_gen, attr->va_flags, attr->va_vaflags));
1866 myprintf((" atime sec %d nsec %d\n",
1867 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1868 myprintf((" mtime sec %d nsec %d\n",
1869 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1870 myprintf((" ctime sec %d nsec %d\n",
1871 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1872 }
1873
1874 /* How to print a ucred */
1875 void
1876 print_cred(kauth_cred_t cred)
1877 {
1878
1879 uint16_t ngroups;
1880 int i;
1881
1882 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1883 kauth_cred_geteuid(cred)));
1884
1885 ngroups = kauth_cred_ngroups(cred);
1886 for (i=0; i < ngroups; i++)
1887 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1888 myprintf(("\n"));
1889
1890 }
1891
1892 /*
1893 * Return a vnode for the given fid.
1894 * If no cnode exists for this fid create one and put it
1895 * in a table hashed by coda_f2i(). If the cnode for
1896 * this fid is already in the table return it (ref count is
1897 * incremented by coda_find). The cnode will be flushed from the
1898 * table when coda_inactive calls coda_unsave.
1899 */
1900 struct cnode *
1901 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1902 {
1903 struct cnode *cp;
1904 int err;
1905
1906 if ((cp = coda_find(fid)) == NULL) {
1907 vnode_t *vp;
1908
1909 cp = coda_alloc();
1910 cp->c_fid = *fid;
1911
1912 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, NULL, &vp);
1913 if (err) {
1914 panic("%s: getnewvnode returned error %d", __func__, err);
1915 }
1916 vp->v_data = cp;
1917 vp->v_type = type;
1918 cp->c_vnode = vp;
1919 uvm_vnp_setsize(vp, 0);
1920 coda_save(cp);
1921
1922 } else {
1923 vref(CTOV(cp));
1924 }
1925
1926 return cp;
1927 }
1928
1929 /*
1930 * coda_getpages may be called on a vnode which has not been opened,
1931 * e.g. to fault in pages to execute a program. In that case, we must
1932 * open the file to get the container. The vnode may or may not be
1933 * locked, and we must leave it in the same state.
1934 */
1935 int
1936 coda_getpages(void *v)
1937 {
1938 struct vop_getpages_args /* {
1939 vnode_t *a_vp;
1940 voff_t a_offset;
1941 struct vm_page **a_m;
1942 int *a_count;
1943 int a_centeridx;
1944 vm_prot_t a_access_type;
1945 int a_advice;
1946 int a_flags;
1947 } */ *ap = v;
1948 vnode_t *vp = ap->a_vp, *cvp;
1949 struct cnode *cp = VTOC(vp);
1950 struct lwp *l = curlwp;
1951 kauth_cred_t cred = l->l_cred;
1952 int error, cerror;
1953 int waslocked; /* 1 if vnode lock was held on entry */
1954 int didopen = 0; /* 1 if we opened container file */
1955
1956 KASSERT(mutex_owned(vp->v_interlock));
1957
1958 /*
1959 * Handle a case that uvm_fault doesn't quite use yet.
1960 * See layer_vnops.c for inspiration.
1961 */
1962 if (ap->a_flags & PGO_LOCKED) {
1963 return EBUSY;
1964 }
1965
1966 /* Check for control object. */
1967 if (IS_CTL_VP(vp)) {
1968 printf("%s: control object %p\n", __func__, vp);
1969 return(EINVAL);
1970 }
1971
1972 /*
1973 * XXX It's really not ok to be releasing the lock we get,
1974 * because we could be overlapping with another call to
1975 * getpages and drop a lock they are relying on. We need to
1976 * figure out whether getpages ever is called holding the
1977 * lock, and if we should serialize getpages calls by some
1978 * mechanism.
1979 */
1980 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1981 waslocked = VOP_ISLOCKED(vp);
1982
1983 /* Get container file if not already present. */
1984 cvp = cp->c_ovp;
1985 if (cvp == NULL) {
1986 /*
1987 * VOP_OPEN requires a locked vnode. We must avoid
1988 * locking the vnode if it is already locked, and
1989 * leave it in the same state on exit.
1990 */
1991 if (waslocked == 0) {
1992 cerror = vn_lock(vp, LK_EXCLUSIVE);
1993 if (cerror) {
1994 printf("%s: can't lock vnode %p\n",
1995 __func__, vp);
1996 return cerror;
1997 }
1998 #if 0
1999 printf("%s: locked vnode %p\n", __func__, vp);
2000 #endif
2001 }
2002
2003 /*
2004 * Open file (causes upcall to venus).
2005 * XXX Perhaps we should not fully open the file, but
2006 * simply obtain a container file.
2007 */
2008 /* XXX Is it ok to do this while holding the simplelock? */
2009 cerror = VOP_OPEN(vp, FREAD, cred);
2010
2011 if (cerror) {
2012 printf("%s: cannot open vnode %p => %d\n", __func__,
2013 vp, cerror);
2014 if (waslocked == 0)
2015 VOP_UNLOCK(vp);
2016 return cerror;
2017 }
2018
2019 #if 0
2020 printf("%s: opened vnode %p\n", __func__, vp);
2021 #endif
2022 cvp = cp->c_ovp;
2023 didopen = 1;
2024 }
2025 KASSERT(cvp != NULL);
2026
2027 /* Munge the arg structure to refer to the container vnode. */
2028 KASSERT(cvp->v_interlock == vp->v_interlock);
2029 ap->a_vp = cp->c_ovp;
2030
2031 /* Finally, call getpages on it. */
2032 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2033
2034 /* If we opened the vnode, we must close it. */
2035 if (didopen) {
2036 /*
2037 * VOP_CLOSE requires a locked vnode, but we are still
2038 * holding the lock (or riding a caller's lock).
2039 */
2040 cerror = VOP_CLOSE(vp, FREAD, cred);
2041 if (cerror != 0)
2042 /* XXX How should we handle this? */
2043 printf("%s: closed vnode %p -> %d\n", __func__,
2044 vp, cerror);
2045
2046 /* If we obtained a lock, drop it. */
2047 if (waslocked == 0)
2048 VOP_UNLOCK(vp);
2049 }
2050
2051 return error;
2052 }
2053
2054 /*
2055 * The protocol requires v_interlock to be held by the caller.
2056 */
2057 int
2058 coda_putpages(void *v)
2059 {
2060 struct vop_putpages_args /* {
2061 vnode_t *a_vp;
2062 voff_t a_offlo;
2063 voff_t a_offhi;
2064 int a_flags;
2065 } */ *ap = v;
2066 vnode_t *vp = ap->a_vp, *cvp;
2067 struct cnode *cp = VTOC(vp);
2068 int error;
2069
2070 KASSERT(mutex_owned(vp->v_interlock));
2071
2072 /* Check for control object. */
2073 if (IS_CTL_VP(vp)) {
2074 mutex_exit(vp->v_interlock);
2075 printf("%s: control object %p\n", __func__, vp);
2076 return(EINVAL);
2077 }
2078
2079 /*
2080 * If container object is not present, then there are no pages
2081 * to put; just return without error. This happens all the
2082 * time, apparently during discard of a closed vnode (which
2083 * trivially can't have dirty pages).
2084 */
2085 cvp = cp->c_ovp;
2086 if (cvp == NULL) {
2087 mutex_exit(vp->v_interlock);
2088 return 0;
2089 }
2090
2091 /* Munge the arg structure to refer to the container vnode. */
2092 KASSERT(cvp->v_interlock == vp->v_interlock);
2093 ap->a_vp = cvp;
2094
2095 /* Finally, call putpages on it. */
2096 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2097
2098 return error;
2099 }
2100