/*	$NetBSD: coda_vnops.c,v 1.77 2010/11/30 10:29:57 dholland Exp $	*/
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributers include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.77 2010/11/30 10:29:57 dholland Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65
66 #include <miscfs/genfs/genfs.h>
67
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76
/*
 * These flags select various performance enhancements.
 */
int coda_attr_cache = 1;	/* Set to cache attributes in the kernel */
int coda_symlink_cache = 1;	/* Set to cache symbolic link information */
int coda_access_cache = 1;	/* Set to handle some access checks directly */

/* structure to keep track of vfs calls */

struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];

/*
 * Per-opcode statistics: count entries into each vnop, and whether the
 * call was satisfied internally, failed internally, or was generated
 * internally (e.g. an implicit open/close done on behalf of rdwr).
 */
#define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
#define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
#define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
#define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)

/* What we are delaying for in printf */
int coda_printf_delay = 0;  /* in microseconds */
int coda_vnop_print_entry = 0;	/* nonzero: trace entry into each vnop */
static int coda_lockdebug = 0;

/* Trace entry into a vnop when coda_vnop_print_entry is set. */
#define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
99
/* Definition of the vnode operation vector */

/*
 * Map each vnode operation to its Coda implementation.  Operations
 * with no Coda-specific behavior are routed to genfs_* helpers,
 * coda_vop_nop (silently succeed) or coda_vop_error (fail with EIO).
 */
const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
    { &vop_default_desc, coda_vop_error },
    { &vop_lookup_desc, coda_lookup },		/* lookup */
    { &vop_create_desc, coda_create },		/* create */
    { &vop_mknod_desc, coda_vop_error },	/* mknod */
    { &vop_open_desc, coda_open },		/* open */
    { &vop_close_desc, coda_close },		/* close */
    { &vop_access_desc, coda_access },		/* access */
    { &vop_getattr_desc, coda_getattr },	/* getattr */
    { &vop_setattr_desc, coda_setattr },	/* setattr */
    { &vop_read_desc, coda_read },		/* read */
    { &vop_write_desc, coda_write },		/* write */
    { &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
    { &vop_ioctl_desc, coda_ioctl },		/* ioctl */
    { &vop_mmap_desc, genfs_mmap },		/* mmap */
    { &vop_fsync_desc, coda_fsync },		/* fsync */
    { &vop_remove_desc, coda_remove },		/* remove */
    { &vop_link_desc, coda_link },		/* link */
    { &vop_rename_desc, coda_rename },		/* rename */
    { &vop_mkdir_desc, coda_mkdir },		/* mkdir */
    { &vop_rmdir_desc, coda_rmdir },		/* rmdir */
    { &vop_symlink_desc, coda_symlink },	/* symlink */
    { &vop_readdir_desc, coda_readdir },	/* readdir */
    { &vop_readlink_desc, coda_readlink },	/* readlink */
    { &vop_abortop_desc, coda_abortop },	/* abortop */
    { &vop_inactive_desc, coda_inactive },	/* inactive */
    { &vop_reclaim_desc, coda_reclaim },	/* reclaim */
    { &vop_lock_desc, coda_lock },		/* lock */
    { &vop_unlock_desc, coda_unlock },		/* unlock */
    { &vop_bmap_desc, coda_bmap },		/* bmap */
    { &vop_strategy_desc, coda_strategy },	/* strategy */
    { &vop_print_desc, coda_vop_error },	/* print */
    { &vop_islocked_desc, coda_islocked },	/* islocked */
    { &vop_pathconf_desc, coda_vop_error },	/* pathconf */
    { &vop_advlock_desc, coda_vop_nop },	/* advlock */
    { &vop_bwrite_desc, coda_vop_error },	/* bwrite */
    { &vop_seek_desc, genfs_seek },		/* seek */
    { &vop_poll_desc, genfs_poll },		/* poll */
    { &vop_getpages_desc, coda_getpages },	/* getpages */
    { &vop_putpages_desc, coda_putpages },	/* putpages */
    { NULL, NULL }
};

/* Published vector descriptor consumed by vfs_attach(). */
const struct vnodeopv_desc coda_vnodeop_opv_desc =
        { &coda_vnodeop_p, coda_vnodeop_entries };
147
148 /* Definitions of NetBSD vnodeop interfaces */
149
150 /*
151 * A generic error routine. Return EIO without looking at arguments.
152 */
153 int
154 coda_vop_error(void *anon) {
155 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
156
157 if (codadebug) {
158 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
159 (*desc)->vdesc_name));
160 }
161
162 return EIO;
163 }
164
165 /* A generic do-nothing. */
166 int
167 coda_vop_nop(void *anon) {
168 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
169
170 if (codadebug) {
171 myprintf(("Vnode operation %s called, but unsupported\n",
172 (*desc)->vdesc_name));
173 }
174 return (0);
175 }
176
177 int
178 coda_vnodeopstats_init(void)
179 {
180 int i;
181
182 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
183 coda_vnodeopstats[i].opcode = i;
184 coda_vnodeopstats[i].entries = 0;
185 coda_vnodeopstats[i].sat_intrn = 0;
186 coda_vnodeopstats[i].unsat_intrn = 0;
187 coda_vnodeopstats[i].gen_intrn = 0;
188 }
189
190 return 0;
191 }
192
193 /*
194 * XXX The entire relationship between VOP_OPEN and having a container
195 * file (via venus_open) needs to be reexamined. In particular, it's
196 * valid to open/mmap/close and then reference. Instead of doing
197 * VOP_OPEN when getpages needs a container, we should do the
198 * venus_open part, and record that the vnode has opened the container
199 * for getpages, and do the matching logical close on coda_inactive.
200 * Further, coda_rdwr needs a container file, and sometimes needs to
201 * do the equivalent of open (core dumps).
202 */
203 /*
204 * coda_open calls Venus to return the device and inode of the
205 * container file, and then obtains a vnode for that file. The
206 * container vnode is stored in the coda vnode, and a reference is
207 * added for each open file.
208 */
209 int
210 coda_open(void *v)
211 {
212 /*
213 * NetBSD can pass the O_EXCL flag in mode, even though the check
214 * has already happened. Venus defensively assumes that if open
215 * is passed the EXCL, it must be a bug. We strip the flag here.
216 */
217 /* true args */
218 struct vop_open_args *ap = v;
219 struct vnode *vp = ap->a_vp;
220 struct cnode *cp = VTOC(vp);
221 int flag = ap->a_mode & (~O_EXCL);
222 kauth_cred_t cred = ap->a_cred;
223 /* locals */
224 int error;
225 dev_t dev; /* container file device, inode, vnode */
226 ino_t inode;
227 struct vnode *container_vp;
228
229 MARK_ENTRY(CODA_OPEN_STATS);
230
231 /* Check for open of control file. */
232 if (IS_CTL_VP(vp)) {
233 /* if (WRITABLE(flag)) */
234 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
235 MARK_INT_FAIL(CODA_OPEN_STATS);
236 return(EACCES);
237 }
238 MARK_INT_SAT(CODA_OPEN_STATS);
239 return(0);
240 }
241
242 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
243 if (error)
244 return (error);
245 if (!error) {
246 CODADEBUG(CODA_OPEN,
247 myprintf(("open: dev 0x%llx inode %llu result %d\n",
248 (unsigned long long)dev, (unsigned long long)inode, error));)
249 }
250
251 /*
252 * Obtain locked and referenced container vnode from container
253 * device/inode.
254 */
255 error = coda_grab_vnode(dev, inode, &container_vp);
256 if (error)
257 return (error);
258
259 /* Save the vnode pointer for the container file. */
260 if (cp->c_ovp == NULL) {
261 cp->c_ovp = container_vp;
262 } else {
263 if (cp->c_ovp != container_vp)
264 /*
265 * Perhaps venus returned a different container, or
266 * something else went wrong.
267 */
268 panic("coda_open: cp->c_ovp != container_vp");
269 }
270 cp->c_ocount++;
271
272 /* Flush the attribute cache if writing the file. */
273 if (flag & FWRITE) {
274 cp->c_owrite++;
275 cp->c_flags &= ~C_VATTR;
276 }
277
278 /*
279 * Save the <device, inode> pair for the container file to speed
280 * up subsequent reads while closed (mmap, program execution).
281 * This is perhaps safe because venus will invalidate the node
282 * before changing the container file mapping.
283 */
284 cp->c_device = dev;
285 cp->c_inode = inode;
286
287 /* Open the container file. */
288 error = VOP_OPEN(container_vp, flag, cred);
289 /*
290 * Drop the lock on the container, after we have done VOP_OPEN
291 * (which requires a locked vnode).
292 */
293 VOP_UNLOCK(container_vp);
294 return(error);
295 }
296
/*
 * Close the cache (container) file used for I/O and notify Venus.
 *
 * NOTE(review): on the normal (non-unmounting) path cp->c_ovp is
 * dereferenced without a NULL check; this assumes every close is
 * paired with a successful open that set c_ovp — confirm against
 * coda_open and the internal opens done by coda_rdwr.
 */
int
coda_close(void *v)
{
	/* true args */
	struct vop_close_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	int flag = ap->a_fflag;
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	int error;

	MARK_ENTRY(CODA_CLOSE_STATS);

	/* Check for close of control file. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_CLOSE_STATS);
		return(0);
	}

	/*
	 * XXX The IS_UNMOUNTING part of this is very suspect.
	 */
	if (IS_UNMOUNTING(cp)) {
		if (cp->c_ovp) {
#ifdef CODA_VERBOSE
			printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
			    vp->v_usecount, cp->c_ovp, vp, cp);
#endif
#ifdef hmm
			vgone(cp->c_ovp);
#else
			/* Close and drop the container while unmounting. */
			vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
			VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
			vput(cp->c_ovp);
#endif
		} else {
#ifdef CODA_VERBOSE
			printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
#endif
		}
		return ENODEV;
	}

	/* Lock the container node, and VOP_CLOSE it. */
	vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
	/*
	 * Drop the lock we just obtained, and vrele the container vnode.
	 * Decrement reference counts, and clear container vnode pointer on
	 * last close.
	 */
	vput(cp->c_ovp);
	if (flag & FWRITE)
		--cp->c_owrite;
	if (--cp->c_ocount == 0)
		cp->c_ovp = NULL;

	/* Tell Venus about the (logical) close. */
	error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);

	CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
	return(error);
}
363
364 int
365 coda_read(void *v)
366 {
367 struct vop_read_args *ap = v;
368
369 ENTRY;
370 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
371 ap->a_ioflag, ap->a_cred, curlwp));
372 }
373
374 int
375 coda_write(void *v)
376 {
377 struct vop_write_args *ap = v;
378
379 ENTRY;
380 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
381 ap->a_ioflag, ap->a_cred, curlwp));
382 }
383
/*
 * Common back end for coda_read and coda_write: redirect the I/O to
 * the container (cache) file, acquiring a container vnode first if
 * the file is not currently open.
 *
 * l is the calling lwp; its proc's accounting flags are consulted to
 * detect core dumps (which need a real internal open so Venus learns
 * the final size).
 */
int
coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
	kauth_cred_t cred, struct lwp *l)
{
	/* upcall decl */
	/* NOTE: container file operation!!! */
	/* locals */
	struct cnode *cp = VTOC(vp);
	struct vnode *cfvp = cp->c_ovp;		/* container file vnode */
	struct proc *p = l->l_proc;
	int opened_internally = 0;
	int error = 0;

	MARK_ENTRY(CODA_RDWR_STATS);

	CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
	    uiop->uio_iov->iov_base,
	    (unsigned long) uiop->uio_resid,
	    (long long) uiop->uio_offset)); )

	/* Check for rdwr of control object. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_RDWR_STATS);
		return(EINVAL);
	}

	/* Redirect the request to UFS. */

	/*
	 * If file is not already open this must be a page
	 * {read,write} request. Iget the cache file's inode
	 * pointer if we still have its <device, inode> pair.
	 * Otherwise, we must do an internal open to derive the
	 * pair.
	 * XXX Integrate this into a coherent strategy for container
	 * file acquisition.
	 */
	if (cfvp == NULL) {
		/*
		 * If we're dumping core, do the internal open. Otherwise
		 * venus won't have the correct size of the core when
		 * it's completely written.
		 */
		if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
			printf("coda_rdwr: grabbing container vnode, losing reference\n");
			/* Get locked and refed vnode. */
			error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
			if (error) {
				MARK_INT_FAIL(CODA_RDWR_STATS);
				return(error);
			}
			/*
			 * Drop lock.
			 * XXX Where is reference released.
			 */
			VOP_UNLOCK(cfvp);
		}
		else {
			/* Core dump (or no cached pair): do a real open. */
			printf("coda_rdwr: internal VOP_OPEN\n");
			opened_internally = 1;
			MARK_INT_GEN(CODA_OPEN_STATS);
			error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
#ifdef CODA_VERBOSE
			printf("coda_rdwr: Internally Opening %p\n", vp);
#endif
			if (error) {
				MARK_INT_FAIL(CODA_RDWR_STATS);
				return(error);
			}
			/* The open above stored the container in c_ovp. */
			cfvp = cp->c_ovp;
		}
	}

	/* Have UFS handle the call. */
	CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
	    coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )

	if (rw == UIO_READ) {
		error = VOP_READ(cfvp, uiop, ioflag, cred);
	} else {
		error = VOP_WRITE(cfvp, uiop, ioflag, cred);
	}

	if (error)
		MARK_INT_FAIL(CODA_RDWR_STATS);
	else
		MARK_INT_SAT(CODA_RDWR_STATS);

	/* Do an internal close if necessary. */
	if (opened_internally) {
		MARK_INT_GEN(CODA_CLOSE_STATS);
		(void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
	}

	/* Invalidate cached attributes if writing. */
	if (rw == UIO_WRITE)
		cp->c_flags &= ~C_VATTR;
	return(error);
}
483
/*
 * Handle a Coda pioctl: the ioctl data is a PioctlData carrying a
 * user-space path.  Look that path up, verify it resolves to a Coda
 * vnode, and forward the ioctl to Venus.  Only valid on the Coda
 * control object; any other vnode gets EOPNOTSUPP.
 */
int
coda_ioctl(void *v)
{
	/* true args */
	struct vop_ioctl_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int com = ap->a_command;
	void *data = ap->a_data;
	int flag = ap->a_fflag;
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	int error;
	struct vnode *tvp;
	struct PioctlData *iap = (struct PioctlData *)data;
	namei_simple_flags_t sflags;

	MARK_ENTRY(CODA_IOCTL_STATS);

	CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)

	/* Don't check for operation on a dying object, for ctlvp it
	   shouldn't matter */

	/* Must be control object to succeed. */
	if (!IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
		return (EOPNOTSUPP);
	}
	/* Look up the pathname. */

	/* Should we use the name cache here? It would get it from
	   lookupname sooner or later anyway, right? */

	/* iap->follow selects whether the final symlink is followed. */
	sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
	error = namei_simple_user(iap->path, sflags, &tvp);

	if (error) {
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
		    error));)
		return(error);
	}

	/*
	 * Make sure this is a coda style cnode, but it may be a
	 * different vfsp
	 */
	/* XXX: this totally violates the comment about vtagtype in vnode.h */
	if (tvp->v_tag != VT_CODA) {
		vrele(tvp);
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL,
		    myprintf(("coda_ioctl error: %s not a coda object\n",
		    iap->path));)
		return(EINVAL);
	}

	/* Reject request sizes Venus cannot handle. */
	if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
		vrele(tvp);
		return(EINVAL);
	}
	/* Forward the ioctl on the resolved object's fid to Venus. */
	error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
	    cred, curlwp);

	if (error)
		MARK_INT_FAIL(CODA_IOCTL_STATS);
	else
		CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )

	/* Release the reference taken by namei_simple_user. */
	vrele(tvp);
	return(error);
}
557
558 /*
559 * To reduce the cost of a user-level venus;we cache attributes in
560 * the kernel. Each cnode has storage allocated for an attribute. If
561 * c_vattr is valid, return a reference to it. Otherwise, get the
562 * attributes from venus and store them in the cnode. There is some
563 * question if this method is a security leak. But I think that in
564 * order to make this call, the user must have done a lookup and
565 * opened the file, and therefore should already have access.
566 */
567 int
568 coda_getattr(void *v)
569 {
570 /* true args */
571 struct vop_getattr_args *ap = v;
572 struct vnode *vp = ap->a_vp;
573 struct cnode *cp = VTOC(vp);
574 struct vattr *vap = ap->a_vap;
575 kauth_cred_t cred = ap->a_cred;
576 /* locals */
577 int error;
578
579 MARK_ENTRY(CODA_GETATTR_STATS);
580
581 /* Check for getattr of control object. */
582 if (IS_CTL_VP(vp)) {
583 MARK_INT_FAIL(CODA_GETATTR_STATS);
584 return(ENOENT);
585 }
586
587 /* Check to see if the attributes have already been cached */
588 if (VALID_VATTR(cp)) {
589 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
590 coda_f2s(&cp->c_fid)));});
591 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
592 print_vattr(&cp->c_vattr); );
593
594 *vap = cp->c_vattr;
595 MARK_INT_SAT(CODA_GETATTR_STATS);
596 return(0);
597 }
598
599 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
600
601 if (!error) {
602 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
603 coda_f2s(&cp->c_fid), error)); )
604
605 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
606 print_vattr(vap); );
607
608 /* If not open for write, store attributes in cnode */
609 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
610 cp->c_vattr = *vap;
611 cp->c_flags |= C_VATTR;
612 }
613
614 }
615 return(error);
616 }
617
618 int
619 coda_setattr(void *v)
620 {
621 /* true args */
622 struct vop_setattr_args *ap = v;
623 struct vnode *vp = ap->a_vp;
624 struct cnode *cp = VTOC(vp);
625 struct vattr *vap = ap->a_vap;
626 kauth_cred_t cred = ap->a_cred;
627 /* locals */
628 int error;
629
630 MARK_ENTRY(CODA_SETATTR_STATS);
631
632 /* Check for setattr of control object. */
633 if (IS_CTL_VP(vp)) {
634 MARK_INT_FAIL(CODA_SETATTR_STATS);
635 return(ENOENT);
636 }
637
638 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
639 print_vattr(vap);
640 }
641 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
642
643 if (!error)
644 cp->c_flags &= ~C_VATTR;
645
646 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
647 return(error);
648 }
649
650 int
651 coda_access(void *v)
652 {
653 /* true args */
654 struct vop_access_args *ap = v;
655 struct vnode *vp = ap->a_vp;
656 struct cnode *cp = VTOC(vp);
657 int mode = ap->a_mode;
658 kauth_cred_t cred = ap->a_cred;
659 /* locals */
660 int error;
661
662 MARK_ENTRY(CODA_ACCESS_STATS);
663
664 /* Check for access of control object. Only read access is
665 allowed on it. */
666 if (IS_CTL_VP(vp)) {
667 /* bogus hack - all will be marked as successes */
668 MARK_INT_SAT(CODA_ACCESS_STATS);
669 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
670 ? 0 : EACCES);
671 }
672
673 /*
674 * if the file is a directory, and we are checking exec (eg lookup)
675 * access, and the file is in the namecache, then the user must have
676 * lookup access to it.
677 */
678 if (coda_access_cache) {
679 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
680 if (coda_nc_lookup(cp, ".", 1, cred)) {
681 MARK_INT_SAT(CODA_ACCESS_STATS);
682 return(0); /* it was in the cache */
683 }
684 }
685 }
686
687 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
688
689 return(error);
690 }
691
/*
 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
 * done. If a buffer has been saved in anticipation of a coda_create or
 * a coda_remove, delete it.  Nothing to release here, so this is a no-op.
 */
/* ARGSUSED */
int
coda_abortop(void *v)
{
	/* true args */
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	(void)ap;	/* unused; silence the compiler */

	return (0);
}
713
/*
 * Read the target of a symbolic link: serve it from the in-cnode
 * symlink cache when valid, otherwise fetch it from Venus and
 * (optionally) cache the Venus-allocated string in the cnode.
 */
int
coda_readlink(void *v)
{
	/* true args */
	struct vop_readlink_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct uio *uiop = ap->a_uio;
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	struct lwp *l = curlwp;
	int error;
	char *str;
	int len;

	MARK_ENTRY(CODA_READLINK_STATS);

	/* Check for readlink of control object. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_READLINK_STATS);
		return(ENOENT);
	}

	if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {	/* symlink was cached */
		uiop->uio_rw = UIO_READ;
		error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
		if (error)
			MARK_INT_FAIL(CODA_READLINK_STATS);
		else
			MARK_INT_SAT(CODA_READLINK_STATS);
		return(error);
	}

	/* Not cached: ask Venus; str/len are filled in on success. */
	error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);

	if (!error) {
		uiop->uio_rw = UIO_READ;
		error = uiomove(str, len, uiop);

		/*
		 * Either hand ownership of the string to the cnode's
		 * symlink cache, or free it now.
		 */
		if (coda_symlink_cache) {
			cp->c_symlink = str;
			cp->c_symlen = len;
			cp->c_flags |= C_SYMLINK;
		} else
			CODA_FREE(str, len);
	}

	CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
	return(error);
}
764
/*
 * Flush a file: fsync the container vnode if there is one, then ask
 * Venus to fsync its copy — unless the node is being purged or its
 * filesystem is unmounting.
 */
int
coda_fsync(void *v)
{
	/* true args */
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	struct vnode *convp = cp->c_ovp;	/* container vnode, may be NULL */
	int error;

	MARK_ENTRY(CODA_FSYNC_STATS);

	/* Check for fsync on an unmounting object */
	/* The NetBSD kernel, in its infinite wisdom, can try to fsync
	 * after an unmount has been initiated. This is a Bad Thing,
	 * which we have to avoid. Not a legitimate failure for stats.
	 */
	if (IS_UNMOUNTING(cp)) {
		return(ENODEV);
	}

	/* Check for fsync of control object. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_FSYNC_STATS);
		return(0);
	}

	/* Push the container file's data first; its error is ignored. */
	if (convp)
		VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);

	/*
	 * We can expect fsync on any vnode at all if venus is purging it.
	 * Venus can't very well answer the fsync request, now can it?
	 * Hopefully, it won't have to, because hopefully, venus preserves
	 * the (possibly untrue) invariant that it never purges an open
	 * vnode. Hopefully.
	 */
	if (cp->c_flags & C_PURGING) {
		return(0);
	}

	error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);

	CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
	return(error);
}
813
/*
 * vp is locked on entry, and we must unlock it.
 * XXX This routine is suspect and probably needs rewriting.
 *
 * Frees the cached symlink string, removes the cnode from the
 * internal table, and (when not unmounting) marks the vnode for
 * recycling.  Venus is never told about inactivation.
 */
int
coda_inactive(void *v)
{
	/* true args */
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	kauth_cred_t cred __unused = NULL;

	/* We don't need to send inactive to venus - DCS */
	MARK_ENTRY(CODA_INACTIVE_STATS);

	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_INACTIVE_STATS);
		return 0;
	}

	CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
	    coda_f2s(&cp->c_fid), vp->v_mount));)

	/* If an array has been allocated to hold the symlink, deallocate it */
	if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
		if (cp->c_symlink == NULL)
			panic("coda_inactive: null symlink pointer in cnode");

		CODA_FREE(cp->c_symlink, cp->c_symlen);
		cp->c_flags &= ~C_SYMLINK;
		cp->c_symlen = 0;
	}

	/* Remove it from the table so it can't be found. */
	coda_unsave(cp);
	if (vp->v_mount->mnt_data == NULL) {
		myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
		panic("badness in coda_inactive");
	}

	if (IS_UNMOUNTING(cp)) {
		/* XXX Do we need to VOP_CLOSE container vnodes? */
		if (vp->v_usecount > 0)
			printf("coda_inactive: IS_UNMOUNTING %p usecount %d\n",
			    vp, vp->v_usecount);
		if (cp->c_ovp != NULL)
			printf("coda_inactive: %p ovp != NULL\n", vp);
		VOP_UNLOCK(vp);
	} else {
		/* Sanity checks that perhaps should be panic. */
		if (vp->v_usecount) {
			printf("coda_inactive: %p usecount %d\n", vp, vp->v_usecount);
		}
		if (cp->c_ovp != NULL) {
			printf("coda_inactive: %p ovp != NULL\n", vp);
		}
		VOP_UNLOCK(vp);
		/* Ask the caller to recycle (reclaim) this vnode. */
		*ap->a_recycle = true;
	}

	MARK_INT_SAT(CODA_INACTIVE_STATS);
	return(0);
}
878
/*
 * Coda does not use the normal namecache, but a private version.
 * Consider how to use the standard facility instead.
 */
/*
 * Look up a component name in directory dvp.  Resolution order:
 * control-object name, Coda minicache, then a venus_lookup upcall.
 * On success *vpp holds a referenced vnode; the locking of dvp/*vpp
 * on exit follows the vnodeops(9) lookup protocol (see the exit code
 * below).
 */
int
coda_lookup(void *v)
{
	/* true args */
	struct vop_lookup_args *ap = v;
	/* (locked) vnode of dir in which to do lookup */
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	/* output variable for result */
	struct vnode **vpp = ap->a_vpp;
	/* name to lookup */
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
	/* locals */
	struct cnode *cp;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	int flags = cnp->cn_flags;
	int isdot;
	CodaFid VFid;
	int vtype;
	int error = 0;

	MARK_ENTRY(CODA_LOOKUP_STATS);

	CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
	    nm, coda_f2s(&dcp->c_fid))););

	/*
	 * XXX componentname flags in MODMASK are not handled at all
	 */

	/*
	 * The overall strategy is to switch on the lookup type and get a
	 * result vnode that is vref'd but not locked. Then, the code at
	 * exit: switches on ., .., and regular lookups and does the right
	 * locking.
	 */

	/* Check for lookup of control object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = coda_ctlvp;
		vref(*vpp);
		MARK_INT_SAT(CODA_LOOKUP_STATS);
		goto exit;
	}

	/* Avoid trying to hand venus an unreasonably long name. */
	if (len+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_LOOKUP_STATS);
		CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
		    coda_f2s(&dcp->c_fid), nm)););
		*vpp = (struct vnode *)0;
		error = EINVAL;
		goto exit;
	}

	/*
	 * XXX Check for DOT lookups, and short circuit all the caches,
	 * just doing an extra vref. (venus guarantees that lookup of
	 * . returns self.)
	 */
	isdot = (len == 1 && nm[0] == '.');

	/*
	 * Try to resolve the lookup in the minicache. If that fails, ask
	 * venus to do the lookup. XXX The interaction between vnode
	 * locking and any locking that coda does is not clear.
	 */
	cp = coda_nc_lookup(dcp, nm, len, cred);
	if (cp) {
		/* Minicache hit: just reference the cached vnode. */
		*vpp = CTOV(cp);
		vref(*vpp);
		CODADEBUG(CODA_LOOKUP,
		    myprintf(("lookup result %d vpp %p\n",error,*vpp));)
	} else {
		/* The name wasn't cached, so ask Venus. */
		error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);

		if (error) {
			MARK_INT_FAIL(CODA_LOOKUP_STATS);
			CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
			    coda_f2s(&dcp->c_fid), nm, error));)
			*vpp = (struct vnode *)0;
		} else {
			MARK_INT_SAT(CODA_LOOKUP_STATS);
			CODADEBUG(CODA_LOOKUP,
			    myprintf(("lookup: %s type %o result %d\n",
			    coda_f2s(&VFid), vtype, error)); )

			/* Create or find the cnode for the returned fid. */
			cp = make_coda_node(&VFid, dvp->v_mount, vtype);
			*vpp = CTOV(cp);
			/* vpp is now vrefed. */

			/*
			 * Unless this vnode is marked CODA_NOCACHE, enter it into
			 * the coda name cache to avoid a future venus round-trip.
			 * XXX Interaction with componentname NOCACHE is unclear.
			 */
			if (!(vtype & CODA_NOCACHE))
				coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
		}
	}

 exit:
	/*
	 * If we are creating, and this was the last name to be looked up,
	 * and the error was ENOENT, then make the leaf NULL and return
	 * success.
	 * XXX Check against new lookup rules.
	 */
	if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
	    && (cnp->cn_flags & ISLASTCN)
	    && (error == ENOENT))
	{
		error = EJUSTRETURN;
		cnp->cn_flags |= SAVENAME;
		*ap->a_vpp = NULL;
	}

	/*
	 * If we are removing, and we are at the last element, and we
	 * found it, then we need to keep the name around so that the
	 * removal will go ahead as planned.
	 * XXX Check against new lookup rules.
	 */
	if ((cnp->cn_nameiop == DELETE)
	    && (cnp->cn_flags & ISLASTCN)
	    && !error)
	{
		cnp->cn_flags |= SAVENAME;
	}

	/*
	 * If the lookup succeeded, we must generally lock the returned
	 * vnode. This could be a ., .., or normal lookup. See
	 * vnodeops(9) for the details.
	 */
	/*
	 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
	 * somehow, and remove LK_RETRY.
	 */
	if (!error || (error == EJUSTRETURN)) {
		/* Lookup has a value and it isn't "."? */
		if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
			if (flags & ISDOTDOT)
				/* ..: unlock parent */
				VOP_UNLOCK(dvp);
			/* all but .: lock child */
			vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
			if (flags & ISDOTDOT)
				/* ..: relock parent */
				vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		/* else .: leave dvp locked */
	} else {
		/* The lookup failed, so return NULL. Leave dvp locked. */
		*ap->a_vpp = NULL;
	}
	return(error);
}
1045
/*
 * Create a regular file via a venus_create upcall, then materialize
 * a cnode/vnode for the new object, update the attribute and name
 * caches, and hand back a locked, referenced *vpp per vnodeops(9).
 * The parent dvp is vput() regardless of outcome.
 */
/*ARGSUSED*/
int
coda_create(void *v)
{
	/* true args */
	struct vop_create_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct vattr *va = ap->a_vap;
	int exclusive = 1;	/* all creates are treated as exclusive */
	int mode = ap->a_vap->va_mode;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
	/* locals */
	int error;
	struct cnode *cp;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	CodaFid VFid;
	struct vattr attr;

	MARK_ENTRY(CODA_CREATE_STATS);

	/* All creates are exclusive XXX */
	/* I'm assuming the 'mode' argument is the file mode bits XXX */

	/* Check for create of control object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_CREATE_STATS);
		return(EACCES);
	}

	/* Ask Venus to create the object; attr returns its attributes. */
	error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);

	if (!error) {

		/*
		 * XXX Violation of venus/kernel invariants is a difficult case,
		 * but venus should not be able to cause a panic.
		 */
		/* If this is an exclusive create, panic if the file already exists. */
		/* Venus should have detected the file and reported EEXIST. */

		if ((exclusive == 1) &&
		    (coda_find(&VFid) != NULL))
			panic("cnode existed for newly created file!");

		cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
		*vpp = CTOV(cp);

		/* XXX vnodeops doesn't say this argument can be changed. */
		/* Update va to reflect the new attributes. */
		(*va) = attr;

		/* Update the attribute cache and mark it as valid */
		if (coda_attr_cache) {
			VTOC(*vpp)->c_vattr = attr;
			VTOC(*vpp)->c_flags |= C_VATTR;
		}

		/* Invalidate parent's attr cache (modification time has changed). */
		VTOC(dvp)->c_flags &= ~C_VATTR;

		/* enter the new vnode in the Name Cache */
		coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));

		CODADEBUG(CODA_CREATE,
		    myprintf(("create: %s, result %d\n",
		    coda_f2s(&VFid), error)); )
	} else {
		*vpp = (struct vnode *)0;
		CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
	}

	/*
	 * vnodeops(9) says that we must unlock the parent and lock the child.
	 * XXX Should we lock the child first?
	 */
	vput(dvp);
	if (!error) {
		if ((cnp->cn_flags & LOCKLEAF) == 0) {
			/* This should not happen; flags are for lookup only. */
			printf("coda_create: LOCKLEAF not set!\n");
		}

		if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
			/* XXX Perhaps avoid this panic. */
			panic("coda_create: couldn't lock child");
		}
	}

	return(error);
}
1142
/*
 * VOP_REMOVE for coda: remove the file named by cnp from directory dvp.
 *
 * On entry both dvp and vp are locked and referenced; both are released
 * on exit (avoiding a double-release when vp == dvp).
 */
int
coda_remove(void *v)
{
/* true args */
    struct vop_remove_args *ap = v;
    struct vnode *dvp = ap->a_dvp;
    struct cnode *cp = VTOC(dvp);
    struct vnode *vp = ap->a_vp;
    struct componentname *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    struct cnode *tp;

    MARK_ENTRY(CODA_REMOVE_STATS);

    CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
				     nm, coda_f2s(&cp->c_fid))););

    /* Remove the file's entry from the CODA Name Cache */
    /* We're being conservative here, it might be that this person
     * doesn't really have sufficient access to delete the file
     * but we feel zapping the entry won't really hurt anyone -- dcs
     */
    /* I'm gonna go out on a limb here. If a file and a hardlink to it
     * exist, and one is removed, the link count on the other will be
     * off by 1. We could either invalidate the attrs if cached, or
     * fix them. I'll try to fix them. DCS 11/8/94
     */
    tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
    if (tp) {
	if (VALID_VATTR(tp)) {	/* If attrs are cached */
	    if (tp->c_vattr.va_nlink > 1) {	/* If it's a hard link */
		tp->c_vattr.va_nlink--;
	    }
	}

	coda_nc_zapfile(VTOC(dvp), nm, len);
	/* No need to flush it if it doesn't exist! */
    }
    /* Invalidate the parent's attr cache, the modification time has changed */
    VTOC(dvp)->c_flags &= ~C_VATTR;

    /* Check for remove of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
	MARK_INT_FAIL(CODA_REMOVE_STATS);
	/* NOTE(review): dvp and vp are not released on this path,
	 * unlike the normal exit below -- confirm against callers. */
	return(ENOENT);
    }

    /* Upcall to venus to perform the actual removal. */
    error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);

    CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )

    /*
     * Unlock parent and child (avoiding double if ".").
     */
    if (dvp == vp) {
	vrele(vp);
    } else {
	vput(vp);
    }
    vput(dvp);

    return(error);
}
1211
1212 /*
1213 * dvp is the directory where the link is to go, and is locked.
1214 * vp is the object to be linked to, and is unlocked.
1215 * At exit, we must unlock dvp, and vput dvp.
1216 */
1217 int
1218 coda_link(void *v)
1219 {
1220 /* true args */
1221 struct vop_link_args *ap = v;
1222 struct vnode *vp = ap->a_vp;
1223 struct cnode *cp = VTOC(vp);
1224 struct vnode *dvp = ap->a_dvp;
1225 struct cnode *dcp = VTOC(dvp);
1226 struct componentname *cnp = ap->a_cnp;
1227 kauth_cred_t cred = cnp->cn_cred;
1228 struct lwp *l = curlwp;
1229 /* locals */
1230 int error;
1231 const char *nm = cnp->cn_nameptr;
1232 int len = cnp->cn_namelen;
1233
1234 MARK_ENTRY(CODA_LINK_STATS);
1235
1236 if (codadebug & CODADBGMSK(CODA_LINK)) {
1237
1238 myprintf(("nb_link: vp fid: %s\n",
1239 coda_f2s(&cp->c_fid)));
1240 myprintf(("nb_link: dvp fid: %s)\n",
1241 coda_f2s(&dcp->c_fid)));
1242
1243 }
1244 if (codadebug & CODADBGMSK(CODA_LINK)) {
1245 myprintf(("link: vp fid: %s\n",
1246 coda_f2s(&cp->c_fid)));
1247 myprintf(("link: dvp fid: %s\n",
1248 coda_f2s(&dcp->c_fid)));
1249
1250 }
1251
1252 /* Check for link to/from control object. */
1253 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1254 MARK_INT_FAIL(CODA_LINK_STATS);
1255 return(EACCES);
1256 }
1257
1258 /* If linking . to a name, error out earlier. */
1259 if (vp == dvp) {
1260 printf("coda_link vp==dvp\n");
1261 error = EISDIR;
1262 goto exit;
1263 }
1264
1265 /* XXX Why does venus_link need the vnode to be locked?*/
1266 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1267 printf("coda_link: couldn't lock vnode %p\n", vp);
1268 error = EFAULT; /* XXX better value */
1269 goto exit;
1270 }
1271 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1272 VOP_UNLOCK(vp);
1273
1274 /* Invalidate parent's attr cache (the modification time has changed). */
1275 VTOC(dvp)->c_flags &= ~C_VATTR;
1276 /* Invalidate child's attr cache (XXX why). */
1277 VTOC(vp)->c_flags &= ~C_VATTR;
1278
1279 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1280
1281 exit:
1282 vput(dvp);
1283 return(error);
1284 }
1285
/*
 * VOP_RENAME for coda: rename fnm in odvp (a_fdvp) to tnm in ndvp (a_tdvp)
 * via a venus upcall.
 *
 * Per the VOP_RENAME protocol, the from-side vnodes are referenced but
 * unlocked, and the to-side vnodes are locked; all four references are
 * dropped here on every exit path.
 */
int
coda_rename(void *v)
{
/* true args */
    struct vop_rename_args *ap = v;
    struct vnode *odvp = ap->a_fdvp;
    struct cnode *odcp = VTOC(odvp);
    struct componentname *fcnp = ap->a_fcnp;
    struct vnode *ndvp = ap->a_tdvp;
    struct cnode *ndcp = VTOC(ndvp);
    struct componentname *tcnp = ap->a_tcnp;
    kauth_cred_t cred = fcnp->cn_cred;
    struct lwp *l = curlwp;
/* true args */
    int error;
    const char *fnm = fcnp->cn_nameptr;
    int flen = fcnp->cn_namelen;
    const char *tnm = tcnp->cn_nameptr;
    int tlen = tcnp->cn_namelen;

    MARK_ENTRY(CODA_RENAME_STATS);

    /* Hmmm.  The vnodes are already looked up.  Perhaps they are locked?
       This could be Bad. XXX */
#ifdef OLD_DIAGNOSTIC
    if ((fcnp->cn_cred != tcnp->cn_cred)
	|| (fcnp->cn_lwp != tcnp->cn_lwp))
    {
	panic("coda_rename: component names don't agree");
    }
#endif

    /* Check for rename involving control object. */
    if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
	MARK_INT_FAIL(CODA_RENAME_STATS);
	/* NOTE(review): this path returns without dropping the four
	 * vnode references released below -- confirm against callers. */
	return(EACCES);
    }

    /* Problem with moving directories -- need to flush entry for .. */
    if (odvp != ndvp) {
	struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
	if (ovcp) {
	    struct vnode *ovp = CTOV(ovcp);
	    if ((ovp) &&
		(ovp->v_type == VDIR)) /* If it's a directory */
		coda_nc_zapfile(VTOC(ovp),"..", 2);
	}
    }

    /* Remove the entries for both source and target files */
    coda_nc_zapfile(VTOC(odvp), fnm, flen);
    coda_nc_zapfile(VTOC(ndvp), tnm, tlen);

    /* Invalidate the parent's attr cache, the modification time has changed */
    VTOC(odvp)->c_flags &= ~C_VATTR;
    VTOC(ndvp)->c_flags &= ~C_VATTR;

    if (flen+1 > CODA_MAXNAMLEN) {
	MARK_INT_FAIL(CODA_RENAME_STATS);
	error = EINVAL;
	goto exit;
    }

    if (tlen+1 > CODA_MAXNAMLEN) {
	MARK_INT_FAIL(CODA_RENAME_STATS);
	error = EINVAL;
	goto exit;
    }

    error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);

 exit:
    CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
    /* XXX - do we need to call cache pureg on the moved vnode? */
    cache_purge(ap->a_fvp);

    /* It seems to be incumbent on us to drop locks on all four vnodes */
    /* From-vnodes are not locked, only ref'd.  To-vnodes are locked. */

    vrele(ap->a_fvp);
    vrele(odvp);

    if (ap->a_tvp) {
	if (ap->a_tvp == ndvp) {
	    /* Same vnode locked once: drop only the extra reference. */
	    vrele(ap->a_tvp);
	} else {
	    vput(ap->a_tvp);
	}
    }

    vput(ndvp);
    return(error);
}
1379
/*
 * VOP_MKDIR for coda: create directory nm in dvp via a venus upcall.
 *
 * On entry dvp is locked and referenced; it is vput on all venus-error
 * and success paths.  On success *ap->a_vpp is returned locked and
 * referenced.
 */
int
coda_mkdir(void *v)
{
/* true args */
    struct vop_mkdir_args *ap = v;
    struct vnode *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    struct componentname  *cnp = ap->a_cnp;
    struct vattr *va = ap->a_vap;
    struct vnode **vpp = ap->a_vpp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    struct cnode *cp;
    CodaFid VFid;		/* fid of the new directory, from venus */
    struct vattr ova;		/* attributes of the new directory, from venus */

    MARK_ENTRY(CODA_MKDIR_STATS);

    /* Check for mkdir of target object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
	*vpp = (struct vnode *)0;
	MARK_INT_FAIL(CODA_MKDIR_STATS);
	/* NOTE(review): dvp is not vput on this path or the next,
	 * unlike the exit below -- confirm against callers. */
	return(EACCES);
    }

    if (len+1 > CODA_MAXNAMLEN) {
	*vpp = (struct vnode *)0;
	MARK_INT_FAIL(CODA_MKDIR_STATS);
	return(EACCES);
    }

    error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);

    if (!error) {
	if (coda_find(&VFid) != NULL)
	    panic("cnode existed for newly created directory!");


	cp =  make_coda_node(&VFid, dvp->v_mount, va->va_type);
	*vpp = CTOV(cp);

	/* enter the new vnode in the Name Cache */
	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));

	/* as a side effect, enter "." and ".." for the directory */
	coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
	coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));

	if (coda_attr_cache) {
	    VTOC(*vpp)->c_vattr = ova;		/* update the attr cache */
	    VTOC(*vpp)->c_flags |= C_VATTR;	/* Valid attributes in cnode */
	}

	/* Invalidate the parent's attr cache, the modification time has changed */
	VTOC(dvp)->c_flags &= ~C_VATTR;

	CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
					 coda_f2s(&VFid), error)); )
    } else {
	*vpp = (struct vnode *)0;
	CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
    }

    /*
     * Currently, all mkdirs explicitly vput their dvp's.
     * It also appears that we *must* lock the vpp, since
     * lockleaf isn't set, but someone down the road is going
     * to try to unlock the new directory.
     */
    vput(dvp);
    if (!error) {
	if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
	    panic("coda_mkdir: couldn't lock child");
	}
    }

    return(error);
}
1462
/*
 * VOP_RMDIR for coda: remove directory nm from dvp via a venus upcall.
 *
 * On entry dvp and vp are locked and referenced; both are released at
 * the exit label (avoiding a double-release when vp == dvp).
 */
int
coda_rmdir(void *v)
{
/* true args */
    struct vop_rmdir_args *ap = v;
    struct vnode *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    struct vnode *vp = ap->a_vp;
    struct componentname  *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* true args */
    int error;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    struct cnode *cp;

    MARK_ENTRY(CODA_RMDIR_STATS);

    /* Check for rmdir of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
	MARK_INT_FAIL(CODA_RMDIR_STATS);
	/* NOTE(review): dvp and vp are not released on this path,
	 * unlike the exit label below -- confirm against callers. */
	return(ENOENT);
    }

    /* Can't remove . in self. */
    if (dvp == vp) {
	printf("coda_rmdir: dvp == vp\n");
	error = EINVAL;
	goto exit;
    }

    /*
     * The caller may not have adequate permissions, and the venus
     * operation may fail, but it doesn't hurt from a correctness
     * viewpoint to invalidate cache entries.
     * XXX Why isn't this done after the venus_rmdir call?
     */
    /* Look up child in name cache (by name, from parent). */
    cp = coda_nc_lookup(dcp, nm, len, cred);
    /* If found, remove all children of the child (., ..). */
    if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);

    /* Remove child's own entry. */
    coda_nc_zapfile(dcp, nm, len);

    /* Invalidate parent's attr cache (the modification time has changed). */
    dcp->c_flags &= ~C_VATTR;

    error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);

    CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )

 exit:
    /* vput both vnodes */
    vput(dvp);
    if (dvp == vp) {
	/* Same vnode: the lock is already gone, just drop the ref. */
	vrele(vp);
    } else {
	vput(vp);
    }

    return(error);
}
1527
/*
 * VOP_SYMLINK for coda: create symlink nm in dvp pointing at a_target,
 * then look the new name up to obtain the resulting vnode.
 *
 * On entry dvp is locked and referenced; it is vput at exit.  On
 * success *ap->a_vpp is whatever VOP_LOOKUP returned (locked).
 */
int
coda_symlink(void *v)
{
/* true args */
    struct vop_symlink_args *ap = v;
    struct vnode *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    /* a_vpp is used in place below */
    struct componentname *cnp = ap->a_cnp;
    struct vattr *tva = ap->a_vap;
    char *path = ap->a_target;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    u_long saved_cn_flags;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    int plen = strlen(path);

    /*
     * Here's the strategy for the moment: perform the symlink, then
     * do a lookup to grab the resulting vnode.  I know this requires
     * two communications with Venus for a new symbolic link, but
     * that's the way the ball bounces.  I don't yet want to change
     * the way the Mach symlink works.  When Mach support is
     * deprecated, we should change symlink so that the common case
     * returns the resultant vnode in a vpp argument.
     */

    MARK_ENTRY(CODA_SYMLINK_STATS);

    /* Check for symlink of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
	MARK_INT_FAIL(CODA_SYMLINK_STATS);
	error = EACCES;
	goto exit;
    }

    if (plen+1 > CODA_MAXPATHLEN) {
	MARK_INT_FAIL(CODA_SYMLINK_STATS);
	error = EINVAL;
	goto exit;
    }

    if (len+1 > CODA_MAXNAMLEN) {
	MARK_INT_FAIL(CODA_SYMLINK_STATS);
	error = EINVAL;
	goto exit;
    }

    error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);

    /* Invalidate the parent's attr cache (modification time has changed). */
    dcp->c_flags &= ~C_VATTR;

    if (!error) {
	/*
	 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
	 * these are defined only for VOP_LOOKUP.   We desire to reuse
	 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
	 * stray flags passed to us.  Such stray flags can occur because
	 * sys_symlink makes a namei call and then reuses the
	 * componentname structure.
	 */
	/*
	 * XXX Arguably we should create our own componentname structure
	 * and not reuse the one that was passed in.
	 */
	saved_cn_flags = cnp->cn_flags;
	cnp->cn_flags &= ~(MODMASK | OPMASK);
	cnp->cn_flags |= LOOKUP;
	error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
	cnp->cn_flags = saved_cn_flags;
	/* Either an error occurs, or ap->a_vpp is locked. */
    }

 exit:
    /* unlock and dereference parent */
    vput(dvp);

    CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
    return(error);
}
1612
1613 /*
1614 * Read directory entries.
1615 */
1616 int
1617 coda_readdir(void *v)
1618 {
1619 /* true args */
1620 struct vop_readdir_args *ap = v;
1621 struct vnode *vp = ap->a_vp;
1622 struct cnode *cp = VTOC(vp);
1623 struct uio *uiop = ap->a_uio;
1624 kauth_cred_t cred = ap->a_cred;
1625 int *eofflag = ap->a_eofflag;
1626 off_t **cookies = ap->a_cookies;
1627 int *ncookies = ap->a_ncookies;
1628 /* upcall decl */
1629 /* locals */
1630 int error = 0;
1631
1632 MARK_ENTRY(CODA_READDIR_STATS);
1633
1634 CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )
1635
1636 /* Check for readdir of control object. */
1637 if (IS_CTL_VP(vp)) {
1638 MARK_INT_FAIL(CODA_READDIR_STATS);
1639 return(ENOENT);
1640 }
1641
1642 {
1643 /* Redirect the request to UFS. */
1644
1645 /* If directory is not already open do an "internal open" on it. */
1646 int opened_internally = 0;
1647 if (cp->c_ovp == NULL) {
1648 opened_internally = 1;
1649 MARK_INT_GEN(CODA_OPEN_STATS);
1650 error = VOP_OPEN(vp, FREAD, cred);
1651 #ifdef CODA_VERBOSE
1652 printf("coda_readdir: Internally Opening %p\n", vp);
1653 #endif
1654 if (error) return(error);
1655 } else
1656 vp = cp->c_ovp;
1657
1658 /* Have UFS handle the call. */
1659 CODADEBUG(CODA_READDIR, myprintf((
1660 "indirect readdir: fid = %s, refcnt = %d\n",
1661 coda_f2s(&cp->c_fid), vp->v_usecount)); )
1662 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1663 if (error)
1664 MARK_INT_FAIL(CODA_READDIR_STATS);
1665 else
1666 MARK_INT_SAT(CODA_READDIR_STATS);
1667
1668 /* Do an "internal close" if necessary. */
1669 if (opened_internally) {
1670 MARK_INT_GEN(CODA_CLOSE_STATS);
1671 (void)VOP_CLOSE(vp, FREAD, cred);
1672 }
1673 }
1674
1675 return(error);
1676 }
1677
1678 /*
1679 * Convert from file system blocks to device blocks
1680 */
1681 int
1682 coda_bmap(void *v)
1683 {
1684 /* XXX on the global proc */
1685 /* true args */
1686 struct vop_bmap_args *ap = v;
1687 struct vnode *vp __unused = ap->a_vp; /* file's vnode */
1688 daddr_t bn __unused = ap->a_bn; /* fs block number */
1689 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1690 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1691 struct lwp *l __unused = curlwp;
1692 /* upcall decl */
1693 /* locals */
1694
1695 *vpp = (struct vnode *)0;
1696 myprintf(("coda_bmap called!\n"));
1697 return(EINVAL);
1698 }
1699
1700 /*
1701 * I don't think the following two things are used anywhere, so I've
1702 * commented them out
1703 *
1704 * struct buf *async_bufhead;
1705 * int async_daemon_count;
1706 */
1707 int
1708 coda_strategy(void *v)
1709 {
1710 /* true args */
1711 struct vop_strategy_args *ap = v;
1712 struct buf *bp __unused = ap->a_bp;
1713 struct lwp *l __unused = curlwp;
1714 /* upcall decl */
1715 /* locals */
1716
1717 myprintf(("coda_strategy called! "));
1718 return(EINVAL);
1719 }
1720
1721 int
1722 coda_reclaim(void *v)
1723 {
1724 /* true args */
1725 struct vop_reclaim_args *ap = v;
1726 struct vnode *vp = ap->a_vp;
1727 struct cnode *cp = VTOC(vp);
1728 /* upcall decl */
1729 /* locals */
1730
1731 /*
1732 * Forced unmount/flush will let vnodes with non zero use be destroyed!
1733 */
1734 ENTRY;
1735
1736 if (IS_UNMOUNTING(cp)) {
1737 #ifdef DEBUG
1738 if (VTOC(vp)->c_ovp) {
1739 if (IS_UNMOUNTING(cp))
1740 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1741 }
1742 #endif
1743 } else {
1744 #ifdef OLD_DIAGNOSTIC
1745 if (vp->v_usecount != 0)
1746 print("coda_reclaim: pushing active %p\n", vp);
1747 if (VTOC(vp)->c_ovp) {
1748 panic("coda_reclaim: c_ovp not void");
1749 }
1750 #endif
1751 }
1752 cache_purge(vp);
1753 coda_free(VTOC(vp));
1754 SET_VTOC(vp) = NULL;
1755 return (0);
1756 }
1757
1758 int
1759 coda_lock(void *v)
1760 {
1761 /* true args */
1762 struct vop_lock_args *ap = v;
1763 struct vnode *vp = ap->a_vp;
1764 struct cnode *cp = VTOC(vp);
1765 /* upcall decl */
1766 /* locals */
1767
1768 ENTRY;
1769
1770 if (coda_lockdebug) {
1771 myprintf(("Attempting lock on %s\n",
1772 coda_f2s(&cp->c_fid)));
1773 }
1774
1775 return genfs_lock(v);
1776 }
1777
1778 int
1779 coda_unlock(void *v)
1780 {
1781 /* true args */
1782 struct vop_unlock_args *ap = v;
1783 struct vnode *vp = ap->a_vp;
1784 struct cnode *cp = VTOC(vp);
1785 /* upcall decl */
1786 /* locals */
1787
1788 ENTRY;
1789 if (coda_lockdebug) {
1790 myprintf(("Attempting unlock on %s\n",
1791 coda_f2s(&cp->c_fid)));
1792 }
1793
1794 return genfs_unlock(v);
1795 }
1796
1797 int
1798 coda_islocked(void *v)
1799 {
1800 /* true args */
1801 ENTRY;
1802
1803 return genfs_islocked(v);
1804 }
1805
1806 /*
1807 * Given a device and inode, obtain a locked vnode. One reference is
1808 * obtained and passed back to the caller.
1809 */
1810 int
1811 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1812 {
1813 int error;
1814 struct mount *mp;
1815
1816 /* Obtain mount point structure from device. */
1817 if (!(mp = devtomp(dev))) {
1818 myprintf(("coda_grab_vnode: devtomp(0x%llx) returns NULL\n",
1819 (unsigned long long)dev));
1820 return(ENXIO);
1821 }
1822
1823 /*
1824 * Obtain vnode from mount point and inode.
1825 * XXX VFS_VGET does not clearly define locked/referenced state of
1826 * returned vnode.
1827 */
1828 error = VFS_VGET(mp, ino, vpp);
1829 if (error) {
1830 myprintf(("coda_grab_vnode: iget/vget(0x%llx, %llu) returns %p, err %d\n",
1831 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1832 return(ENOENT);
1833 }
1834 return(0);
1835 }
1836
1837 void
1838 print_vattr(struct vattr *attr)
1839 {
1840 const char *typestr;
1841
1842 switch (attr->va_type) {
1843 case VNON:
1844 typestr = "VNON";
1845 break;
1846 case VREG:
1847 typestr = "VREG";
1848 break;
1849 case VDIR:
1850 typestr = "VDIR";
1851 break;
1852 case VBLK:
1853 typestr = "VBLK";
1854 break;
1855 case VCHR:
1856 typestr = "VCHR";
1857 break;
1858 case VLNK:
1859 typestr = "VLNK";
1860 break;
1861 case VSOCK:
1862 typestr = "VSCK";
1863 break;
1864 case VFIFO:
1865 typestr = "VFFO";
1866 break;
1867 case VBAD:
1868 typestr = "VBAD";
1869 break;
1870 default:
1871 typestr = "????";
1872 break;
1873 }
1874
1875
1876 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1877 typestr, (int)attr->va_mode, (int)attr->va_uid,
1878 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1879
1880 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1881 (int)attr->va_fileid, (int)attr->va_nlink,
1882 (int)attr->va_size,
1883 (int)attr->va_blocksize,(int)attr->va_bytes));
1884 myprintf((" gen %ld flags %ld vaflags %d\n",
1885 attr->va_gen, attr->va_flags, attr->va_vaflags));
1886 myprintf((" atime sec %d nsec %d\n",
1887 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1888 myprintf((" mtime sec %d nsec %d\n",
1889 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1890 myprintf((" ctime sec %d nsec %d\n",
1891 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1892 }
1893
1894 /* How to print a ucred */
1895 void
1896 print_cred(kauth_cred_t cred)
1897 {
1898
1899 uint16_t ngroups;
1900 int i;
1901
1902 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1903 kauth_cred_geteuid(cred)));
1904
1905 ngroups = kauth_cred_ngroups(cred);
1906 for (i=0; i < ngroups; i++)
1907 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1908 myprintf(("\n"));
1909
1910 }
1911
1912 /*
1913 * Return a vnode for the given fid.
1914 * If no cnode exists for this fid create one and put it
1915 * in a table hashed by coda_f2i(). If the cnode for
1916 * this fid is already in the table return it (ref count is
1917 * incremented by coda_find. The cnode will be flushed from the
1918 * table when coda_inactive calls coda_unsave.
1919 */
1920 struct cnode *
1921 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1922 {
1923 struct cnode *cp;
1924 int err;
1925
1926 if ((cp = coda_find(fid)) == NULL) {
1927 struct vnode *vp;
1928
1929 cp = coda_alloc();
1930 cp->c_fid = *fid;
1931
1932 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
1933 if (err) {
1934 panic("coda: getnewvnode returned error %d", err);
1935 }
1936 vp->v_data = cp;
1937 vp->v_type = type;
1938 cp->c_vnode = vp;
1939 uvm_vnp_setsize(vp, 0);
1940 coda_save(cp);
1941
1942 } else {
1943 vref(CTOV(cp));
1944 }
1945
1946 return cp;
1947 }
1948
1949 /*
1950 * coda_getpages may be called on a vnode which has not been opened,
1951 * e.g. to fault in pages to execute a program. In that case, we must
1952 * open the file to get the container. The vnode may or may not be
1953 * locked, and we must leave it in the same state.
1954 * XXX The protocol requires v_uobj.vmobjlock to be
1955 * held by caller, but this isn't documented in vnodeops(9) or vnode_if.src.
1956 */
1957 int
1958 coda_getpages(void *v)
1959 {
1960 struct vop_getpages_args /* {
1961 struct vnode *a_vp;
1962 voff_t a_offset;
1963 struct vm_page **a_m;
1964 int *a_count;
1965 int a_centeridx;
1966 vm_prot_t a_access_type;
1967 int a_advice;
1968 int a_flags;
1969 } */ *ap = v;
1970 struct vnode *vp = ap->a_vp;
1971 struct cnode *cp = VTOC(vp);
1972 struct lwp *l = curlwp;
1973 kauth_cred_t cred = l->l_cred;
1974 int error, cerror;
1975 int waslocked; /* 1 if vnode lock was held on entry */
1976 int didopen = 0; /* 1 if we opened container file */
1977
1978 /*
1979 * Handle a case that uvm_fault doesn't quite use yet.
1980 * See layer_vnops.c. for inspiration.
1981 */
1982 if (ap->a_flags & PGO_LOCKED) {
1983 return EBUSY;
1984 }
1985
1986 /* Check for control object. */
1987 if (IS_CTL_VP(vp)) {
1988 printf("coda_getpages: control object %p\n", vp);
1989 mutex_exit(&vp->v_uobj.vmobjlock);
1990 return(EINVAL);
1991 }
1992
1993 /*
1994 * XXX It's really not ok to be releasing the lock we get,
1995 * because we could be overlapping with another call to
1996 * getpages and drop a lock they are relying on. We need to
1997 * figure out whether getpages ever is called holding the
1998 * lock, and if we should serialize getpages calls by some
1999 * mechanism.
2000 */
2001 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
2002 waslocked = VOP_ISLOCKED(vp);
2003
2004 /* Drop the vmobject lock. */
2005 mutex_exit(&vp->v_uobj.vmobjlock);
2006
2007 /* Get container file if not already present. */
2008 if (cp->c_ovp == NULL) {
2009 /*
2010 * VOP_OPEN requires a locked vnode. We must avoid
2011 * locking the vnode if it is already locked, and
2012 * leave it in the same state on exit.
2013 */
2014 if (waslocked == 0) {
2015 cerror = vn_lock(vp, LK_EXCLUSIVE);
2016 if (cerror) {
2017 printf("coda_getpages: can't lock vnode %p\n",
2018 vp);
2019 return cerror;
2020 }
2021 #if 0
2022 printf("coda_getpages: locked vnode %p\n", vp);
2023 #endif
2024 }
2025
2026 /*
2027 * Open file (causes upcall to venus).
2028 * XXX Perhaps we should not fully open the file, but
2029 * simply obtain a container file.
2030 */
2031 /* XXX Is it ok to do this while holding the simplelock? */
2032 cerror = VOP_OPEN(vp, FREAD, cred);
2033
2034 if (cerror) {
2035 printf("coda_getpages: cannot open vnode %p => %d\n",
2036 vp, cerror);
2037 if (waslocked == 0)
2038 VOP_UNLOCK(vp);
2039 return cerror;
2040 }
2041
2042 #if 0
2043 printf("coda_getpages: opened vnode %p\n", vp);
2044 #endif
2045 didopen = 1;
2046 }
2047 KASSERT(cp->c_ovp != NULL);
2048
2049 /* Munge the arg structure to refer to the container vnode. */
2050 ap->a_vp = cp->c_ovp;
2051
2052 /* Get the lock on the container vnode, and call getpages on it. */
2053 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2054 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2055
2056 /* If we opened the vnode, we must close it. */
2057 if (didopen) {
2058 /*
2059 * VOP_CLOSE requires a locked vnode, but we are still
2060 * holding the lock (or riding a caller's lock).
2061 */
2062 cerror = VOP_CLOSE(vp, FREAD, cred);
2063 if (cerror != 0)
2064 /* XXX How should we handle this? */
2065 printf("coda_getpages: closed vnode %p -> %d\n",
2066 vp, cerror);
2067
2068 /* If we obtained a lock, drop it. */
2069 if (waslocked == 0)
2070 VOP_UNLOCK(vp);
2071 }
2072
2073 return error;
2074 }
2075
2076 /*
2077 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2078 * documented in vnodeops(9). XXX vnode_if.src doesn't say this.
2079 */
2080 int
2081 coda_putpages(void *v)
2082 {
2083 struct vop_putpages_args /* {
2084 struct vnode *a_vp;
2085 voff_t a_offlo;
2086 voff_t a_offhi;
2087 int a_flags;
2088 } */ *ap = v;
2089 struct vnode *vp = ap->a_vp;
2090 struct cnode *cp = VTOC(vp);
2091 int error;
2092
2093 /* Drop the vmobject lock. */
2094 mutex_exit(&vp->v_uobj.vmobjlock);
2095
2096 /* Check for control object. */
2097 if (IS_CTL_VP(vp)) {
2098 printf("coda_putpages: control object %p\n", vp);
2099 return(EINVAL);
2100 }
2101
2102 /*
2103 * If container object is not present, then there are no pages
2104 * to put; just return without error. This happens all the
2105 * time, apparently during discard of a closed vnode (which
2106 * trivially can't have dirty pages).
2107 */
2108 if (cp->c_ovp == NULL)
2109 return 0;
2110
2111 /* Munge the arg structure to refer to the container vnode. */
2112 ap->a_vp = cp->c_ovp;
2113
2114 /* Get the lock on the container vnode, and call putpages on it. */
2115 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2116 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2117
2118 return error;
2119 }
2120