1 /* $NetBSD: coda_vnops.c,v 1.114.4.1 2021/08/01 22:42:20 thorpej Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.114.4.1 2021/08/01 22:42:20 thorpej Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65 #include <sys/dirent.h>
66
67 #include <miscfs/genfs/genfs.h>
68 #include <miscfs/specfs/specdev.h>
69
70 #include <coda/coda.h>
71 #include <coda/cnode.h>
72 #include <coda/coda_vnops.h>
73 #include <coda/coda_venus.h>
74 #include <coda/coda_opstats.h>
75 #include <coda/coda_subr.h>
76 #include <coda/coda_namecache.h>
77 #include <coda/coda_pioctl.h>
78
79 /*
80 * These flags select various performance enhancements.
81 */
82 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
83 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
84 int coda_access_cache = 1; /* Set to handle some access checks directly */
85
86 /* structure to keep track of vfs calls */
87
88 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
89
90 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
91 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
92 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
93 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
94
95 /* Set to log lock/unlock attempts (see coda_lock/coda_unlock). */
96 static int coda_lockdebug = 0;
97
98 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
99
100 /* Definition of the vnode operation vector */
101
102 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
103 { &vop_default_desc, coda_vop_error },
104 { &vop_parsepath_desc, genfs_parsepath }, /* parsepath */
105 { &vop_lookup_desc, coda_lookup }, /* lookup */
106 { &vop_create_desc, coda_create }, /* create */
107 { &vop_mknod_desc, coda_vop_error }, /* mknod */
108 { &vop_open_desc, coda_open }, /* open */
109 { &vop_close_desc, coda_close }, /* close */
110 { &vop_access_desc, coda_access }, /* access */
111 { &vop_accessx_desc, genfs_accessx }, /* accessx */
112 { &vop_getattr_desc, coda_getattr }, /* getattr */
113 { &vop_setattr_desc, coda_setattr }, /* setattr */
114 { &vop_read_desc, coda_read }, /* read */
115 { &vop_write_desc, coda_write }, /* write */
116 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */
117 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */
118 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
119 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
120 { &vop_mmap_desc, genfs_mmap }, /* mmap */
121 { &vop_fsync_desc, coda_fsync }, /* fsync */
122 { &vop_remove_desc, coda_remove }, /* remove */
123 { &vop_link_desc, coda_link }, /* link */
124 { &vop_rename_desc, coda_rename }, /* rename */
125 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
126 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
127 { &vop_symlink_desc, coda_symlink }, /* symlink */
128 { &vop_readdir_desc, coda_readdir }, /* readdir */
129 { &vop_readlink_desc, coda_readlink }, /* readlink */
130 { &vop_abortop_desc, coda_abortop }, /* abortop */
131 { &vop_inactive_desc, coda_inactive }, /* inactive */
132 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
133 { &vop_lock_desc, coda_lock }, /* lock */
134 { &vop_unlock_desc, coda_unlock }, /* unlock */
135 { &vop_bmap_desc, coda_bmap }, /* bmap */
136 { &vop_strategy_desc, coda_strategy }, /* strategy */
137 { &vop_print_desc, coda_vop_error }, /* print */
138 { &vop_islocked_desc, coda_islocked }, /* islocked */
139 { &vop_pathconf_desc, coda_pathconf }, /* pathconf */
140 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
141 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
142 { &vop_seek_desc, genfs_seek }, /* seek */
143 { &vop_poll_desc, genfs_poll }, /* poll */
144 { &vop_getpages_desc, coda_getpages }, /* getpages */
145 { &vop_putpages_desc, coda_putpages }, /* putpages */
146 { NULL, NULL }
147 };
148
149 static void coda_print_vattr(struct vattr *);
150
151 int (**coda_vnodeop_p)(void *);
152 const struct vnodeopv_desc coda_vnodeop_opv_desc =
153 { &coda_vnodeop_p, coda_vnodeop_entries };
154
155 /* Definitions of NetBSD vnodeop interfaces */
156
157 /*
158 * A generic error routine. Return EIO without looking at arguments.
159 */
160 int
161 coda_vop_error(void *anon) {
162 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
163
164 if (codadebug) {
165 myprintf(("%s: Vnode operation %s called (error).\n",
166 __func__, (*desc)->vdesc_name));
167 }
168
169 return EIO;
170 }
171
172 /* A generic do-nothing. */
173 int
174 coda_vop_nop(void *anon) {
175 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
176
177 if (codadebug) {
178 myprintf(("Vnode operation %s called, but unsupported\n",
179 (*desc)->vdesc_name));
180 }
181 return (0);
182 }
183
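/*
 * Zero the per-operation statistics counters that the MARK_ENTRY,
 * MARK_INT_SAT, MARK_INT_FAIL and MARK_INT_GEN macros update above.
 */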
184 int
185 coda_vnodeopstats_init(void)
186 {
187 int i;
188
189 for (i = 0; i < CODA_VNODEOPS_SIZE; i++) {
190 coda_vnodeopstats[i].opcode = i;
191 coda_vnodeopstats[i].entries = 0;
192 coda_vnodeopstats[i].sat_intrn = 0;
193 coda_vnodeopstats[i].unsat_intrn = 0;
194 coda_vnodeopstats[i].gen_intrn = 0;
195 }
196
197 return 0;
198 }
199
200 /*
201 * XXX The entire relationship between VOP_OPEN and having a container
202 * file (via venus_open) needs to be reexamined. In particular, it's
203 * valid to open/mmap/close and then reference. Instead of doing
204 * VOP_OPEN when getpages needs a container, we should do the
205 * venus_open part, and record that the vnode has opened the container
206 * for getpages, and do the matching logical close on coda_inactive.
207 * Further, coda_rdwr needs a container file, and sometimes needs to
208 * do the equivalent of open (core dumps).
209 */
210 /*
211 * coda_open calls Venus to return the device and inode of the
212 * container file, and then obtains a vnode for that file. The
213 * container vnode is stored in the coda vnode, and a reference is
214 * added for each open file.
215 */
216 int
217 coda_open(void *v)
218 {
219 /*
220 * NetBSD can pass the O_EXCL flag in mode, even though the check
221 * has already happened. Venus defensively assumes that if open
222 * is passed O_EXCL, it must be a bug. We strip the flag here.
223 */
224 /* true args */
225 struct vop_open_args *ap = v;
226 vnode_t *vp = ap->a_vp;
227 struct cnode *cp = VTOC(vp);
228 int flag = ap->a_mode & (~O_EXCL);
229 kauth_cred_t cred = ap->a_cred;
230 /* locals */
231 int error;
232 dev_t dev; /* container file device, inode, vnode */
233 ino_t inode;
234 vnode_t *container_vp;
235
236 MARK_ENTRY(CODA_OPEN_STATS);
237
238 KASSERT(VOP_ISLOCKED(vp));
239 /* Check for open of control file. */
240 if (IS_CTL_VP(vp)) {
241 /* if (WRITABLE(flag)) */
242 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
243 MARK_INT_FAIL(CODA_OPEN_STATS);
244 return(EACCES);
245 }
246 MARK_INT_SAT(CODA_OPEN_STATS);
247 return(0);
248 }
249
250 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
251 if (error)
252 return (error);
254 CODADEBUG(CODA_OPEN, myprintf((
255 "%s: dev 0x%llx inode %llu result %d\n", __func__,
256 (unsigned long long)dev, (unsigned long long)inode, error));)
258
259 /*
260 * Obtain locked and referenced container vnode from container
261 * device/inode.
262 */
263 error = coda_grab_vnode(vp, dev, inode, &container_vp);
264 if (error)
265 return (error);
266
267 /* Save the vnode pointer for the container file. */
268 if (cp->c_ovp == NULL) {
269 cp->c_ovp = container_vp;
270 } else {
271 if (cp->c_ovp != container_vp)
272 /*
273 * Perhaps venus returned a different container, or
274 * something else went wrong.
275 */
276 panic("%s: cp->c_ovp != container_vp", __func__);
277 }
278 cp->c_ocount++;
279
280 /* Flush the attribute cache if writing the file. */
281 if (flag & FWRITE) {
282 cp->c_owrite++;
283 cp->c_flags &= ~C_VATTR;
284 }
285
286 /*
287 * Save the <device, inode> pair for the container file to speed
288 * up subsequent reads while closed (mmap, program execution).
289 * This is perhaps safe because venus will invalidate the node
290 * before changing the container file mapping.
291 */
292 cp->c_device = dev;
293 cp->c_inode = inode;
294
295 /* Open the container file. */
296 error = VOP_OPEN(container_vp, flag, cred);
297 /*
298 * Drop the lock on the container, after we have done VOP_OPEN
299 * (which requires a locked vnode).
300 */
301 VOP_UNLOCK(container_vp);
302 return(error);
303 }
304
305 /*
306 * Close the cache file used for I/O and notify Venus.
307 */
308 int
309 coda_close(void *v)
310 {
311 /* true args */
312 struct vop_close_args *ap = v;
313 vnode_t *vp = ap->a_vp;
314 struct cnode *cp = VTOC(vp);
315 int flag = ap->a_fflag;
316 kauth_cred_t cred = ap->a_cred;
317 /* locals */
318 int error;
319
320 MARK_ENTRY(CODA_CLOSE_STATS);
321
322 /* Check for close of control file. */
323 if (IS_CTL_VP(vp)) {
324 MARK_INT_SAT(CODA_CLOSE_STATS);
325 return(0);
326 }
327
328 /*
329 * XXX The IS_UNMOUNTING part of this is very suspect.
330 */
331 if (IS_UNMOUNTING(cp)) {
332 if (cp->c_ovp) {
333 #ifdef CODA_VERBOSE
334 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
335 __func__, vrefcnt(vp), cp->c_ovp, vp, cp);
336 #endif
337 #ifdef hmm
338 vgone(cp->c_ovp);
339 #else
340 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
341 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
342 vput(cp->c_ovp);
343 #endif
344 } else {
345 #ifdef CODA_VERBOSE
346 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
347 #endif
348 }
349 return ENODEV;
350 }
351
352 /* Lock the container node, and VOP_CLOSE it. */
353 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
354 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
355 /*
356 * Drop the lock we just obtained, and vrele the container vnode.
357 * Decrement reference counts, and clear container vnode pointer on
358 * last close.
359 */
360 vput(cp->c_ovp);
361 if (flag & FWRITE)
362 --cp->c_owrite;
363 if (--cp->c_ocount == 0)
364 cp->c_ovp = NULL;
365
366 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
367
368 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
369 return(error);
370 }
371
372 int
373 coda_read(void *v)
374 {
375 struct vop_read_args *ap = v;
376
377 ENTRY;
378 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
379 ap->a_ioflag, ap->a_cred, curlwp));
380 }
381
382 int
383 coda_write(void *v)
384 {
385 struct vop_write_args *ap = v;
386
387 ENTRY;
388 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
389 ap->a_ioflag, ap->a_cred, curlwp));
390 }
391
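/*
 * Common read/write path. I/O is redirected to the container file:
 * if no container vnode is cached, either grab one from the saved
 * <device, inode> pair or do an internal VOP_OPEN (with a matching
 * VOP_CLOSE afterwards). Writing invalidates the cached attributes.
 */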
392 int
393 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
394 kauth_cred_t cred, struct lwp *l)
395 {
396 /* upcall decl */
397 /* NOTE: container file operation!!! */
398 /* locals */
399 struct cnode *cp = VTOC(vp);
400 vnode_t *cfvp = cp->c_ovp;
401 struct proc *p = l->l_proc;
402 int opened_internally = 0;
403 int error = 0;
404
405 MARK_ENTRY(CODA_RDWR_STATS);
406
407 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
408 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
409 (long long) uiop->uio_offset)); )
410
411 /* Check for rdwr of control object. */
412 if (IS_CTL_VP(vp)) {
413 MARK_INT_FAIL(CODA_RDWR_STATS);
414 return(EINVAL);
415 }
416
417 /* Redirect the request to UFS. */
418
419 /*
420 * If file is not already open this must be a page
421 * {read,write} request. Iget the cache file's inode
422 * pointer if we still have its <device, inode> pair.
423 * Otherwise, we must do an internal open to derive the
424 * pair.
425 * XXX Integrate this into a coherent strategy for container
426 * file acquisition.
427 */
428 if (cfvp == NULL) {
429 /*
430 * If we're dumping core, do the internal open. Otherwise
431 * venus won't have the correct size of the core when
432 * it's completely written.
433 */
434 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
435 #ifdef CODA_VERBOSE
436 printf("%s: grabbing container vnode, losing reference\n",
437 __func__);
438 #endif
439 /* Get locked and refed vnode. */
440 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
441 if (error) {
442 MARK_INT_FAIL(CODA_RDWR_STATS);
443 return(error);
444 }
445 /*
446 * Drop lock.
447 * XXX Where is the reference released?
448 */
449 VOP_UNLOCK(cfvp);
450 }
451 else {
452 #ifdef CODA_VERBOSE
453 printf("%s: internal VOP_OPEN\n", __func__);
454 #endif
455 opened_internally = 1;
456 MARK_INT_GEN(CODA_OPEN_STATS);
457 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
458 #ifdef CODA_VERBOSE
459 printf("%s: Internally Opening %p\n", __func__, vp);
460 #endif
461 if (error) {
462 MARK_INT_FAIL(CODA_RDWR_STATS);
463 return(error);
464 }
465 cfvp = cp->c_ovp;
466 }
467 }
468
469 /* Have UFS handle the call. */
470 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
471 coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)))); )
472
473 if (rw == UIO_READ) {
474 error = VOP_READ(cfvp, uiop, ioflag, cred);
475 } else {
476 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
477 }
478
479 if (error)
480 MARK_INT_FAIL(CODA_RDWR_STATS);
481 else
482 MARK_INT_SAT(CODA_RDWR_STATS);
483
484 /* Do an internal close if necessary. */
485 if (opened_internally) {
486 MARK_INT_GEN(CODA_CLOSE_STATS);
487 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
488 }
489
490 /* Invalidate cached attributes if writing. */
491 if (rw == UIO_WRITE)
492 cp->c_flags &= ~C_VATTR;
493 return(error);
494 }
495
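/*
 * Handle the pioctl(2) path: only the control vnode accepts ioctls.
 * The user-supplied path in the PioctlData argument is resolved to a
 * coda vnode and the request is forwarded to Venus.
 */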
496 int
497 coda_ioctl(void *v)
498 {
499 /* true args */
500 struct vop_ioctl_args *ap = v;
501 vnode_t *vp = ap->a_vp;
502 int com = ap->a_command;
503 void *data = ap->a_data;
504 int flag = ap->a_fflag;
505 kauth_cred_t cred = ap->a_cred;
506 /* locals */
507 int error;
508 vnode_t *tvp;
509 struct PioctlData *iap = (struct PioctlData *)data;
510 namei_simple_flags_t sflags;
511
512 MARK_ENTRY(CODA_IOCTL_STATS);
513
514 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
515
516 /* Don't check for operation on a dying object; for the ctlvp it
517 shouldn't matter */
518
519 /* Must be control object to succeed. */
520 if (!IS_CTL_VP(vp)) {
521 MARK_INT_FAIL(CODA_IOCTL_STATS);
522 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
523 return (EOPNOTSUPP);
524 }
525 /* Look up the pathname. */
526
527 /* Should we use the name cache here? It would get it from
528 lookupname sooner or later anyway, right? */
529
530 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
531 error = namei_simple_user(iap->path, sflags, &tvp);
532
533 if (error) {
534 MARK_INT_FAIL(CODA_IOCTL_STATS);
535 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
536 __func__, error));)
537 return(error);
538 }
539
540 /*
541 * Make sure this is a coda style cnode, but it may be a
542 * different vfsp
543 */
544 /* XXX: this totally violates the comment about vtagtype in vnode.h */
545 if (tvp->v_tag != VT_CODA) {
546 vrele(tvp);
547 MARK_INT_FAIL(CODA_IOCTL_STATS);
548 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
549 __func__, iap->path));)
550 return(EINVAL);
551 }
552
553 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
554 vrele(tvp);
555 return(EINVAL);
556 }
557 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
558 cred, curlwp);
559
560 if (error)
561 MARK_INT_FAIL(CODA_IOCTL_STATS);
562 else
563 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
564
565 vrele(tvp);
566 return(error);
567 }
568
569 /*
570 * To reduce the cost of a user-level venus, we cache attributes in
571 * the kernel. Each cnode has storage allocated for an attribute. If
572 * c_vattr is valid, return a reference to it. Otherwise, get the
573 * attributes from venus and store them in the cnode. There is some
574 * question whether this method is a security leak. But I think that in
575 * order to make this call, the user must have done a lookup and
576 * opened the file, and therefore should already have access.
577 */
578 int
579 coda_getattr(void *v)
580 {
581 /* true args */
582 struct vop_getattr_args *ap = v;
583 vnode_t *vp = ap->a_vp;
584 struct cnode *cp = VTOC(vp);
585 struct vattr *vap = ap->a_vap;
586 kauth_cred_t cred = ap->a_cred;
587 /* locals */
588 int error;
589
590 MARK_ENTRY(CODA_GETATTR_STATS);
591
592 /* Check for getattr of control object. */
593 if (IS_CTL_VP(vp)) {
594 MARK_INT_FAIL(CODA_GETATTR_STATS);
595 return(ENOENT);
596 }
597
598 /* Check to see if the attributes have already been cached */
599 if (VALID_VATTR(cp)) {
600 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
601 __func__, coda_f2s(&cp->c_fid)));})
602 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
603 coda_print_vattr(&cp->c_vattr); )
604
605 *vap = cp->c_vattr;
606 MARK_INT_SAT(CODA_GETATTR_STATS);
607 return(0);
608 }
609
610 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
611
612 if (!error) {
613 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
614 __func__, coda_f2s(&cp->c_fid), error)); )
615
616 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
617 coda_print_vattr(vap); )
618
619 /* If not open for write, store attributes in cnode */
620 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
621 cp->c_vattr = *vap;
622 cp->c_flags |= C_VATTR;
623 }
624
625 }
626 return(error);
627 }
628
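/*
 * Push attribute changes to Venus and invalidate the local attribute
 * cache on success.
 */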
629 int
630 coda_setattr(void *v)
631 {
632 /* true args */
633 struct vop_setattr_args *ap = v;
634 vnode_t *vp = ap->a_vp;
635 struct cnode *cp = VTOC(vp);
636 struct vattr *vap = ap->a_vap;
637 kauth_cred_t cred = ap->a_cred;
638 /* locals */
639 int error;
640
641 MARK_ENTRY(CODA_SETATTR_STATS);
642
643 /* Check for setattr of control object. */
644 if (IS_CTL_VP(vp)) {
645 MARK_INT_FAIL(CODA_SETATTR_STATS);
646 return(ENOENT);
647 }
648
649 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
650 coda_print_vattr(vap);
651 }
652 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
653
654 if (!error)
655 cp->c_flags &= ~C_VATTR;
656
657 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
658 return(error);
659 }
660
661 int
662 coda_access(void *v)
663 {
664 /* true args */
665 struct vop_access_args *ap = v;
666 vnode_t *vp = ap->a_vp;
667 struct cnode *cp = VTOC(vp);
668 accmode_t accmode = ap->a_accmode;
669 kauth_cred_t cred = ap->a_cred;
670 /* locals */
671 int error;
672
673 MARK_ENTRY(CODA_ACCESS_STATS);
674
675 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0);
676 /* Check for access of control object. Only read access is
677 allowed on it. */
678 if (IS_CTL_VP(vp)) {
679 /* bogus hack - all will be marked as successes */
680 MARK_INT_SAT(CODA_ACCESS_STATS);
681 return(((accmode & VREAD) && !(accmode & (VWRITE | VEXEC)))
682 ? 0 : EACCES);
683 }
684
685 /*
686 * if the file is a directory, and we are checking exec (eg lookup)
687 * access, and the file is in the namecache, then the user must have
688 * lookup access to it.
689 */
690 if (coda_access_cache) {
691 if ((vp->v_type == VDIR) && (accmode & VEXEC)) {
692 if (coda_nc_lookup(cp, ".", 1, cred)) {
693 MARK_INT_SAT(CODA_ACCESS_STATS);
694 return(0); /* it was in the cache */
695 }
696 }
697 }
698
699 error = venus_access(vtomi(vp), &cp->c_fid, accmode, cred, curlwp);
700
701 return(error);
702 }
703
704 /*
705 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
706 * done. If a buffer has been saved in anticipation of a coda_create or
707 * a coda_remove, delete it.
708 */
709 /* ARGSUSED */
710 int
711 coda_abortop(void *v)
712 {
713 /* true args */
714 struct vop_abortop_args /* {
715 vnode_t *a_dvp;
716 struct componentname *a_cnp;
717 } */ *ap = v;
718
719 (void)ap;
720 /* upcall decl */
721 /* locals */
722
723 return (0);
724 }
725
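/*
 * Return the target of a symbolic link, from the per-cnode cache when
 * coda_symlink_cache is enabled and the cached copy is valid, and
 * otherwise from Venus (caching the result for next time).
 */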
726 int
727 coda_readlink(void *v)
728 {
729 /* true args */
730 struct vop_readlink_args *ap = v;
731 vnode_t *vp = ap->a_vp;
732 struct cnode *cp = VTOC(vp);
733 struct uio *uiop = ap->a_uio;
734 kauth_cred_t cred = ap->a_cred;
735 /* locals */
736 struct lwp *l = curlwp;
737 int error;
738 char *str;
739 int len;
740
741 MARK_ENTRY(CODA_READLINK_STATS);
742
743 /* Check for readlink of control object. */
744 if (IS_CTL_VP(vp)) {
745 MARK_INT_FAIL(CODA_READLINK_STATS);
746 return(ENOENT);
747 }
748
749 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
750 uiop->uio_rw = UIO_READ;
751 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
752 if (error)
753 MARK_INT_FAIL(CODA_READLINK_STATS);
754 else
755 MARK_INT_SAT(CODA_READLINK_STATS);
756 return(error);
757 }
758
759 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
760
761 if (!error) {
762 uiop->uio_rw = UIO_READ;
763 error = uiomove(str, len, uiop);
764
765 if (coda_symlink_cache) {
766 cp->c_symlink = str;
767 cp->c_symlen = len;
768 cp->c_flags |= C_SYMLINK;
769 } else
770 CODA_FREE(str, len);
771 }
772
773 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
774 return(error);
775 }
776
777 int
778 coda_fsync(void *v)
779 {
780 /* true args */
781 struct vop_fsync_args *ap = v;
782 vnode_t *vp = ap->a_vp;
783 struct cnode *cp = VTOC(vp);
784 kauth_cred_t cred = ap->a_cred;
785 /* locals */
786 vnode_t *convp = cp->c_ovp;
787 int error;
788
789 MARK_ENTRY(CODA_FSYNC_STATS);
790
791 /* Check for fsync on an unmounting object */
792 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
793 * after an unmount has been initiated. This is a Bad Thing,
794 * which we have to avoid. Not a legitimate failure for stats.
795 */
796 if (IS_UNMOUNTING(cp)) {
797 return(ENODEV);
798 }
799
800 /* Check for fsync of control object or uninitialized cnode. */
801 if (IS_CTL_VP(vp) || vp->v_type == VNON) {
802 MARK_INT_SAT(CODA_FSYNC_STATS);
803 return(0);
804 }
805
806 if (convp)
807 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
808
809 /*
810 * We can expect fsync on any vnode at all if venus is purging it.
811 * Venus can't very well answer the fsync request, now can it?
812 * Hopefully, it won't have to, because hopefully, venus preserves
813 * the (possibly untrue) invariant that it never purges an open
814 * vnode. Hopefully.
815 */
816 if (cp->c_flags & C_PURGING) {
817 return(0);
818 }
819
820 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
821
822 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
823 return(error);
824 }
825
826 /*
827 * vp is locked on entry, and we must unlock it.
828 * XXX This routine is suspect and probably needs rewriting.
829 */
830 int
831 coda_inactive(void *v)
832 {
833 /* true args */
834 struct vop_inactive_v2_args *ap = v;
835 vnode_t *vp = ap->a_vp;
836 struct cnode *cp = VTOC(vp);
837 kauth_cred_t cred __unused = NULL;
838
839 /* We don't need to send inactive to venus - DCS */
840 MARK_ENTRY(CODA_INACTIVE_STATS);
841
842 if (IS_CTL_VP(vp)) {
843 MARK_INT_SAT(CODA_INACTIVE_STATS);
844 return 0;
845 }
846
847 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
848 coda_f2s(&cp->c_fid), vp->v_mount));)
849
850 if (vp->v_mount->mnt_data == NULL) {
851 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
852 panic("badness in coda_inactive");
853 }
854
855 #ifdef CODA_VERBOSE
856 /* Sanity checks that perhaps should be panic. */
857 if (vrefcnt(vp) > 1)
858 printf("%s: %p usecount %d\n", __func__, vp, vrefcnt(vp));
859 if (cp->c_ovp != NULL)
860 printf("%s: %p ovp != NULL\n", __func__, vp);
861 #endif
862 /* XXX Do we need to VOP_CLOSE container vnodes? */
863 if (!IS_UNMOUNTING(cp))
864 *ap->a_recycle = true;
865
866 MARK_INT_SAT(CODA_INACTIVE_STATS);
867 return(0);
868 }
869
870 /*
871 * Coda does not use the normal namecache, but a private version.
872 * Consider how to use the standard facility instead.
873 */
874 int
875 coda_lookup(void *v)
876 {
877 /* true args */
878 struct vop_lookup_v2_args *ap = v;
879 /* (locked) vnode of dir in which to do lookup */
880 vnode_t *dvp = ap->a_dvp;
881 struct cnode *dcp = VTOC(dvp);
882 /* output variable for result */
883 vnode_t **vpp = ap->a_vpp;
884 /* name to lookup */
885 struct componentname *cnp = ap->a_cnp;
886 kauth_cred_t cred = cnp->cn_cred;
887 struct lwp *l = curlwp;
888 /* locals */
889 struct cnode *cp;
890 const char *nm = cnp->cn_nameptr;
891 int len = cnp->cn_namelen;
892 CodaFid VFid;
893 int vtype;
894 int error = 0;
895
896 MARK_ENTRY(CODA_LOOKUP_STATS);
897
898 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
899 nm, coda_f2s(&dcp->c_fid)));)
900
901 /*
902 * XXX componentname flags in MODMASK are not handled at all
903 */
904
905 /*
906 * The overall strategy is to switch on the lookup type and get a
907 * result vnode that is vref'd but not locked.
908 */
909
910 /* Check for lookup of control object. */
911 if (IS_CTL_NAME(dvp, nm, len)) {
912 *vpp = coda_ctlvp;
913 vref(*vpp);
914 MARK_INT_SAT(CODA_LOOKUP_STATS);
915 goto exit;
916 }
917
918 /* Avoid trying to hand venus an unreasonably long name. */
919 if (len+1 > CODA_MAXNAMLEN) {
920 MARK_INT_FAIL(CODA_LOOKUP_STATS);
921 CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
922 __func__, coda_f2s(&dcp->c_fid), nm));)
923 *vpp = (vnode_t *)0;
924 error = EINVAL;
925 goto exit;
926 }
927
928 /*
929 * Try to resolve the lookup in the minicache. If that fails, ask
930 * venus to do the lookup. XXX The interaction between vnode
931 * locking and any locking that coda does is not clear.
932 */
933 cp = coda_nc_lookup(dcp, nm, len, cred);
934 if (cp) {
935 *vpp = CTOV(cp);
936 vref(*vpp);
937 CODADEBUG(CODA_LOOKUP,
938 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
939 } else {
940 /* The name wasn't cached, so ask Venus. */
941 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
942 &vtype);
943
944 if (error) {
945 MARK_INT_FAIL(CODA_LOOKUP_STATS);
946 CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
947 __func__, coda_f2s(&dcp->c_fid), nm, error));)
948 *vpp = (vnode_t *)0;
949 } else {
950 MARK_INT_SAT(CODA_LOOKUP_STATS);
951 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
952 __func__, coda_f2s(&VFid), vtype, error)); )
953
954 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
955 *vpp = CTOV(cp);
956 /* vpp is now vrefed. */
957
958 /*
959 * Unless this vnode is marked CODA_NOCACHE, enter it into
960 * the coda name cache to avoid a future venus round-trip.
961 * XXX Interaction with componentname NOCACHE is unclear.
962 */
963 if (!(vtype & CODA_NOCACHE))
964 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
965 }
966 }
967
968 exit:
969 /*
970 * If we are creating, and this was the last name to be looked up,
971 * and the error was ENOENT, then make the leaf NULL and return
972 * success.
973 * XXX Check against new lookup rules.
974 */
975 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
976 && (cnp->cn_flags & ISLASTCN)
977 && (error == ENOENT))
978 {
979 error = EJUSTRETURN;
980 *ap->a_vpp = NULL;
981 }
982
983 return(error);
984 }
985
986 /*ARGSUSED*/
987 int
988 coda_create(void *v)
989 {
990 /* true args */
991 struct vop_create_v3_args *ap = v;
992 vnode_t *dvp = ap->a_dvp;
993 struct cnode *dcp = VTOC(dvp);
994 struct vattr *va = ap->a_vap;
995 int exclusive = 1;
996 int mode = ap->a_vap->va_mode;
997 vnode_t **vpp = ap->a_vpp;
998 struct componentname *cnp = ap->a_cnp;
999 kauth_cred_t cred = cnp->cn_cred;
1000 struct lwp *l = curlwp;
1001 /* locals */
1002 int error;
1003 struct cnode *cp;
1004 const char *nm = cnp->cn_nameptr;
1005 int len = cnp->cn_namelen;
1006 CodaFid VFid;
1007 struct vattr attr;
1008
1009 MARK_ENTRY(CODA_CREATE_STATS);
1010
1011 /* All creates are exclusive XXX */
1012 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1013
1014 /* Check for create of control object. */
1015 if (IS_CTL_NAME(dvp, nm, len)) {
1016 *vpp = (vnode_t *)0;
1017 MARK_INT_FAIL(CODA_CREATE_STATS);
1018 return(EACCES);
1019 }
1020
1021 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1022
1023 if (!error) {
1024
1025 /*
1026 * XXX Violation of venus/kernel invariants is a difficult case,
1027 * but venus should not be able to cause a panic.
1028 */
1029 /* If this is an exclusive create, panic if the file already exists. */
1030 /* Venus should have detected the file and reported EEXIST. */
1031
1032 if ((exclusive == 1) &&
1033 (coda_find(&VFid) != NULL))
1034 panic("cnode existed for newly created file!");
1035
1036 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1037 *vpp = CTOV(cp);
1038
1039 /* XXX vnodeops doesn't say this argument can be changed. */
1040 /* Update va to reflect the new attributes. */
1041 (*va) = attr;
1042
1043 /* Update the attribute cache and mark it as valid */
1044 if (coda_attr_cache) {
1045 VTOC(*vpp)->c_vattr = attr;
1046 VTOC(*vpp)->c_flags |= C_VATTR;
1047 }
1048
1049 /* Invalidate parent's attr cache (modification time has changed). */
1050 VTOC(dvp)->c_flags &= ~C_VATTR;
1051
1052 /* enter the new vnode in the Name Cache */
1053 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1054
1055 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1056 coda_f2s(&VFid), error)); )
1057 } else {
1058 *vpp = (vnode_t *)0;
1059 CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1060 error));)
1061 }
1062
1063 if (!error) {
1064 #ifdef CODA_VERBOSE
1065 if ((cnp->cn_flags & LOCKLEAF) == 0)
1066 /* This should not happen; flags are for lookup only. */
1067 printf("%s: LOCKLEAF not set!\n", __func__);
1068 #endif
1069 }
1070
1071 return(error);
1072 }
1073
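/*
 * Remove a file: adjust any cached link count, zap the name cache
 * entry, invalidate the parent's attribute cache, and then ask Venus
 * to do the actual removal.
 */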
1074 int
1075 coda_remove(void *v)
1076 {
1077 /* true args */
1078 struct vop_remove_v2_args *ap = v;
1079 vnode_t *dvp = ap->a_dvp;
1080 struct cnode *cp = VTOC(dvp);
1081 vnode_t *vp = ap->a_vp;
1082 struct componentname *cnp = ap->a_cnp;
1083 kauth_cred_t cred = cnp->cn_cred;
1084 struct lwp *l = curlwp;
1085 /* locals */
1086 int error;
1087 const char *nm = cnp->cn_nameptr;
1088 int len = cnp->cn_namelen;
1089 struct cnode *tp;
1090
1091 MARK_ENTRY(CODA_REMOVE_STATS);
1092
1093 CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1094 nm, coda_f2s(&cp->c_fid)));)
1095
1096 /* Remove the file's entry from the CODA Name Cache */
1097 /* We're being conservative here, it might be that this person
1098 * doesn't really have sufficient access to delete the file
1099 * but we feel zapping the entry won't really hurt anyone -- dcs
1100 */
1101 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1102 * exist, and one is removed, the link count on the other will be
1103 * off by 1. We could either invalidate the attrs if cached, or
1104 * fix them. I'll try to fix them. DCS 11/8/94
1105 */
1106 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1107 if (tp) {
1108 if (VALID_VATTR(tp)) { /* If attrs are cached */
1109 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1110 tp->c_vattr.va_nlink--;
1111 }
1112 }
1113
1114 coda_nc_zapfile(VTOC(dvp), nm, len);
1115 /* No need to flush it if it doesn't exist! */
1116 }
1117 /* Invalidate the parent's attr cache, the modification time has changed */
1118 VTOC(dvp)->c_flags &= ~C_VATTR;
1119
1120 /* Check for remove of control object. */
1121 if (IS_CTL_NAME(dvp, nm, len)) {
1122 MARK_INT_FAIL(CODA_REMOVE_STATS);
1123 return(ENOENT);
1124 }
1125
1126 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1127
1128 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1129
1130 /*
1131 * Unlock and release child (avoiding double if ".").
1132 */
1133 if (dvp == vp) {
1134 vrele(vp);
1135 } else {
1136 vput(vp);
1137 }
1138
1139 return(error);
1140 }
1141
1142 /*
1143 * dvp is the directory where the link is to go, and is locked.
1144 * vp is the object to be linked to, and is unlocked.
1145 * At exit, we must unlock dvp, and vput dvp.
1146 */
1147 int
1148 coda_link(void *v)
1149 {
1150 /* true args */
1151 struct vop_link_v2_args *ap = v;
1152 vnode_t *vp = ap->a_vp;
1153 struct cnode *cp = VTOC(vp);
1154 vnode_t *dvp = ap->a_dvp;
1155 struct cnode *dcp = VTOC(dvp);
1156 struct componentname *cnp = ap->a_cnp;
1157 kauth_cred_t cred = cnp->cn_cred;
1158 struct lwp *l = curlwp;
1159 /* locals */
1160 int error;
1161 const char *nm = cnp->cn_nameptr;
1162 int len = cnp->cn_namelen;
1163
1164 MARK_ENTRY(CODA_LINK_STATS);
1165
1172 if (codadebug & CODADBGMSK(CODA_LINK)) {
1173 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1174 myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1175
1176 }
1177
1178 /* Check for link to/from control object. */
1179 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1180 MARK_INT_FAIL(CODA_LINK_STATS);
1181 return(EACCES);
1182 }
1183
1184 /* If linking . to a name, error out earlier. */
1185 if (vp == dvp) {
1186 #ifdef CODA_VERBOSE
1187 printf("%s coda_link vp==dvp\n", __func__);
1188 #endif
1189 error = EISDIR;
1190 goto exit;
1191 }
1192
1193 /* XXX Why does venus_link need the vnode to be locked?*/
1194 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1195 #ifdef CODA_VERBOSE
1196 printf("%s: couldn't lock vnode %p\n", __func__, vp);
1197 #endif
1198 error = EFAULT; /* XXX better value */
1199 goto exit;
1200 }
1201 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1202 VOP_UNLOCK(vp);
1203
1204 /* Invalidate parent's attr cache (the modification time has changed). */
1205 VTOC(dvp)->c_flags &= ~C_VATTR;
1206 /* Invalidate child's attr cache (XXX why). */
1207 VTOC(vp)->c_flags &= ~C_VATTR;
1208
1209 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1210
1211 exit:
1212 return(error);
1213 }
1214
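/*
 * Rename: purge affected name cache entries (including ".." when a
 * directory moves between parents), invalidate both parents'
 * attribute caches, and hand the operation to Venus.
 */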
1215 int
1216 coda_rename(void *v)
1217 {
1218 /* true args */
1219 struct vop_rename_args *ap = v;
1220 vnode_t *odvp = ap->a_fdvp;
1221 struct cnode *odcp = VTOC(odvp);
1222 struct componentname *fcnp = ap->a_fcnp;
1223 vnode_t *ndvp = ap->a_tdvp;
1224 struct cnode *ndcp = VTOC(ndvp);
1225 struct componentname *tcnp = ap->a_tcnp;
1226 kauth_cred_t cred = fcnp->cn_cred;
1227 struct lwp *l = curlwp;
1228 /* true args */
1229 int error;
1230 const char *fnm = fcnp->cn_nameptr;
1231 int flen = fcnp->cn_namelen;
1232 const char *tnm = tcnp->cn_nameptr;
1233 int tlen = tcnp->cn_namelen;
1234
1235 MARK_ENTRY(CODA_RENAME_STATS);
1236
1237 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1238 This could be Bad. XXX */
1239 #ifdef OLD_DIAGNOSTIC
1240 if ((fcnp->cn_cred != tcnp->cn_cred)
1241 || (fcnp->cn_lwp != tcnp->cn_lwp))
1242 {
1243 panic("%s: component names don't agree", __func__);
1244 }
1245 #endif
1246
1247 /* Check for rename involving control object. */
1248 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1249 MARK_INT_FAIL(CODA_RENAME_STATS);
1250 return(EACCES);
1251 }
1252
1253 /* Problem with moving directories -- need to flush entry for .. */
1254 if (odvp != ndvp) {
1255 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1256 if (ovcp) {
1257 vnode_t *ovp = CTOV(ovcp);
1258 if ((ovp) &&
1259 (ovp->v_type == VDIR)) /* If it's a directory */
1260 coda_nc_zapfile(VTOC(ovp),"..", 2);
1261 }
1262 }
1263
1264 /* Remove the entries for both source and target files */
1265 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1266 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1267
1268 /* Invalidate the parent's attr cache, the modification time has changed */
1269 VTOC(odvp)->c_flags &= ~C_VATTR;
1270 VTOC(ndvp)->c_flags &= ~C_VATTR;
1271
1272 if (flen+1 > CODA_MAXNAMLEN) {
1273 MARK_INT_FAIL(CODA_RENAME_STATS);
1274 error = EINVAL;
1275 goto exit;
1276 }
1277
1278 if (tlen+1 > CODA_MAXNAMLEN) {
1279 MARK_INT_FAIL(CODA_RENAME_STATS);
1280 error = EINVAL;
1281 goto exit;
1282 }
1283
1284 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1285
1286 exit:
1287 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1288 /* XXX - do we need to call cache_purge on the moved vnode? */
1289 cache_purge(ap->a_fvp);
1290
1291 /* It seems to be incumbent on us to drop locks on all four vnodes */
1292 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1293
1294 vrele(ap->a_fvp);
1295 vrele(odvp);
1296
1297 if (ap->a_tvp) {
1298 if (ap->a_tvp == ndvp) {
1299 vrele(ap->a_tvp);
1300 } else {
1301 vput(ap->a_tvp);
1302 }
1303 }
1304
1305 vput(ndvp);
1306 return(error);
1307 }
1308
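/*
 * Create a directory via Venus, enter the result (and its "." and
 * "..") in the coda name cache, and update the attribute caches.
 */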
1309 int
1310 coda_mkdir(void *v)
1311 {
1312 /* true args */
1313 struct vop_mkdir_v3_args *ap = v;
1314 vnode_t *dvp = ap->a_dvp;
1315 struct cnode *dcp = VTOC(dvp);
1316 struct componentname *cnp = ap->a_cnp;
1317 struct vattr *va = ap->a_vap;
1318 vnode_t **vpp = ap->a_vpp;
1319 kauth_cred_t cred = cnp->cn_cred;
1320 struct lwp *l = curlwp;
1321 /* locals */
1322 int error;
1323 const char *nm = cnp->cn_nameptr;
1324 int len = cnp->cn_namelen;
1325 struct cnode *cp;
1326 CodaFid VFid;
1327 struct vattr ova;
1328
1329 MARK_ENTRY(CODA_MKDIR_STATS);
1330
1331 /* Check for mkdir of control object. */
1332 if (IS_CTL_NAME(dvp, nm, len)) {
1333 *vpp = (vnode_t *)0;
1334 MARK_INT_FAIL(CODA_MKDIR_STATS);
1335 return(EACCES);
1336 }
1337
1338 if (len+1 > CODA_MAXNAMLEN) {
1339 *vpp = (vnode_t *)0;
1340 MARK_INT_FAIL(CODA_MKDIR_STATS);
1341 return(EACCES);
1342 }
1343
1344 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1345
1346 if (!error) {
1347 if (coda_find(&VFid) != NULL)
1348 panic("cnode existed for newly created directory!");
1349
1350
1351 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1352 *vpp = CTOV(cp);
1353
1354 /* enter the new vnode in the Name Cache */
1355 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1356
1357 /* as a side effect, enter "." and ".." for the directory */
1358 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1359 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1360
1361 if (coda_attr_cache) {
1362 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1363 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1364 }
1365
1366 /* Invalidate the parent's attr cache, the modification time has changed */
1367 VTOC(dvp)->c_flags &= ~C_VATTR;
1368
1369 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1370 coda_f2s(&VFid), error)); )
1371 } else {
1372 *vpp = (vnode_t *)0;
1373 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1374 }
1375
1376 return(error);
1377 }
1378
1379 int
1380 coda_rmdir(void *v)
1381 {
1382 /* true args */
1383 struct vop_rmdir_v2_args *ap = v;
1384 vnode_t *dvp = ap->a_dvp;
1385 struct cnode *dcp = VTOC(dvp);
1386 vnode_t *vp = ap->a_vp;
1387 struct componentname *cnp = ap->a_cnp;
1388 kauth_cred_t cred = cnp->cn_cred;
1389 struct lwp *l = curlwp;
1390 /* true args */
1391 int error;
1392 const char *nm = cnp->cn_nameptr;
1393 int len = cnp->cn_namelen;
1394 struct cnode *cp;
1395
1396 MARK_ENTRY(CODA_RMDIR_STATS);
1397
1398 /* Check for rmdir of control object. */
1399 if (IS_CTL_NAME(dvp, nm, len)) {
1400 MARK_INT_FAIL(CODA_RMDIR_STATS);
1401 return(ENOENT);
1402 }
1403
1404 /* Can't remove "." (dvp == vp). */
1405 if (dvp == vp) {
1406 #ifdef CODA_VERBOSE
1407 printf("%s: dvp == vp\n", __func__);
1408 #endif
1409 error = EINVAL;
1410 goto exit;
1411 }
1412
1413 /*
1414 * The caller may not have adequate permissions, and the venus
1415 * operation may fail, but it doesn't hurt from a correctness
1416 * viewpoint to invalidate cache entries.
1417 * XXX Why isn't this done after the venus_rmdir call?
1418 */
1419 /* Look up child in name cache (by name, from parent). */
1420 cp = coda_nc_lookup(dcp, nm, len, cred);
1421 /* If found, remove all children of the child (., ..). */
1422 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1423
1424 /* Remove child's own entry. */
1425 coda_nc_zapfile(dcp, nm, len);
1426
1427 /* Invalidate parent's attr cache (the modification time has changed). */
1428 dcp->c_flags &= ~C_VATTR;
1429
1430 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1431
1432 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1433
1434 exit:
1435 /* unlock and release child */
1436 if (dvp == vp) {
1437 vrele(vp);
1438 } else {
1439 vput(vp);
1440 }
1441
1442 return(error);
1443 }
1444
1445 int
1446 coda_symlink(void *v)
1447 {
1448 /* true args */
1449 struct vop_symlink_v3_args *ap = v;
1450 vnode_t *dvp = ap->a_dvp;
1451 struct cnode *dcp = VTOC(dvp);
1452 /* a_vpp is used in place below */
1453 struct componentname *cnp = ap->a_cnp;
1454 struct vattr *tva = ap->a_vap;
1455 char *path = ap->a_target;
1456 kauth_cred_t cred = cnp->cn_cred;
1457 struct lwp *l = curlwp;
1458 /* locals */
1459 int error;
1460 u_long saved_cn_flags;
1461 const char *nm = cnp->cn_nameptr;
1462 int len = cnp->cn_namelen;
1463 int plen = strlen(path);
1464
1465 /*
1466 * Here's the strategy for the moment: perform the symlink, then
1467 * do a lookup to grab the resulting vnode. I know this requires
1468 * two communications with Venus for a new symbolic link, but
1469 * that's the way the ball bounces. I don't yet want to change
1470 * the way the Mach symlink works. When Mach support is
1471 * deprecated, we should change symlink so that the common case
1472 * returns the resultant vnode in a vpp argument.
1473 */
1474
1475 MARK_ENTRY(CODA_SYMLINK_STATS);
1476
1477 /* Check for symlink of control object. */
1478 if (IS_CTL_NAME(dvp, nm, len)) {
1479 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1480 error = EACCES;
1481 goto exit;
1482 }
1483
1484 if (plen+1 > CODA_MAXPATHLEN) {
1485 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1486 error = EINVAL;
1487 goto exit;
1488 }
1489
1490 if (len+1 > CODA_MAXNAMLEN) {
1491 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1492 error = EINVAL;
1493 goto exit;
1494 }
1495
1496 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1497
1498 /* Invalidate the parent's attr cache (modification time has changed). */
1499 dcp->c_flags &= ~C_VATTR;
1500
1501 if (!error) {
1502 /*
1503 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1504 * these are defined only for VOP_LOOKUP. We desire to reuse
1505 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1506 * stray flags passed to us. Such stray flags can occur because
1507 * sys_symlink makes a namei call and then reuses the
1508 * componentname structure.
1509 */
1510 /*
1511 * XXX Arguably we should create our own componentname structure
1512 * and not reuse the one that was passed in.
1513 */
1514 saved_cn_flags = cnp->cn_flags;
1515 cnp->cn_flags &= ~(MODMASK | OPMASK);
1516 cnp->cn_flags |= LOOKUP;
1517 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1518 cnp->cn_flags = saved_cn_flags;
1519 }
1520
1521 exit:
1522 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1523 return(error);
1524 }
1525
1526 /*
1527 * Read directory entries.
1528 */
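/*
 * The container file holds raw struct venus_dirent records; they are
 * read in CODA_DIRBLKSIZ chunks and converted one at a time into
 * NetBSD struct dirent entries before being copied out to the caller.
 */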
1529 int
1530 coda_readdir(void *v)
1531 {
1532 /* true args */
1533 struct vop_readdir_args *ap = v;
1534 vnode_t *vp = ap->a_vp;
1535 struct cnode *cp = VTOC(vp);
1536 struct uio *uiop = ap->a_uio;
1537 kauth_cred_t cred = ap->a_cred;
1538 int *eofflag = ap->a_eofflag;
1539 /* upcall decl */
1540 /* locals */
1541 size_t initial_resid = uiop->uio_resid;
1542 int error = 0;
1543 int opened_internally = 0;
1544 int ncookies;
1545 char *buf;
1546 struct vnode *cvp;
1547 struct dirent *dirp;
1548
1549 MARK_ENTRY(CODA_READDIR_STATS);
1550
1551 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1552 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1553 (long long) uiop->uio_offset)); )
1554
1555 /* Check for readdir of control object. */
1556 if (IS_CTL_VP(vp)) {
1557 MARK_INT_FAIL(CODA_READDIR_STATS);
1558 return ENOENT;
1559 }
1560
1561 /* If directory is not already open do an "internal open" on it. */
1562 if (cp->c_ovp == NULL) {
1563 opened_internally = 1;
1564 MARK_INT_GEN(CODA_OPEN_STATS);
1565 error = VOP_OPEN(vp, FREAD, cred);
1566 #ifdef CODA_VERBOSE
1567 printf("%s: Internally Opening %p\n", __func__, vp);
1568 #endif
1569 if (error)
1570 return error;
1571 KASSERT(cp->c_ovp != NULL);
1572 }
1573 cvp = cp->c_ovp;
1574
1575 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1576 __func__, coda_f2s(&cp->c_fid), vrefcnt(cvp))); )
1577
1578 if (ap->a_ncookies) {
1579 ncookies = ap->a_uio->uio_resid / _DIRENT_RECLEN(dirp, 1);
1580 *ap->a_ncookies = 0;
1581 *ap->a_cookies = malloc(ncookies * sizeof (off_t),
1582 M_TEMP, M_WAITOK);
1583 }
1584 buf = kmem_alloc(CODA_DIRBLKSIZ, KM_SLEEP);
1585 dirp = kmem_alloc(sizeof(*dirp), KM_SLEEP);
1586 vn_lock(cvp, LK_EXCLUSIVE | LK_RETRY);
1587
1588 while (error == 0) {
1589 size_t resid = 0;
1590 char *dp, *ep;
1591
1592 if (!ALIGNED_POINTER(uiop->uio_offset, uint32_t)) {
1593 error = EINVAL;
1594 break;
1595 }
1596 error = vn_rdwr(UIO_READ, cvp, buf,
1597 CODA_DIRBLKSIZ, uiop->uio_offset,
1598 UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, curlwp);
1599 if (error || resid == CODA_DIRBLKSIZ)
1600 break;
1601 for (dp = buf, ep = dp + CODA_DIRBLKSIZ - resid; dp < ep; ) {
1602 off_t off;
1603 struct venus_dirent *vd = (struct venus_dirent *)dp;
1604
1605 if (!ALIGNED_POINTER(vd, uint32_t) ||
1606 !ALIGNED_POINTER(vd->d_reclen, uint32_t) ||
1607 vd->d_reclen == 0) {
1608 error = EINVAL;
1609 break;
1610 }
1611 if (dp + vd->d_reclen > ep) {
1612 error = ENAMETOOLONG;
1613 break;
1614 }
1615 if (vd->d_namlen == 0) {
1616 uiop->uio_offset += vd->d_reclen;
1617 dp += vd->d_reclen;
1618 continue;
1619 }
1620
1621 dirp->d_fileno = vd->d_fileno;
1622 dirp->d_type = vd->d_type;
1623 dirp->d_namlen = vd->d_namlen;
1624 dirp->d_reclen = _DIRENT_SIZE(dirp);
1625 strlcpy(dirp->d_name, vd->d_name, dirp->d_namlen + 1);
1626
1627 if (uiop->uio_resid < dirp->d_reclen) {
1628 error = ENAMETOOLONG;
1629 break;
1630 }
1631
1632 off = uiop->uio_offset;
1633 error = uiomove(dirp, dirp->d_reclen, uiop);
1634 uiop->uio_offset = off;
1635 if (error)
1636 break;
1637
1638 uiop->uio_offset += vd->d_reclen;
1639 dp += vd->d_reclen;
1640 if (ap->a_ncookies)
1641 (*ap->a_cookies)[(*ap->a_ncookies)++] =
1642 uiop->uio_offset;
1643 }
1644 }
1645
1646 VOP_UNLOCK(cvp);
1647 kmem_free(dirp, sizeof(*dirp));
1648 kmem_free(buf, CODA_DIRBLKSIZ);
1649 if (eofflag && error == 0)
1650 *eofflag = 1;
1651 if (uiop->uio_resid < initial_resid && error == ENAMETOOLONG)
1652 error = 0;
1653 if (ap->a_ncookies && error) {
1654 free(*ap->a_cookies, M_TEMP);
1655 *ap->a_ncookies = 0;
1656 *ap->a_cookies = NULL;
1657 }
1658 if (error)
1659 MARK_INT_FAIL(CODA_READDIR_STATS);
1660 else
1661 MARK_INT_SAT(CODA_READDIR_STATS);
1662
1663 /* Do an "internal close" if necessary. */
1664 if (opened_internally) {
1665 MARK_INT_GEN(CODA_CLOSE_STATS);
1666 (void)VOP_CLOSE(vp, FREAD, cred);
1667 }
1668
1669 return error;
1670 }
1671
1672 /*
1673 * Convert from file system blocks to device blocks
1674 */
1675 int
1676 coda_bmap(void *v)
1677 {
1678 /* XXX on the global proc */
1679 /* true args */
1680 struct vop_bmap_args *ap = v;
1681 vnode_t *vp __unused = ap->a_vp; /* file's vnode */
1682 daddr_t bn __unused = ap->a_bn; /* fs block number */
1683 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */
1684 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1685 struct lwp *l __unused = curlwp;
1686 /* upcall decl */
1687 /* locals */
1688
1689 *vpp = (vnode_t *)0;
1690 myprintf(("coda_bmap called!\n"));
1691 return(EINVAL);
1692 }
1693
1694 /*
1695 * I don't think the following two things are used anywhere, so I've
1696 * commented them out
1697 *
1698 * struct buf *async_bufhead;
1699 * int async_daemon_count;
1700 */
1701 int
1702 coda_strategy(void *v)
1703 {
1704 /* true args */
1705 struct vop_strategy_args *ap = v;
1706 struct buf *bp __unused = ap->a_bp;
1707 struct lwp *l __unused = curlwp;
1708 /* upcall decl */
1709 /* locals */
1710
1711 myprintf(("coda_strategy called! "));
1712 return(EINVAL);
1713 }
1714
1715 int
1716 coda_reclaim(void *v)
1717 {
1718 /* true args */
1719 struct vop_reclaim_v2_args *ap = v;
1720 vnode_t *vp = ap->a_vp;
1721 struct cnode *cp = VTOC(vp);
1722 /* upcall decl */
1723 /* locals */
1724
1725 VOP_UNLOCK(vp);
1726
1727 /*
1728 * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1729 */
1730 ENTRY;
1731
1732 if (IS_UNMOUNTING(cp)) {
1733 #ifdef DEBUG
1734 if (VTOC(vp)->c_ovp) {
1735 if (IS_UNMOUNTING(cp))
1736 printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1737 }
1738 #endif
1739 } else {
1740 #ifdef OLD_DIAGNOSTIC
1741 if (vrefcnt(vp) != 0)
1742 printf("%s: pushing active %p\n", __func__, vp);
1743 if (VTOC(vp)->c_ovp) {
1744 panic("%s: c_ovp not void", __func__);
1745 }
1746 #endif
1747 }
1748 /* If an array has been allocated to hold the symlink, deallocate it */
1749 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
1750 if (cp->c_symlink == NULL)
1751 panic("%s: null symlink pointer in cnode", __func__);
1752
1753 CODA_FREE(cp->c_symlink, cp->c_symlen);
1754 cp->c_flags &= ~C_SYMLINK;
1755 cp->c_symlen = 0;
1756 }
1757
1758 mutex_enter(vp->v_interlock);
1759 mutex_enter(&cp->c_lock);
1760 SET_VTOC(vp) = NULL;
1761 mutex_exit(&cp->c_lock);
1762 mutex_exit(vp->v_interlock);
1763 mutex_destroy(&cp->c_lock);
1764 kmem_free(cp, sizeof(*cp));
1765
1766 return (0);
1767 }
1768
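/*
 * Locking is delegated to genfs_lock/genfs_unlock/genfs_islocked; the
 * wrappers below only add optional debug output when coda_lockdebug
 * is set.
 */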
1769 int
1770 coda_lock(void *v)
1771 {
1772 /* true args */
1773 struct vop_lock_args *ap = v;
1774 vnode_t *vp = ap->a_vp;
1775 struct cnode *cp = VTOC(vp);
1776 /* upcall decl */
1777 /* locals */
1778
1779 ENTRY;
1780
1781 if (coda_lockdebug) {
1782 myprintf(("Attempting lock on %s\n",
1783 coda_f2s(&cp->c_fid)));
1784 }
1785
1786 return genfs_lock(v);
1787 }
1788
1789 int
1790 coda_unlock(void *v)
1791 {
1792 /* true args */
1793 struct vop_unlock_args *ap = v;
1794 vnode_t *vp = ap->a_vp;
1795 struct cnode *cp = VTOC(vp);
1796 /* upcall decl */
1797 /* locals */
1798
1799 ENTRY;
1800 if (coda_lockdebug) {
1801 myprintf(("Attempting unlock on %s\n",
1802 coda_f2s(&cp->c_fid)));
1803 }
1804
1805 return genfs_unlock(v);
1806 }
1807
1808 int
1809 coda_islocked(void *v)
1810 {
1811 /* true args */
1812 ENTRY;
1813
1814 return genfs_islocked(v);
1815 }
1816
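/*
 * No pathconf variables are supported yet; every name is rejected
 * with EINVAL.
 */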
1817 int
1818 coda_pathconf(void *v)
1819 {
1820 struct vop_pathconf_args *ap = v;
1821
1822 switch (ap->a_name) {
1823 default:
1824 return EINVAL;
1825 }
1826 /* NOTREACHED */
1827 }
1828
1829 /*
1830 * Given a device and inode, obtain a locked vnode. One reference is
1831 * obtained and passed back to the caller.
1832 */
1833 int
1834 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1835 {
1836 int error;
1837 struct mount *mp;
1838
1839 /* Obtain mount point structure from device. */
1840 if (!(mp = devtomp(dev))) {
1841 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1842 (unsigned long long)dev));
1843 return(ENXIO);
1844 }
1845
1846 /*
1847 * Obtain vnode from mount point and inode.
1848 */
1849 error = VFS_VGET(mp, ino, LK_EXCLUSIVE, vpp);
1850 if (error) {
1851 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1852 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1853 return(ENOENT);
1854 }
1855 /* share the underlying vnode lock with the coda vnode */
1856 vshareilock(*vpp, uvp);
1857 KASSERT(VOP_ISLOCKED(*vpp));
1858 return(0);
1859 }
1860
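/*
 * Debugging helper: dump the interesting fields of a struct vattr
 * via myprintf.
 */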
1861 static void
1862 coda_print_vattr(struct vattr *attr)
1863 {
1864 const char *typestr;
1865
1866 switch (attr->va_type) {
1867 case VNON:
1868 typestr = "VNON";
1869 break;
1870 case VREG:
1871 typestr = "VREG";
1872 break;
1873 case VDIR:
1874 typestr = "VDIR";
1875 break;
1876 case VBLK:
1877 typestr = "VBLK";
1878 break;
1879 case VCHR:
1880 typestr = "VCHR";
1881 break;
1882 case VLNK:
1883 typestr = "VLNK";
1884 break;
1885 case VSOCK:
1886 typestr = "VSCK";
1887 break;
1888 case VFIFO:
1889 typestr = "VFFO";
1890 break;
1891 case VBAD:
1892 typestr = "VBAD";
1893 break;
1894 default:
1895 typestr = "????";
1896 break;
1897 }
1898
1899
1900 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1901 typestr, (int)attr->va_mode, (int)attr->va_uid,
1902 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1903
1904 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1905 (int)attr->va_fileid, (int)attr->va_nlink,
1906 (int)attr->va_size,
1907 (int)attr->va_blocksize,(int)attr->va_bytes));
1908 myprintf((" gen %ld flags %ld vaflags %d\n",
1909 attr->va_gen, attr->va_flags, attr->va_vaflags));
1910 myprintf((" atime sec %d nsec %d\n",
1911 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1912 myprintf((" mtime sec %d nsec %d\n",
1913 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1914 myprintf((" ctime sec %d nsec %d\n",
1915 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1916 }
1917
1918 /*
1919 * Return a vnode for the given fid.
1920 * If no cnode exists for this fid create one and put it
1921 * in a table hashed by coda_f2i(). If the cnode for
1922 * this fid is already in the table return it (ref count is
1923 * incremented by coda_find). The cnode will be flushed from the
1924 * table when coda_inactive calls coda_unsave.
1925 */
1926 struct cnode *
1927 make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1928 {
1929 int error __diagused;
1930 struct vnode *vp;
1931 struct cnode *cp;
1932
1933 error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp);
1934 KASSERT(error == 0);
1935
1936 mutex_enter(vp->v_interlock);
1937 cp = VTOC(vp);
1938 KASSERT(cp != NULL);
1939 mutex_enter(&cp->c_lock);
1940 mutex_exit(vp->v_interlock);
1941
1942 if (vp->v_type != type) {
1943 if (vp->v_type == VCHR || vp->v_type == VBLK)
1944 spec_node_destroy(vp);
1945 vp->v_type = type;
1946 if (type == VCHR || type == VBLK)
1947 spec_node_init(vp, NODEV);
1948 uvm_vnp_setsize(vp, 0);
1949 }
1950 mutex_exit(&cp->c_lock);
1951
1952 return cp;
1953 }
1954
1955 /*
1956 * coda_getpages may be called on a vnode which has not been opened,
1957 * e.g. to fault in pages to execute a program. In that case, we must
1958 * open the file to get the container. The vnode may or may not be
1959 * locked, and we must leave it in the same state.
1960 */
1961 int
1962 coda_getpages(void *v)
1963 {
1964 struct vop_getpages_args /* {
1965 vnode_t *a_vp;
1966 voff_t a_offset;
1967 struct vm_page **a_m;
1968 int *a_count;
1969 int a_centeridx;
1970 vm_prot_t a_access_type;
1971 int a_advice;
1972 int a_flags;
1973 } */ *ap = v;
1974 vnode_t *vp = ap->a_vp, *cvp;
1975 struct cnode *cp = VTOC(vp);
1976 struct lwp *l = curlwp;
1977 kauth_cred_t cred = l->l_cred;
1978 int error, cerror;
1979 int waslocked; /* 1 if vnode lock was held on entry */
1980 int didopen = 0; /* 1 if we opened container file */
1981 krw_t op;
1982
1983 /*
1984 * Handle a case that uvm_fault doesn't quite use yet.
1985 * See layer_vnops.c for inspiration.
1986 */
1987 if (ap->a_flags & PGO_LOCKED) {
1988 return EBUSY;
1989 }
1990
1991 KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));
1992
1993 /* Check for control object. */
1994 if (IS_CTL_VP(vp)) {
1995 #ifdef CODA_VERBOSE
1996 printf("%s: control object %p\n", __func__, vp);
1997 #endif
1998 return(EINVAL);
1999 }
2000
2001 /*
2002 * XXX It's really not ok to be releasing the lock we get,
2003 * because we could be overlapping with another call to
2004 * getpages and drop a lock they are relying on. We need to
2005 * figure out whether getpages ever is called holding the
2006 * lock, and if we should serialize getpages calls by some
2007 * mechanism.
2008 */
2009 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
2010 op = rw_lock_op(vp->v_uobj.vmobjlock);
2011 waslocked = VOP_ISLOCKED(vp);
2012
2013 /* Get container file if not already present. */
2014 cvp = cp->c_ovp;
2015 if (cvp == NULL) {
2016 /*
2017 * VOP_OPEN requires a locked vnode. We must avoid
2018 * locking the vnode if it is already locked, and
2019 * leave it in the same state on exit.
2020 */
2021 if (waslocked == 0) {
2022 rw_exit(vp->v_uobj.vmobjlock);
2023 cerror = vn_lock(vp, LK_EXCLUSIVE);
2024 if (cerror) {
2025 #ifdef CODA_VERBOSE
2026 printf("%s: can't lock vnode %p\n",
2027 __func__, vp);
2028 #endif
2029 return cerror;
2030 }
2031 #ifdef CODA_VERBOSE
2032 printf("%s: locked vnode %p\n", __func__, vp);
2033 #endif
2034 }
2035
2036 /*
2037 * Open file (causes upcall to venus).
2038 * XXX Perhaps we should not fully open the file, but
2039 * simply obtain a container file.
2040 */
2041 /* XXX Is it ok to do this while holding the mutex? */
2042 cerror = VOP_OPEN(vp, FREAD, cred);
2043
2044 if (cerror) {
2045 #ifdef CODA_VERBOSE
2046 printf("%s: cannot open vnode %p => %d\n", __func__,
2047 vp, cerror);
2048 #endif
2049 if (waslocked == 0)
2050 VOP_UNLOCK(vp);
2051 return cerror;
2052 }
2053
2054 #ifdef CODA_VERBOSE
2055 printf("%s: opened vnode %p\n", __func__, vp);
2056 #endif
2057 cvp = cp->c_ovp;
2058 didopen = 1;
2059 if (waslocked == 0)
2060 rw_enter(vp->v_uobj.vmobjlock, op);
2061 }
2062 KASSERT(cvp != NULL);
2063
2064 /* Munge the arg structure to refer to the container vnode. */
2065 KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock);
2066 ap->a_vp = cp->c_ovp;
2067
2068 /* Finally, call getpages on it. */
2069 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2070
2071 /* If we opened the vnode, we must close it. */
2072 if (didopen) {
2073 /*
2074 * VOP_CLOSE requires a locked vnode, but we are still
2075 * holding the lock (or riding a caller's lock).
2076 */
2077 cerror = VOP_CLOSE(vp, FREAD, cred);
2078 #ifdef CODA_VERBOSE
2079 if (cerror != 0)
2080 /* XXX How should we handle this? */
2081 printf("%s: closed vnode %p -> %d\n", __func__,
2082 vp, cerror);
2083 #endif
2084
2085 /* If we obtained a lock, drop it. */
2086 if (waslocked == 0)
2087 VOP_UNLOCK(vp);
2088 }
2089
2090 return error;
2091 }
2092
2093 /*
2094 * The protocol requires v_interlock to be held by the caller.
2095 */
2096 int
2097 coda_putpages(void *v)
2098 {
2099 struct vop_putpages_args /* {
2100 vnode_t *a_vp;
2101 voff_t a_offlo;
2102 voff_t a_offhi;
2103 int a_flags;
2104 } */ *ap = v;
2105 vnode_t *vp = ap->a_vp, *cvp;
2106 struct cnode *cp = VTOC(vp);
2107 int error;
2108
2109 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
2110
2111 /* Check for control object. */
2112 if (IS_CTL_VP(vp)) {
2113 rw_exit(vp->v_uobj.vmobjlock);
2114 #ifdef CODA_VERBOSE
2115 printf("%s: control object %p\n", __func__, vp);
2116 #endif
2117 return 0;
2118 }
2119
2120 /*
2121 * If container object is not present, then there are no pages
2122 * to put; just return without error. This happens all the
2123 * time, apparently during discard of a closed vnode (which
2124 * trivially can't have dirty pages).
2125 */
2126 cvp = cp->c_ovp;
2127 if (cvp == NULL) {
2128 rw_exit(vp->v_uobj.vmobjlock);
2129 return 0;
2130 }
2131
2132 /* Munge the arg structure to refer to the container vnode. */
2133 KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock);
2134 ap->a_vp = cvp;
2135
2136 /* Finally, call putpages on it. */
2137 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2138
2139 return error;
2140 }
2141