/* $NetBSD: coda_subr.c,v 1.27 2012/08/02 16:06:58 christos Exp $ */

/*
 *
 * Coda: an Experimental Distributed File System
 * Release 3.1
 *
 * Copyright (c) 1987-1998 Carnegie Mellon University
 * All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * @(#) coda/coda_subr.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved. The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University. Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan. */

/* NOTES: rvb
 * 1. Added coda_unmounting to mark all cnodes as being UNMOUNTING. This has
 *    to be done before dounmount is called, because some of the routines
 *    that dounmount calls before coda_unmount might try to force flushes
 *    to venus. The vnode pager does this.
 * 2. coda_unmounting marks all cnodes by scanning coda_cache.
 * 3. coda_checkunmounting (under DEBUG) checks all cnodes by chasing the
 *    vnodes under the /coda mount point.
 * 4. coda_cacheprint (under DEBUG) prints names with vnode/cnode address.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_subr.c,v 1.27 2012/08/02 16:06:58 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/kauth.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_subr.h>
#include <coda/coda_namecache.h>

#ifdef _KERNEL_OPT
#include "opt_coda_compat.h"
#endif

int coda_active = 0;
int coda_reuse = 0;
int coda_new = 0;

struct cnode *coda_freelist = NULL;
struct cnode *coda_cache[CODA_CACHESIZE];
MALLOC_DEFINE(M_CODA, "coda", "Coda file system structures and tables");

int codadebug = 0;
int coda_printf_delay = 0;  /* in microseconds */
int coda_vnop_print_entry = 0;
int coda_vfsop_print_entry = 0;

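/*
 * Both the cnode freelist and the coda_cache[] hash chains are singly
 * linked through c_next; CNODE_NEXT() is the accessor used for both.
 */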
#define CNODE_NEXT(cp) ((cp)->c_next)

#ifdef CODA_COMPAT_5
#define coda_hash(fid) \
    (((fid)->Volume + (fid)->Vnode) & (CODA_CACHESIZE-1))
#define IS_DIR(cnode) (cnode.Vnode & 0x1)
#else
#define coda_hash(fid) \
    (coda_f2i(fid) & (CODA_CACHESIZE-1))
#define IS_DIR(cnode) (cnode.opaque[2] & 0x1)
#endif
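/*
 * Note that coda_hash() masks with (CODA_CACHESIZE-1), which assumes
 * CODA_CACHESIZE is a power of two.  IS_DIR() relies on the Coda
 * convention that directories have odd vnode numbers (see the
 * CODA_PURGEFID notes below).
 */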

struct vnode *coda_ctlvp;

/*
 * Allocate a cnode.
 */
struct cnode *
coda_alloc(void)
{
    struct cnode *cp;

    if (coda_freelist) {
        cp = coda_freelist;
        coda_freelist = CNODE_NEXT(cp);
        coda_reuse++;
    }
    else {
        CODA_ALLOC(cp, struct cnode *, sizeof(struct cnode));
        /* NetBSD vnodes don't have any Pager info in them ('cause there are
           no external pagers, duh!) */
#define VNODE_VM_INFO_INIT(vp)  /* MT */
        VNODE_VM_INFO_INIT(CTOV(cp));
        coda_new++;
    }
    memset(cp, 0, sizeof (struct cnode));

    return(cp);
}

/*
 * Deallocate a cnode.
 */
void
coda_free(struct cnode *cp)
{

    CNODE_NEXT(cp) = coda_freelist;
    coda_freelist = cp;
}

/*
 * Put a cnode in the hash table
 */
void
coda_save(struct cnode *cp)
{
    CNODE_NEXT(cp) = coda_cache[coda_hash(&cp->c_fid)];
    coda_cache[coda_hash(&cp->c_fid)] = cp;
}

/*
 * Remove a cnode from the hash table
 */
void
coda_unsave(struct cnode *cp)
{
    struct cnode *ptr;
    struct cnode *ptrprev = NULL;

    ptr = coda_cache[coda_hash(&cp->c_fid)];
    while (ptr != NULL) {
        if (ptr == cp) {
            if (ptrprev == NULL) {
                coda_cache[coda_hash(&cp->c_fid)]
                    = CNODE_NEXT(ptr);
            } else {
                CNODE_NEXT(ptrprev) = CNODE_NEXT(ptr);
            }
            CNODE_NEXT(cp) = NULL;

            return;
        }
        ptrprev = ptr;
        ptr = CNODE_NEXT(ptr);
    }
}

/*
 * Lookup a cnode by fid. If the cnode is dying, it is bogus so skip it.
 * NOTE: this allows multiple cnodes with same fid -- dcs 1/25/95
 */
struct cnode *
coda_find(CodaFid *fid)
{
    struct cnode *cp;

    cp = coda_cache[coda_hash(fid)];
    while (cp) {
        if (coda_fid_eq(&(cp->c_fid), fid) &&
            (!IS_UNMOUNTING(cp)))
        {
            coda_active++;
            return(cp);
        }
        cp = CNODE_NEXT(cp);
    }
    return(NULL);
}

/*
 * coda_kill is called as a side effect to vcopen. To prevent any
 * cnodes left around from an earlier run of a venus or warden from
 * causing problems with the new instance, mark any outstanding cnodes
 * as dying. Future operations on these cnodes should fail (excepting
 * coda_inactive of course!). Since multiple venii/wardens can be
 * running, only kill the cnodes for a particular entry in the
 * coda_mnttbl. -- DCS 12/1/94 */

int
coda_kill(struct mount *whoIam, enum dc_status dcstat)
{
    int hash, count = 0;
    struct cnode *cp;

    /*
     * Algorithm is as follows:
     *
     *     First, flush whatever entries we can from the name cache.
     *
     *     Then step through whatever is left and count (and, with
     *     debugging enabled, report) the live cnodes that still belong
     *     to this mount.
     */

    /* This is slightly overkill, but should work. Eventually it'd be
     * nice to only flush those entries from the namecache that
     * reference a vnode in this vfs. */
    coda_nc_flush(dcstat);

    for (hash = 0; hash < CODA_CACHESIZE; hash++) {
        for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
            if (CTOV(cp)->v_mount == whoIam) {
#ifdef DEBUG
                printf("coda_kill: vp %p, cp %p\n", CTOV(cp), cp);
#endif
                count++;
                CODADEBUG(CODA_FLUSH,
                    myprintf(("Live cnode fid %s flags %d count %d\n",
                        coda_f2s(&cp->c_fid),
                        cp->c_flags,
                        CTOV(cp)->v_usecount)); );
            }
        }
    }
    return count;
}

/*
 * There are two reasons why a cnode may be in use: it may be in the
 * name cache, or it may be executing.
 */
void
coda_flush(enum dc_status dcstat)
{
    int hash;
    struct cnode *cp;

    coda_clstat.ncalls++;
    coda_clstat.reqs[CODA_FLUSH]++;

    coda_nc_flush(dcstat);  /* flush files from the name cache */

    for (hash = 0; hash < CODA_CACHESIZE; hash++) {
        for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
            if (!IS_DIR(cp->c_fid)) /* only files can be executed */
                coda_vmflush(cp);
        }
    }
}

/*
 * As a debugging measure, print out any cnodes that lived through a
 * name cache flush.
 */
void
coda_testflush(void)
{
    int hash;
    struct cnode *cp;

    for (hash = 0; hash < CODA_CACHESIZE; hash++) {
        for (cp = coda_cache[hash];
             cp != NULL;
             cp = CNODE_NEXT(cp)) {
            myprintf(("Live cnode fid %s count %d\n",
                coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount));
        }
    }
}

/*
 * Step through all cnodes and mark them unmounting.  NetBSD kernels
 * may try to fsync them now that venus is dead, which would be a bad
 * thing.
 */
void
coda_unmounting(struct mount *whoIam)
{
    int hash;
    struct cnode *cp;

    for (hash = 0; hash < CODA_CACHESIZE; hash++) {
        for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
            if (CTOV(cp)->v_mount == whoIam) {
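                /*
                 * Wake up anyone sleeping on this cnode's lock before
                 * flagging it as unmounting.
                 */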
                if (cp->c_flags & (C_LOCKED|C_WANTED)) {
                    printf("coda_unmounting: Unlocking %p\n", cp);
                    cp->c_flags &= ~(C_LOCKED|C_WANTED);
                    wakeup((void *) cp);
                }
                cp->c_flags |= C_UNMOUNTING;
            }
        }
    }
}

#ifdef DEBUG
void
coda_checkunmounting(struct mount *mp)
{
    struct vnode *vp;
    struct cnode *cp;
    int count = 0, bad = 0;
loop:
    TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
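        /*
         * If this vnode no longer belongs to our mount, the vnode list
         * has changed under us, so restart the scan from the top.
         */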
        if (vp->v_mount != mp)
            goto loop;
        cp = VTOC(vp);
        count++;
        if (!(cp->c_flags & C_UNMOUNTING)) {
            bad++;
            printf("vp %p, cp %p missed\n", vp, cp);
            cp->c_flags |= C_UNMOUNTING;
        }
    }
}

void
coda_cacheprint(struct mount *whoIam)
{
    int hash;
    struct cnode *cp;
    int count = 0;

    printf("coda_cacheprint: coda_ctlvp %p, cp %p", coda_ctlvp, VTOC(coda_ctlvp));
    coda_nc_name(VTOC(coda_ctlvp));
    printf("\n");

    for (hash = 0; hash < CODA_CACHESIZE; hash++) {
        for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) {
            if (CTOV(cp)->v_mount == whoIam) {
                printf("coda_cacheprint: vp %p, cp %p", CTOV(cp), cp);
                coda_nc_name(cp);
                printf("\n");
                count++;
            }
        }
    }
    printf("coda_cacheprint: count %d\n", count);
}
#endif

/*
 * There are 6 cases where invalidations occur. The semantics of each
 * is listed here.
 *
 * CODA_FLUSH     -- flush all entries from the name cache and the cnode cache.
 * CODA_PURGEUSER -- flush all entries from the name cache for a specific user.
 *                   This call is a result of token expiration.
 *
 * The next two are the result of callbacks on a file or directory.
 * CODA_ZAPDIR    -- flush the attributes for the dir from its cnode.
 *                   Zap all children of this directory from the namecache.
 * CODA_ZAPFILE   -- flush the attributes for a file.
 *
 * The fifth is a result of Venus detecting an inconsistent file.
 * CODA_PURGEFID  -- flush the attribute for the file.
 *                   If it is a dir (odd vnode), purge its
 *                   children from the namecache.
 *                   Remove the file from the namecache.
 *
 * The sixth allows Venus to replace local fids with global ones
 * during reintegration.
 *
 * CODA_REPLACE   -- replace one CodaFid with another throughout the name cache
 */

int handleDownCall(int opcode, union outputArgs *out)
{
    int error;

    /* Handle invalidate requests. */
    switch (opcode) {
    case CODA_FLUSH : {

        coda_flush(IS_DOWNCALL);

        CODADEBUG(CODA_FLUSH, coda_testflush();)  /* print remaining cnodes */
        return(0);
    }

    case CODA_PURGEUSER : {
        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_PURGEUSER]++;

        /* XXX - need to prevent fsync's */
#ifdef CODA_COMPAT_5
        coda_nc_purge_user(out->coda_purgeuser.cred.cr_uid, IS_DOWNCALL);
#else
        coda_nc_purge_user(out->coda_purgeuser.uid, IS_DOWNCALL);
#endif
        return(0);
    }

    case CODA_ZAPFILE : {
        struct cnode *cp;

        error = 0;
        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_ZAPFILE]++;

        cp = coda_find(&out->coda_zapfile.Fid);
        if (cp != NULL) {
            vref(CTOV(cp));

            cp->c_flags &= ~C_VATTR;
            if (CTOV(cp)->v_iflag & VI_TEXT)
                error = coda_vmflush(cp);
            CODADEBUG(CODA_ZAPFILE, myprintf((
                "zapfile: fid = %s, refcnt = %d, error = %d\n",
                coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount - 1, error)););
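            /*
             * If our vref above holds the only remaining reference,
             * mark the cnode as being purged before the final vrele.
             */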
            if (CTOV(cp)->v_usecount == 1) {
                cp->c_flags |= C_PURGING;
            }
            vrele(CTOV(cp));
        }

        return(error);
    }

    case CODA_ZAPDIR : {
        struct cnode *cp;

        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_ZAPDIR]++;

        cp = coda_find(&out->coda_zapdir.Fid);
        if (cp != NULL) {
            vref(CTOV(cp));

            cp->c_flags &= ~C_VATTR;
            coda_nc_zapParentfid(&out->coda_zapdir.Fid, IS_DOWNCALL);

            CODADEBUG(CODA_ZAPDIR, myprintf((
                "zapdir: fid = %s, refcnt = %d\n",
                coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount - 1)););
            if (CTOV(cp)->v_usecount == 1) {
                cp->c_flags |= C_PURGING;
            }
            vrele(CTOV(cp));
        }

        return(0);
    }

    case CODA_PURGEFID : {
        struct cnode *cp;

        error = 0;
        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_PURGEFID]++;

        cp = coda_find(&out->coda_purgefid.Fid);
        if (cp != NULL) {
            vref(CTOV(cp));
            if (IS_DIR(out->coda_purgefid.Fid)) { /* Vnode is a directory */
                coda_nc_zapParentfid(&out->coda_purgefid.Fid,
                    IS_DOWNCALL);
            }
            cp->c_flags &= ~C_VATTR;
            coda_nc_zapfid(&out->coda_purgefid.Fid, IS_DOWNCALL);
            if (!(IS_DIR(out->coda_purgefid.Fid))
                && (CTOV(cp)->v_iflag & VI_TEXT)) {

                error = coda_vmflush(cp);
            }
            CODADEBUG(CODA_PURGEFID, myprintf((
                "purgefid: fid = %s, refcnt = %d, error = %d\n",
                coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount - 1, error)););
            if (CTOV(cp)->v_usecount == 1) {
                cp->c_flags |= C_PURGING;
            }
            vrele(CTOV(cp));
        }
        return(error);
    }

    case CODA_REPLACE : {
        struct cnode *cp = NULL;

        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_REPLACE]++;

        cp = coda_find(&out->coda_replace.OldFid);
        if (cp != NULL) {
            /* remove the cnode from the hash table, replace the fid,
               and reinsert */
            vref(CTOV(cp));
            coda_unsave(cp);
            cp->c_fid = out->coda_replace.NewFid;
            coda_save(cp);

            CODADEBUG(CODA_REPLACE, myprintf((
                "replace: oldfid = %s, newfid = %s, cp = %p\n",
                coda_f2s(&out->coda_replace.OldFid),
                coda_f2s(&cp->c_fid), cp));)
            vrele(CTOV(cp));
        }
        return (0);
    }
    default:
        myprintf(("handleDownCall: unknown opcode %d\n", opcode));
        return (EINVAL);
    }
}

/* coda_grab_vnode: lives in either cfs_mach.c or cfs_nbsd.c */

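/*
 * coda_vmflush: nothing to do here on the NetBSD port; the vnode has no
 * external pager state to flush (see the note in coda_alloc above), so
 * just report success.
 */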
int
coda_vmflush(struct cnode *cp)
{
    return 0;
}


/*
 * kernel-internal debugging switches
 */

void coda_debugon(void)
{
    codadebug = -1;
    coda_nc_debug = -1;
    coda_vnop_print_entry = 1;
    coda_psdev_print_entry = 1;
    coda_vfsop_print_entry = 1;
}

void coda_debugoff(void)
{
    codadebug = 0;
    coda_nc_debug = 0;
    coda_vnop_print_entry = 0;
    coda_psdev_print_entry = 0;
    coda_vfsop_print_entry = 0;
}

/* How to print a ucred */
void
coda_print_cred(kauth_cred_t cred)
{

    uint16_t ngroups;
    int i;

    myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
        kauth_cred_geteuid(cred)));

    ngroups = kauth_cred_ngroups(cred);
    for (i = 0; i < ngroups; i++)
        myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
    myprintf(("\n"));

}

/*
 * Utilities used by both client and server
 * Standard levels:
 * 0) no debugging
 * 1) hard failures
 * 2) soft failures
 * 3) current test software
 * 4) main procedure entry points
 * 5) main procedure exit points
 * 6) utility procedure entry points
 * 7) utility procedure exit points
 * 8) obscure procedure entry points
 * 9) obscure procedure exit points
 * 10) random stuff
 * 11) all <= 1
 * 12) all <= 2
 * 13) all <= 3
 * ...
 */