/*	$NetBSD: puffs_subr.c,v 1.33 2007/06/21 14:11:34 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_subr.c,v 1.33 2007/06/21 14:11:34 pooka Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/socketvar.h>
#include <sys/vnode.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

struct pool puffs_pnpool;

#ifdef PUFFSDEBUG
int puffsdebug;
#endif

static __inline struct puffs_node_hashlist
	*puffs_cookie2hashlist(struct puffs_mount *, void *);
static struct puffs_node *puffs_cookie2pnode(struct puffs_mount *, void *);

static void puffs_gop_size(struct vnode *, off_t, off_t *, int);
static void puffs_gop_markupdate(struct vnode *, int);

static const struct genfs_ops puffs_genfsops = {
	.gop_size = puffs_gop_size,
	.gop_write = genfs_gop_write,
	.gop_markupdate = puffs_gop_markupdate,
#if 0
	.gop_alloc, should ask userspace
#endif
};
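
/*
 * Note: these genfs ops are attached to each puffs vnode with
 * genfs_node_init() in puffs_getvnode() below; puffs supplies its own
 * gop_size and gop_markupdate and reuses the stock genfs_gop_write.
 */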

/*
 * Grab a vnode and initialize all the puffs-dependent stuff.
 */
int
puffs_getvnode(struct mount *mp, void *cookie, enum vtype type,
    voff_t vsize, dev_t rdev, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct vnode *vp, *nvp;
	struct puffs_node *pnode;
	struct puffs_node_hashlist *plist;
	int error;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * XXX: there is a deadlock condition between vfs_busy() and
	 * vnode locks.  For an unmounting file system the mountpoint
	 * is frozen, but in unmount(FORCE) vflush() wants to access all
	 * of the vnodes.  If we are here waiting for the mountpoint
	 * lock while holding on to a vnode lock, well, we ain't
	 * just pining for the fjords anymore.  If we release the
	 * vnode lock, we will be in the situation "mount point
	 * is dying" and panic() will ensue in insmntque.  So as a
	 * temporary workaround, get a vnode without putting it on
	 * the mount point list, check if the mount point is still alive
	 * and kicking and only then add the vnode to the list.
	 */
	error = getnewvnode(VT_PUFFS, NULL, puffs_vnodeop_p, &vp);
	if (error)
		return error;
	vp->v_vnlock = NULL;
	vp->v_type = type;

	/*
	 * Check that the mount point isn't going away.  This will work
	 * until we decide to remove biglock or make the kernel
	 * preemptive.  But hopefully the real problem will be fixed
	 * by then.
	 *
	 * XXX: yes, should call vfs_busy(), but thar be rabbits with
	 * vicious streaks a mile wide ...
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		DPRINTF(("puffs_getvnode: mp %p unmount, unable to create "
		    "vnode for cookie %p\n", mp, cookie));
		ungetnewvnode(vp);
		return ENXIO;
	}

	/* So it's not dead yet.. good.. inform new vnode of its master */
	simple_lock(&mntvnode_slock);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
	vp->v_mount = mp;

	/*
	 * clerical tasks & footwork
	 */

	/* default size */
	uvm_vnp_setsize(vp, 0);

	/* dances based on vnode type.  almost ufs_vinit(), but not quite */
	switch (type) {
	case VCHR:
	case VBLK:
		/*
		 * replace vnode operation vector with the specops vector.
		 * our user server has very little control over the node
		 * if it decides it's a character or block special file
		 */
		vp->v_op = puffs_specop_p;

		/* do the standard checkalias-dance */
		if ((nvp = checkalias(vp, rdev, mp)) != NULL) {
			/*
			 * found: release & unallocate aliased
			 * old (well, actually, new) node
			 */
			vp->v_op = spec_vnodeop_p;
			vp->v_flag &= ~VLOCKSWORK;
			vrele(vp);
			vgone(vp); /* cya */

			/* init "new" vnode */
			vp = nvp;
			vp->v_vnlock = NULL;
			vp->v_mount = mp;
		}
		break;

	case VFIFO:
		vp->v_op = puffs_fifoop_p;
		break;

	case VREG:
		uvm_vnp_setsize(vp, vsize);
		break;

	case VDIR:
	case VLNK:
	case VSOCK:
		break;
	default:
#ifdef DIAGNOSTIC
		panic("puffs_getvnode: invalid vtype %d", type);
#endif
		break;
	}

	pnode = pool_get(&puffs_pnpool, PR_WAITOK);
	pnode->pn_cookie = cookie;
	pnode->pn_stat = 0;
	pnode->pn_refcount = 1;

	mutex_init(&pnode->pn_mtx, MUTEX_DEFAULT, IPL_NONE);
	SLIST_INIT(&pnode->pn_sel.sel_klist);
	pnode->pn_revents = 0;

	plist = puffs_cookie2hashlist(pmp, cookie);
	LIST_INSERT_HEAD(plist, pnode, pn_hashent);
	vp->v_data = pnode;
	vp->v_type = type;
	pnode->pn_vp = vp;

	genfs_node_init(vp, &puffs_genfsops);
	*vpp = vp;

	DPRINTF(("new vnode at %p, pnode %p, cookie %p\n", vp,
	    pnode, pnode->pn_cookie));

	return 0;
}

/* new node creation for creative vop ops (create, symlink, mkdir, mknod) */
int
puffs_newnode(struct mount *mp, struct vnode *dvp, struct vnode **vpp,
    void *cookie, struct componentname *cnp, enum vtype type, dev_t rdev)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct vnode *vp;
	int error;

	/* userspace probably has this as a NULL op */
	if (cookie == NULL) {
		error = EOPNOTSUPP;
		return error;
	}

	/*
	 * Check for previous node with the same designation.
	 * Explicitly check the root node cookie, since it might be
	 * reclaimed from the kernel when this check is made.
	 *
	 * XXX: technically this error check should punish the fs,
	 * not the caller.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (cookie == pmp->pmp_root_cookie
	    || puffs_cookie2pnode(pmp, cookie) != NULL) {
		mutex_exit(&pmp->pmp_lock);
		error = EEXIST;
		return error;
	}
	mutex_exit(&pmp->pmp_lock);

	error = puffs_getvnode(dvp->v_mount, cookie, type, 0, rdev, &vp);
	if (error)
		return error;

	vp->v_type = type;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	*vpp = vp;

	if ((cnp->cn_flags & MAKEENTRY) && PUFFS_DOCACHE(pmp))
		cache_enter(dvp, vp, cnp);

	return 0;
}

/*
 * Release a pnode structure; this deals with references to the
 * puffs_node instead of the vnode.  We can't use vref()/vrele() on
 * the vnode here, since that causes the lovely VOP_INACTIVE(),
 * which in turn causes the lovely deadlock when called by the one
 * who is supposed to handle it.
 */
void
puffs_releasenode(struct puffs_node *pn)
{

	mutex_enter(&pn->pn_mtx);
	if (--pn->pn_refcount == 0) {
		mutex_exit(&pn->pn_mtx);
		mutex_destroy(&pn->pn_mtx);
		pool_put(&puffs_pnpool, pn);
	} else {
		mutex_exit(&pn->pn_mtx);
	}
}

/*
 * Add a reference to the node.
 * pn_mtx must be held on entry and is still held on return.
 */
void
puffs_referencenode(struct puffs_node *pn)
{

	KASSERT(mutex_owned(&pn->pn_mtx));
	pn->pn_refcount++;
}

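/*
 * Reference counting sketch (illustrative only, not from the original
 * sources): a caller which needs to hold on to the puffs_node
 * independently of the vnode would do roughly
 *
 *	struct puffs_node *pn = VPTOPP(vp);
 *
 *	mutex_enter(&pn->pn_mtx);
 *	puffs_referencenode(pn);
 *	mutex_exit(&pn->pn_mtx);
 *	...
 *	puffs_releasenode(pn);
 *
 * The initial reference is taken in puffs_getvnode() and dropped by
 * puffs_putvnode() below.
 */

/*
 * Detach a puffs node from its vnode: remove it from the cookie hash,
 * tear down the genfs node and drop the pnode reference taken in
 * puffs_getvnode().
 */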
void
puffs_putvnode(struct vnode *vp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pnode;

	pmp = VPTOPUFFSMP(vp);
	pnode = VPTOPP(vp);

#ifdef DIAGNOSTIC
	if (vp->v_tag != VT_PUFFS)
		panic("puffs_putvnode: %p not a puffs vnode", vp);
#endif

	LIST_REMOVE(pnode, pn_hashent);
	genfs_node_destroy(vp);
	puffs_releasenode(pnode);
	vp->v_data = NULL;

	return;
}

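/*
 * Map a userspace cookie to the hash bucket it is stored in.  The
 * hash is computed over the cookie pointer value itself.
 */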
static __inline struct puffs_node_hashlist *
puffs_cookie2hashlist(struct puffs_mount *pmp, void *cookie)
{
	uint32_t hash;

	hash = hash32_buf(&cookie, sizeof(void *), HASH32_BUF_INIT);
	return &pmp->pmp_pnodehash[hash % pmp->pmp_npnodehash];
}

/*
 * Translate cookie to puffs_node.  Caller must hold mountpoint
 * lock and it will be held upon return.
 */
static struct puffs_node *
puffs_cookie2pnode(struct puffs_mount *pmp, void *cookie)
{
	struct puffs_node_hashlist *plist;
	struct puffs_node *pnode;

	plist = puffs_cookie2hashlist(pmp, cookie);
	LIST_FOREACH(pnode, plist, pn_hashent) {
		if (pnode->pn_cookie == cookie)
			break;
	}

	return pnode;
}

/*
 * Make sure root vnode exists and reference it.  Does NOT lock.
 */
int
puffs_makeroot(struct puffs_mount *pmp)
{
	struct vnode *vp;
	int rv;

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode.  the latter is controlled by puffs_inactive().
	 *
	 * pmp_root is set here and cleared in puffs_reclaim().
	 */
 retry:
	mutex_enter(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
		simple_lock(&vp->v_interlock);
		mutex_exit(&pmp->pmp_lock);
		if (vget(vp, LK_INTERLOCK) == 0)
			return 0;
	} else
		mutex_exit(&pmp->pmp_lock);

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another and stuff it with the cookie.
	 */
	if ((rv = puffs_getvnode(pmp->pmp_mp, pmp->pmp_root_cookie,
	    pmp->pmp_root_vtype, pmp->pmp_root_vsize, pmp->pmp_root_rdev, &vp)))
		return rv;

	/*
	 * Someone magically managed to race us into puffs_getvnode?
	 * Put our previous new vnode back and retry.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_root) {
		mutex_exit(&pmp->pmp_lock);
		puffs_putvnode(vp);
		goto retry;
	}

	/* store cache */
	vp->v_flag = VROOT;
	pmp->pmp_root = vp;
	mutex_exit(&pmp->pmp_lock);

	return 0;
}

/*
 * Locate the in-kernel vnode based on the cookie received from
 * userspace.  Returns the vnode if found, NULL otherwise.
 * The parameter "lock" controls whether to lock the vnode or not.
 * Always locking might cause us to lock against ourselves in
 * situations where we want the vnode but don't care for the vnode
 * lock, e.g. file server issued putpages.
 */
struct vnode *
puffs_pnode2vnode(struct puffs_mount *pmp, void *cookie, int lock)
{
	struct puffs_node *pnode;
	struct vnode *vp;
	int vgetflags;

	/*
	 * Handle root in a special manner, since we want to make sure
	 * pmp_root is properly set.
	 */
	if (cookie == pmp->pmp_root_cookie) {
		if (puffs_makeroot(pmp))
			return NULL;
		if (lock)
			vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

		return pmp->pmp_root;
	}

	vgetflags = LK_INTERLOCK;
	if (lock)
		vgetflags |= LK_EXCLUSIVE | LK_RETRY;

	mutex_enter(&pmp->pmp_lock);
	pnode = puffs_cookie2pnode(pmp, cookie);

	if (pnode == NULL) {
		mutex_exit(&pmp->pmp_lock);
		return NULL;
	}
	vp = pnode->pn_vp;

	simple_lock(&vp->v_interlock);
	mutex_exit(&pmp->pmp_lock);

	if (vget(vp, vgetflags))
		return NULL;

	return vp;
}

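/*
 * Fill in a struct puffs_kcn, suitable for shipping to userspace,
 * from the kernel componentname: nameiop, flags, caller pid,
 * credentials and the NUL-terminated name.
 */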
void
puffs_makecn(struct puffs_kcn *pkcn, const struct componentname *cn)
{

	pkcn->pkcn_nameiop = cn->cn_nameiop;
	pkcn->pkcn_flags = cn->cn_flags;
	pkcn->pkcn_pid = cn->cn_lwp->l_proc->p_pid;
	puffs_credcvt(&pkcn->pkcn_cred, cn->cn_cred);

	(void)memcpy(&pkcn->pkcn_name, cn->cn_nameptr, cn->cn_namelen);
	pkcn->pkcn_name[cn->cn_namelen] = '\0';
	pkcn->pkcn_namelen = cn->cn_namelen;
}

/*
 * Convert given credentials to struct puffs_cred for userspace.
 */
void
puffs_credcvt(struct puffs_cred *pcr, const kauth_cred_t cred)
{

	memset(pcr, 0, sizeof(struct puffs_cred));

	if (cred == NOCRED || cred == FSCRED) {
		pcr->pcr_type = PUFFCRED_TYPE_INTERNAL;
		if (cred == NOCRED)
			pcr->pcr_internal = PUFFCRED_CRED_NOCRED;
		if (cred == FSCRED)
			pcr->pcr_internal = PUFFCRED_CRED_FSCRED;
	} else {
		pcr->pcr_type = PUFFCRED_TYPE_UUC;
		kauth_cred_to_uucred(&pcr->pcr_uuc, cred);
	}
}

/*
 * Return pid.  In case the operation is coming from within the
 * kernel without any process context, borrow the swapper's pid.
 */
pid_t
puffs_lwp2pid(struct lwp *l)
{

	return l ? l->l_proc->p_pid : 0;
}


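/*
 * genfs gop_size hook: the end of backing store is simply the
 * requested size.
 */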
static void
puffs_gop_size(struct vnode *vp, off_t size, off_t *eobp,
    int flags)
{

	*eobp = size;
}

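/*
 * genfs gop_markupdate hook: translate the GOP_UPDATE_* flags into
 * their PUFFS_UPDATE* counterparts and record them in the node's
 * metadata cache via puffs_updatenode().
 */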
static void
puffs_gop_markupdate(struct vnode *vp, int flags)
{
	int uflags = 0;

	if (flags & GOP_UPDATE_ACCESSED)
		uflags |= PUFFS_UPDATEATIME;
	if (flags & GOP_UPDATE_MODIFIED)
		uflags |= PUFFS_UPDATEMTIME;

	puffs_updatenode(vp, uflags);
}

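/*
 * Record timestamp (and size) updates in the node's metadata cache
 * according to the given PUFFS_UPDATE* flags, rather than consulting
 * the file server right away.  For example (illustrative sketch), a
 * write path might do:
 *
 *	puffs_updatenode(vp, PUFFS_UPDATEMTIME | PUFFS_UPDATESIZE);
 */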
void
puffs_updatenode(struct vnode *vp, int flags)
{
	struct puffs_node *pn;
	struct timespec ts;

	if (flags == 0)
		return;

	pn = VPTOPP(vp);
	nanotime(&ts);

	if (flags & PUFFS_UPDATEATIME) {
		pn->pn_mc_atime = ts;
		pn->pn_stat |= PNODE_METACACHE_ATIME;
	}
	if (flags & PUFFS_UPDATECTIME) {
		pn->pn_mc_ctime = ts;
		pn->pn_stat |= PNODE_METACACHE_CTIME;
	}
	if (flags & PUFFS_UPDATEMTIME) {
		pn->pn_mc_mtime = ts;
		pn->pn_stat |= PNODE_METACACHE_MTIME;
	}
	if (flags & PUFFS_UPDATESIZE) {
		pn->pn_mc_size = vp->v_size;
		pn->pn_stat |= PNODE_METACACHE_SIZE;
	}
}

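/*
 * Refresh vp->v_size from the file server with VOP_GETATTR(); do
 * nothing if the getattr fails or no size is reported.
 */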
void
puffs_updatevpsize(struct vnode *vp)
{
	struct vattr va;

	if (VOP_GETATTR(vp, &va, FSCRED, NULL))
		return;

	if (va.va_size != VNOVAL)
		vp->v_size = va.va_size;
}

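/*
 * Request-done callback for asynchronous bioread: copy the data
 * returned by the file server into the buffer and complete the I/O
 * with biodone().
 */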
void
puffs_parkdone_asyncbioread(struct puffs_req *preq, void *arg)
{
	struct puffs_vnreq_read *read_argp = (void *)preq;
	struct buf *bp = arg;
	size_t moved;

	bp->b_error = preq->preq_rv;
	if (bp->b_error == 0) {
		moved = bp->b_bcount - read_argp->pvnr_resid;
		bp->b_resid = read_argp->pvnr_resid;

		memcpy(bp->b_data, read_argp->pvnr_data, moved);
	} else {
		bp->b_flags |= B_ERROR;
	}

	biodone(bp);
	free(preq, M_PUFFS);
}

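/*
 * Request-done callback for poll: merge the revents reported by the
 * file server (or POLLERR on failure) into the node, wake up any
 * selectors and drop the node reference held across the request.
 */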
void
puffs_parkdone_poll(struct puffs_req *preq, void *arg)
{
	struct puffs_vnreq_poll *poll_argp = (void *)preq;
	struct puffs_node *pn = arg;
	int revents;

	if (preq->preq_rv == 0)
		revents = poll_argp->pvnr_events;
	else
		revents = POLLERR;

	mutex_enter(&pn->pn_mtx);
	pn->pn_revents |= revents;
	mutex_exit(&pn->pn_mtx);

	selnotify(&pn->pn_sel, 0);
	free(preq, M_PUFFS);

	puffs_releasenode(pn);
}

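/*
 * Mount point reference counting: keeps the struct puffs_mount from
 * going away while an operation is using it.  Both routines must be
 * called with pmp_lock held; the last release wakes up anyone waiting
 * on pmp_refcount_cv.
 *
 * Typical usage (illustrative sketch, not from the original sources):
 *
 *	mutex_enter(&pmp->pmp_lock);
 *	puffs_mp_reference(pmp);
 *	mutex_exit(&pmp->pmp_lock);
 *	... do work ...
 *	mutex_enter(&pmp->pmp_lock);
 *	puffs_mp_release(pmp);
 *	mutex_exit(&pmp->pmp_lock);
 */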
void
puffs_mp_reference(struct puffs_mount *pmp)
{

	KASSERT(mutex_owned(&pmp->pmp_lock));
	pmp->pmp_refcount++;
}

void
puffs_mp_release(struct puffs_mount *pmp)
{

	KASSERT(mutex_owned(&pmp->pmp_lock));
	if (--pmp->pmp_refcount == 0)
		cv_broadcast(&pmp->pmp_refcount_cv);
}