/*	$NetBSD: spec_vnops.c,v 1.212 2022/08/12 17:05:49 riastradh Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.212 2022/08/12 17:05:49 riastradh Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode_impl.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/lockf.h>
#include <sys/tty.h>
#include <sys/kauth.h>
#include <sys/kmem.h>	/* kmem_alloc/kmem_free, used by spec_node_init et al. */
#include <sys/fstrans.h>
#include <sys/module.h>
#include <sys/atomic.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/*
 * Lock order:
 *
 *	vnode lock
 *	-> device_lock
 *	-> struct vnode::v_interlock
 */
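/*
 * For example, a code path that needed all three would have to take
 * them in this order (a sketch of the ordering only, not an actual
 * sequence from this file):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	mutex_enter(&device_lock);
 *	mutex_enter(vp->v_interlock);
 *	...
 *	mutex_exit(vp->v_interlock);
 *	mutex_exit(&device_lock);
 *	VOP_UNLOCK(vp);
 */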

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

#define	SPECHSZ	64
#if	((SPECHSZ&(SPECHSZ-1)) == 0)
#define	SPECHASH(rdev)	(((rdev>>5)+(rdev))&(SPECHSZ-1))
#else
#define	SPECHASH(rdev)	(((unsigned)((rdev>>5)+(rdev)))%SPECHSZ)
#endif

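/*
 * E.g., SPECHSZ = 64 is a power of two, so the masking form of
 * SPECHASH applies: SPECHASH(256) = ((256 >> 5) + 256) & 63 = 8.
 * specfs_hash below is the table being indexed: device vnodes hang
 * off it keyed by SPECHASH(rdev), chained through v_specnext, and
 * protected by device_lock.
 */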
static vnode_t	*specfs_hash[SPECHSZ];
extern struct mount *dead_rootmount;

/*
 * This vnode operations vector is used for special device nodes
 * created from whole cloth by the kernel.  For the ops vector for
 * vnodes built from special devices found in a filesystem, see (e.g.)
 * ffs_specop_entries[] in ffs_vnops.c or the equivalent for other
 * filesystems.
 */

int (**spec_vnodeop_p)(void *);
const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_parsepath_desc, genfs_parsepath },	/* parsepath */
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, genfs_badop },		/* create */
	{ &vop_mknod_desc, genfs_badop },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, genfs_ebadf },		/* access */
	{ &vop_accessx_desc, genfs_ebadf },		/* accessx */
	{ &vop_getattr_desc, genfs_ebadf },		/* getattr */
	{ &vop_setattr_desc, genfs_ebadf },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
	{ &vop_fdiscard_desc, spec_fdiscard },		/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, genfs_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, genfs_badop },		/* remove */
	{ &vop_link_desc, genfs_badop },		/* link */
	{ &vop_rename_desc, genfs_badop },		/* rename */
	{ &vop_mkdir_desc, genfs_badop },		/* mkdir */
	{ &vop_rmdir_desc, genfs_badop },		/* rmdir */
	{ &vop_symlink_desc, genfs_badop },		/* symlink */
	{ &vop_readdir_desc, genfs_badop },		/* readdir */
	{ &vop_readlink_desc, genfs_badop },		/* readlink */
	{ &vop_abortop_desc, genfs_badop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, genfs_lock },			/* lock */
	{ &vop_unlock_desc, genfs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, genfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, genfs_getpages },		/* getpages */
	{ &vop_putpages_desc, genfs_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

static kauth_listener_t rawio_listener;
static struct kcondvar specfs_iocv;

/* Returns true if vnode is /dev/mem or /dev/kmem. */
bool
iskmemvp(struct vnode *vp)
{
	return ((vp->v_type == VCHR) && iskmemdev(vp->v_rdev));
}

/*
 * Returns true if dev is /dev/mem or /dev/kmem.
 */
int
iskmemdev(dev_t dev)
{
	/* mem_no is emitted by config(8) to generated devsw.c */
	extern const int mem_no;

	/* minor 14 is /dev/io on i386 with COMPAT_10 */
	return (major(dev) == mem_no && (minor(dev) < 2 || minor(dev) == 14));
}

static int
rawio_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;

	result = KAUTH_RESULT_DEFER;

	if ((action != KAUTH_DEVICE_RAWIO_SPEC) &&
	    (action != KAUTH_DEVICE_RAWIO_PASSTHRU))
		return result;

	/* Access is mandated by permissions. */
	result = KAUTH_RESULT_ALLOW;

	return result;
}

void
spec_init(void)
{

	rawio_listener = kauth_listen_scope(KAUTH_SCOPE_DEVICE,
	    rawio_listener_cb, NULL);
	cv_init(&specfs_iocv, "specio");
}

/*
 * spec_io_enter(vp, &sn, &dev)
 *
 *	Enter an operation that may not hold vp's vnode lock or an
 *	fstrans on vp's mount.  Until spec_io_exit, the vnode will not
 *	be revoked.
 *
 *	On success, set sn to the specnode pointer and dev to the dev_t
 *	number and return zero.  Caller must later call spec_io_exit
 *	when done.
 *
 *	On failure, return ENXIO -- the device has been revoked and no
 *	longer exists.
 */
static int
spec_io_enter(struct vnode *vp, struct specnode **snp, dev_t *devp)
{
	dev_t dev;
	struct specnode *sn;
	unsigned iocnt;
	int error = 0;

	mutex_enter(vp->v_interlock);

	/*
	 * Extract all the info we need from the vnode, unless the
	 * vnode has already been reclaimed.  This can happen if the
	 * underlying device has been removed and all the device nodes
	 * for it have been revoked.  The caller may not hold a vnode
	 * lock or fstrans to prevent this from happening before it has
	 * had an opportunity to notice the vnode is dead.
	 */
	if (vdead_check(vp, VDEAD_NOWAIT) != 0 ||
	    (sn = vp->v_specnode) == NULL ||
	    (dev = vp->v_rdev) == NODEV) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Notify spec_close that we are doing an I/O operation which
	 * may not be bracketed by fstrans(9) and thus is not blocked
	 * by vfs suspension.
	 *
	 * We could hold this reference with psref(9) instead, but we
	 * already have to take the interlock for vdead_check, so
	 * there's not much more cost here to another atomic operation.
	 */
	do {
		iocnt = atomic_load_relaxed(&sn->sn_dev->sd_iocnt);
		if (__predict_false(iocnt == UINT_MAX)) {
			/*
			 * The I/O count is limited by the number of
			 * LWPs (which will never overflow this) --
			 * unless one driver uses another driver via
			 * specfs, which is rather unusual, but which
			 * could happen via pud(4) userspace drivers.
			 * We could use a 64-bit count, but can't use
			 * atomics for that on all platforms.
			 * (Probably better to switch to psref or
			 * localcount instead.)
			 */
			error = EBUSY;
			goto out;
		}
	} while (atomic_cas_uint(&sn->sn_dev->sd_iocnt, iocnt, iocnt + 1)
	    != iocnt);

	/* Success! */
	*snp = sn;
	*devp = dev;
	error = 0;

out:	mutex_exit(vp->v_interlock);
	return error;
}
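/*
 * Typical usage, mirroring the pattern in spec_read/spec_write below
 * (a sketch only; the callers' vnode relocking is elided):
 *
 *	struct specnode *sn;
 *	dev_t dev;
 *	int error;
 *
 *	error = spec_io_enter(vp, &sn, &dev);
 *	if (error)
 *		return error;
 *	error = cdev_read(dev, uio, ioflag);
 *	spec_io_exit(vp, sn);
 */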

/*
 * spec_io_exit(vp, sn)
 *
 *	Exit an operation entered with a successful spec_io_enter --
 *	allow concurrent spec_node_revoke to proceed.  The argument sn
 *	must match the struct specnode pointer returned by
 *	spec_io_enter for vp.
 */
static void
spec_io_exit(struct vnode *vp, struct specnode *sn)
{
	struct specdev *sd = sn->sn_dev;
	unsigned iocnt;

	KASSERT(vp->v_specnode == sn);

	/*
	 * We are done.  Notify spec_close if appropriate.  The
	 * transition of 1 -> 0 must happen under device_lock so
	 * spec_close doesn't miss a wakeup.
	 */
	do {
		iocnt = atomic_load_relaxed(&sd->sd_iocnt);
		KASSERT(iocnt > 0);
		if (iocnt == 1) {
			mutex_enter(&device_lock);
			if (atomic_dec_uint_nv(&sd->sd_iocnt) == 0)
				cv_broadcast(&specfs_iocv);
			mutex_exit(&device_lock);
			break;
		}
	} while (atomic_cas_uint(&sd->sd_iocnt, iocnt, iocnt - 1) != iocnt);
}

/*
 * spec_io_drain(sd)
 *
 *	Wait for all existing spec_io_enter/exit sections to complete.
 *	Caller must ensure spec_io_enter will fail at this point.
 */
static void
spec_io_drain(struct specdev *sd)
{

	/*
	 * I/O at the same time as closing is unlikely -- it often
	 * indicates an application bug.
	 */
	if (__predict_true(atomic_load_relaxed(&sd->sd_iocnt) == 0))
		return;

	mutex_enter(&device_lock);
	while (atomic_load_relaxed(&sd->sd_iocnt) > 0)
		cv_wait(&specfs_iocv, &device_lock);
	mutex_exit(&device_lock);
}
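/*
 * spec_io_drain is used by spec_close below, both after a successful
 * bdev/cdev_cancel and after .d_close, to wait for concurrent
 * spec_read/spec_write/spec_ioctl callers to leave their
 * spec_io_enter/exit sections.
 */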

/*
 * Initialize a vnode that represents a device.
 */
void
spec_node_init(vnode_t *vp, dev_t rdev)
{
	specnode_t *sn;
	specdev_t *sd;
	vnode_t *vp2;
	vnode_t **vpp;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_specnode == NULL);

	/*
	 * Search the hash table for this device.  If known, add a
	 * reference to the device structure.  If not known, create
	 * a new entry to represent the device.  In all cases add
	 * the vnode to the hash table.
	 */
	sn = kmem_alloc(sizeof(*sn), KM_SLEEP);
	sd = kmem_alloc(sizeof(*sd), KM_SLEEP);
	mutex_enter(&device_lock);
	vpp = &specfs_hash[SPECHASH(rdev)];
	for (vp2 = *vpp; vp2 != NULL; vp2 = vp2->v_specnext) {
		KASSERT(vp2->v_specnode != NULL);
		if (rdev == vp2->v_rdev && vp->v_type == vp2->v_type) {
			break;
		}
	}
	if (vp2 == NULL) {
		/* No existing record, create a new one. */
		sd->sd_rdev = rdev;
		sd->sd_mountpoint = NULL;
		sd->sd_lockf = NULL;
		sd->sd_refcnt = 1;
		sd->sd_opencnt = 0;
		sd->sd_bdevvp = NULL;
		sd->sd_iocnt = 0;
		sd->sd_opened = false;
		sd->sd_closing = false;
		sn->sn_dev = sd;
		sd = NULL;
	} else {
		/* Use the existing record. */
		sn->sn_dev = vp2->v_specnode->sn_dev;
		sn->sn_dev->sd_refcnt++;
	}
	/* Insert vnode into the hash chain. */
	sn->sn_opencnt = 0;
	sn->sn_rdev = rdev;
	sn->sn_gone = false;
	vp->v_specnode = sn;
	vp->v_specnext = *vpp;
	*vpp = vp;
	mutex_exit(&device_lock);

	/* Free the record we allocated if unused. */
	if (sd != NULL) {
		kmem_free(sd, sizeof(*sd));
	}
}

/*
 * Lookup a vnode by device number and return it referenced.
 */
int
spec_node_lookup_by_dev(enum vtype type, dev_t dev, int flags, vnode_t **vpp)
{
	int error;
	vnode_t *vp;

top:	mutex_enter(&device_lock);
	for (vp = specfs_hash[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (type == vp->v_type && dev == vp->v_rdev) {
			mutex_enter(vp->v_interlock);
			/* If clean or being cleaned, then ignore it. */
			if (vdead_check(vp, VDEAD_NOWAIT) == 0)
				break;
			if ((flags & VDEAD_NOWAIT) == 0) {
				mutex_exit(&device_lock);
				/*
				 * It may be being revoked as we speak,
				 * and the caller wants to wait until
				 * all revocation has completed.  Let
				 * vcache_vget wait for it to finish
				 * dying; as a side effect, vcache_vget
				 * releases vp->v_interlock.  Note that
				 * vcache_vget cannot succeed at this
				 * point because vdead_check already
				 * failed.
				 */
				error = vcache_vget(vp);
				KASSERT(error);
				goto top;
			}
			mutex_exit(vp->v_interlock);
		}
	}
	KASSERT(vp == NULL || mutex_owned(vp->v_interlock));
	if (vp == NULL) {
		mutex_exit(&device_lock);
		return ENOENT;
	}
	/*
	 * If it is an opened block device return the opened vnode.
	 */
	if (type == VBLK && vp->v_specnode->sn_dev->sd_bdevvp != NULL) {
		mutex_exit(vp->v_interlock);
		vp = vp->v_specnode->sn_dev->sd_bdevvp;
		mutex_enter(vp->v_interlock);
	}
	mutex_exit(&device_lock);
	error = vcache_vget(vp);
	if (error != 0)
		return error;
	*vpp = vp;

	return 0;
}
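/*
 * E.g., a caller that wants the block-device vnode for dev and does
 * not want to wait out a revocation in progress (a sketch; use of vp
 * elided):
 *
 *	vnode_t *vp;
 *
 *	if (spec_node_lookup_by_dev(VBLK, dev, VDEAD_NOWAIT, &vp) == 0) {
 *		...
 *		vrele(vp);
 *	}
 */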

/*
 * Lookup a vnode by file system mounted on and return it referenced.
 */
int
spec_node_lookup_by_mount(struct mount *mp, vnode_t **vpp)
{
	int i, error;
	vnode_t *vp, *vq;

	mutex_enter(&device_lock);
	for (i = 0, vq = NULL; i < SPECHSZ && vq == NULL; i++) {
		for (vp = specfs_hash[i]; vp; vp = vp->v_specnext) {
			if (vp->v_type != VBLK)
				continue;
			vq = vp->v_specnode->sn_dev->sd_bdevvp;
			if (vq != NULL &&
			    vq->v_specnode->sn_dev->sd_mountpoint == mp)
				break;
			vq = NULL;
		}
	}
	if (vq == NULL) {
		mutex_exit(&device_lock);
		return ENOENT;
	}
	mutex_enter(vq->v_interlock);
	mutex_exit(&device_lock);
	error = vcache_vget(vq);
	if (error != 0)
		return error;
	*vpp = vq;

	return 0;

}

/*
 * Get the file system mounted on this block device.
 *
 * XXX Caller should hold the vnode lock -- shared or exclusive -- so
 * that this can't be changed, and the vnode can't be revoked while we
 * examine it.  But not all callers do, and they're scattered through a
 * lot of file systems, so we can't assert this yet.
 */
struct mount *
spec_node_getmountedfs(vnode_t *devvp)
{
	struct mount *mp;

	KASSERT(devvp->v_type == VBLK);
	mp = devvp->v_specnode->sn_dev->sd_mountpoint;

	return mp;
}

/*
 * Set the file system mounted on this block device.
 *
 * XXX Caller should hold the vnode lock exclusively so this can't be
 * changed or assumed by spec_node_getmountedfs while we change it, and
 * the vnode can't be revoked while we handle it.  But not all callers
 * do, and they're scattered through a lot of file systems, so we can't
 * assert this yet.  Instead, for now, we'll take an I/O reference so
 * at least the ioctl doesn't race with revoke/detach.
 *
 * If you do change this to assert an exclusive vnode lock, you must
 * also do vdead_check before trying bdev_ioctl, because the vnode may
 * have been revoked by the time the caller locked it, and this is
 * _not_ a vop -- calls to spec_node_setmountedfs don't go through
 * v_op, so revoking the vnode doesn't prevent further calls.
 *
 * XXX Caller should additionally have the vnode open, at least if mp
 * is nonnull, but I'm not sure all callers do that -- need to audit.
 * Currently udf closes the vnode before clearing the mount.
 */
void
spec_node_setmountedfs(vnode_t *devvp, struct mount *mp)
{
	struct dkwedge_info dkw;
	struct specnode *sn;
	dev_t dev;
	int error;

	KASSERT(devvp->v_type == VBLK);

	error = spec_io_enter(devvp, &sn, &dev);
	if (error)
		return;

	KASSERT(sn->sn_dev->sd_mountpoint == NULL || mp == NULL);
	sn->sn_dev->sd_mountpoint = mp;
	if (mp == NULL)
		goto out;

	error = bdev_ioctl(dev, DIOCGWEDGEINFO, &dkw, FREAD, curlwp);
	if (error)
		goto out;

	strlcpy(mp->mnt_stat.f_mntfromlabel, dkw.dkw_wname,
	    sizeof(mp->mnt_stat.f_mntfromlabel));

out:	spec_io_exit(devvp, sn);
}

/*
 * A vnode representing a special device is going away.  Close
 * the device if the vnode holds it open.
 */
void
spec_node_revoke(vnode_t *vp)
{
	specnode_t *sn;
	specdev_t *sd;
	struct vnode **vpp;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);

	sn = vp->v_specnode;
	sd = sn->sn_dev;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_specnode != NULL);
	KASSERT(sn->sn_gone == false);

	mutex_enter(&device_lock);
	KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt,
	    "sn_opencnt=%u > sd_opencnt=%u",
	    sn->sn_opencnt, sd->sd_opencnt);
	sn->sn_gone = true;
	if (sn->sn_opencnt != 0) {
		sd->sd_opencnt -= (sn->sn_opencnt - 1);
		sn->sn_opencnt = 1;
		mutex_exit(&device_lock);

		VOP_CLOSE(vp, FNONBLOCK, NOCRED);

		mutex_enter(&device_lock);
		KASSERT(sn->sn_opencnt == 0);
	}

	/*
	 * We may have revoked the vnode in this thread while another
	 * thread was in the middle of spec_close, in the window when
	 * spec_close releases the vnode lock to call .d_close for the
	 * last close.  In that case, wait for the concurrent
	 * spec_close to complete.
	 */
	while (sd->sd_closing)
		cv_wait(&specfs_iocv, &device_lock);

	/*
	 * Remove from the hash so lookups stop returning this
	 * specnode.  We will dissociate it from the specdev -- and
	 * possibly free the specdev -- in spec_node_destroy.
	 */
	KASSERT(sn->sn_gone);
	KASSERT(sn->sn_opencnt == 0);
	for (vpp = &specfs_hash[SPECHASH(vp->v_rdev)];;
	    vpp = &(*vpp)->v_specnext) {
		if (*vpp == vp) {
			*vpp = vp->v_specnext;
			vp->v_specnext = NULL;
			break;
		}
	}
	mutex_exit(&device_lock);
}

/*
 * A vnode representing a special device is being recycled.
 * Destroy the specfs component.
 */
void
spec_node_destroy(vnode_t *vp)
{
	specnode_t *sn;
	specdev_t *sd;
	int refcnt;

	sn = vp->v_specnode;
	sd = sn->sn_dev;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_specnode != NULL);
	KASSERT(sn->sn_opencnt == 0);

	mutex_enter(&device_lock);
	sn = vp->v_specnode;
	vp->v_specnode = NULL;
	refcnt = sd->sd_refcnt--;
	KASSERT(refcnt > 0);
	mutex_exit(&device_lock);

	/* If the device is no longer in use, destroy our record. */
	if (refcnt == 1) {
		KASSERT(sd->sd_iocnt == 0);
		KASSERT(sd->sd_opencnt == 0);
		KASSERT(sd->sd_bdevvp == NULL);
		kmem_free(sd, sizeof(*sd));
	}
	kmem_free(sn, sizeof(*sn));
}

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

typedef int (*spec_ioctl_t)(dev_t, u_long, void *, int, struct lwp *);

/*
 * Open a special file.
 */
/* ARGSUSED */
int
spec_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	dev_t dev, dev1;
	int error;
	enum kauth_device_req req;
	specnode_t *sn, *sn1;
	specdev_t *sd;
	spec_ioctl_t ioctl;
	u_int gen = 0;
	const char *name = NULL;
	bool needclose = false;
	struct partinfo pi;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERTMSG(vp->v_type == VBLK || vp->v_type == VCHR, "type=%d",
	    vp->v_type);

	dev = vp->v_rdev;
	sn = vp->v_specnode;
	sd = sn->sn_dev;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (ap->a_mode & (FREAD | FWRITE)) {
	case FREAD | FWRITE:
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_RW;
		break;
	case FWRITE:
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_WRITE;
		break;
	default:
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_READ;
		break;
	}
	error = kauth_authorize_device_spec(ap->a_cred, req, vp);
	if (error != 0)
		return (error);

	/*
	 * Acquire an open reference -- as long as we hold onto it, and
	 * the vnode isn't revoked, it can't be closed, and the vnode
	 * can't be revoked until we release the vnode lock.
	 */
	mutex_enter(&device_lock);
	KASSERT(!sn->sn_gone);
	switch (vp->v_type) {
	case VCHR:
		/*
		 * Character devices can accept opens from multiple
		 * vnodes.  But first, wait for any close to finish.
		 * Wait under the vnode lock so we don't have to worry
		 * about the vnode being revoked while we wait.
		 */
		while (sd->sd_closing) {
			error = cv_wait_sig(&specfs_iocv, &device_lock);
			if (error)
				break;
		}
		if (error)
			break;
		sd->sd_opencnt++;
		sn->sn_opencnt++;
		KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt,
		    "sn_opencnt=%u > sd_opencnt=%u",
		    sn->sn_opencnt, sd->sd_opencnt);
		break;
	case VBLK:
		/*
		 * For block devices, permit only one open.  The buffer
		 * cache cannot remain self-consistent with multiple
		 * vnodes holding a block device open.
		 *
		 * Treat zero opencnt with non-NULL mountpoint as open.
		 * This may happen after forced detach of a mounted device.
		 */
		if (sd->sd_opencnt != 0 || sd->sd_mountpoint != NULL) {
			error = EBUSY;
			break;
		}
		KASSERTMSG(sn->sn_opencnt == 0, "sn_opencnt=%u",
		    sn->sn_opencnt);
		sn->sn_opencnt = 1;
		sd->sd_opencnt = 1;
		sd->sd_bdevvp = vp;
		break;
	default:
		panic("invalid specfs vnode type: %d", vp->v_type);
	}
	mutex_exit(&device_lock);
	if (error)
		return error;

	/*
	 * Set VV_ISTTY if this is a tty cdev.
	 *
	 * XXX This does the wrong thing if the module has to be
	 * autoloaded.  We should maybe set this after autoloading
	 * modules and calling .d_open successfully, except (a) we need
	 * the vnode lock to touch it, and (b) once we acquire the
	 * vnode lock again, the vnode may have been revoked, and
	 * deadfs's dead_read needs VV_ISTTY to be already set in order
	 * to return the right answer.  So this needs some additional
	 * synchronization to be made to work correctly with tty driver
	 * module autoload.  For now, let's just hope it doesn't cause
	 * too much trouble for a tty from an autoloaded driver module
	 * to fail with EIO instead of returning EOF.
	 */
	if (vp->v_type == VCHR) {
		if (cdev_type(dev) == D_TTY)
			vp->v_vflag |= VV_ISTTY;
	}

	/*
	 * Because opening the device may block indefinitely, e.g. when
	 * opening a tty, and loading a module may cross into many
	 * other subsystems, we must not hold the vnode lock while
	 * calling .d_open, so release it now and reacquire it when
	 * done.
	 *
	 * Take an I/O reference so that any concurrent spec_close via
	 * spec_node_revoke will wait for us to finish calling .d_open.
	 * The vnode can't be dead at this point because we have it
	 * locked.  Note that if revoked, the driver must interrupt
	 * .d_open before spec_close starts waiting for I/O to drain so
	 * this doesn't deadlock.
	 */
	VOP_UNLOCK(vp);
	error = spec_io_enter(vp, &sn1, &dev1);
	if (error) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return error;
	}
	KASSERT(sn1 == sn);
	KASSERT(dev1 == dev);

	/*
	 * Open the device.  If .d_open returns ENXIO (device not
	 * configured), the driver may not be loaded, so try
	 * autoloading a module and then try .d_open again if anything
	 * got loaded.
	 */
	switch (vp->v_type) {
	case VCHR:
		do {
			const struct cdevsw *cdev;

			gen = module_gen;
			error = cdev_open(dev, ap->a_mode, S_IFCHR, l);
			if (error != ENXIO)
				break;

			/* Check if we already have a valid driver */
			mutex_enter(&device_lock);
			cdev = cdevsw_lookup(dev);
			mutex_exit(&device_lock);
			if (cdev != NULL)
				break;

			/* Get device name from devsw_conv array */
			if ((name = cdevsw_getname(major(dev))) == NULL)
				break;

			/* Try to autoload device module */
			(void) module_autoload(name, MODULE_CLASS_DRIVER);
		} while (gen != module_gen);
		break;

	case VBLK:
		do {
			const struct bdevsw *bdev;

			gen = module_gen;
			error = bdev_open(dev, ap->a_mode, S_IFBLK, l);
			if (error != ENXIO)
				break;

			/* Check if we already have a valid driver */
			mutex_enter(&device_lock);
			bdev = bdevsw_lookup(dev);
			mutex_exit(&device_lock);
			if (bdev != NULL)
				break;

			/* Get device name from devsw_conv array */
			if ((name = bdevsw_getname(major(dev))) == NULL)
				break;

			/* Try to autoload device module */
			(void) module_autoload(name, MODULE_CLASS_DRIVER);
		} while (gen != module_gen);
		break;

	default:
		__unreachable();
	}

	/*
	 * Release the I/O reference now that we have called .d_open,
	 * and reacquire the vnode lock.  At this point, the device may
	 * have been revoked, so we must tread carefully.  However, sn
	 * and sd remain valid pointers until we drop our reference.
	 */
	spec_io_exit(vp, sn);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	KASSERT(vp->v_specnode == sn);

	/*
	 * If it has been revoked since we released the vnode lock and
	 * reacquired it, then spec_node_revoke has closed it, and we
	 * must fail with EBADF.
	 *
	 * Otherwise, if opening it failed, back out and release the
	 * open reference.  If it was ever successfully opened and we
	 * got the last reference this way, it's now our job to close
	 * it.  This might happen in the following scenario:
	 *
	 *	Thread 1		Thread 2
	 *	VOP_OPEN
	 *	...
	 *	.d_open -> 0 (success)
	 *	acquire vnode lock
	 *	do stuff		VOP_OPEN
	 *	release vnode lock	...
	 *				.d_open -> EBUSY
	 *				VOP_CLOSE
	 *				acquire vnode lock
	 *				--sd_opencnt != 0
	 *				=> no .d_close
	 *				release vnode lock
	 *	acquire vnode lock
	 *	--sd_opencnt == 0
	 *
	 * We can't resolve this by making spec_close wait for .d_open
	 * to complete before examining sd_opencnt, because .d_open can
	 * hang indefinitely, e.g. for a tty.
	 */
	mutex_enter(&device_lock);
	if (sn->sn_gone) {
		if (error == 0)
			error = EBADF;
	} else if (error == 0) {
		/*
		 * Device has not been revoked, so our opencnt can't
		 * have gone away at this point -- transition to
		 * sn_gone=true happens before transition to
		 * sn_opencnt=0 in spec_node_revoke.
		 */
		KASSERT(sd->sd_opencnt);
		KASSERT(sn->sn_opencnt);
		KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt,
		    "sn_opencnt=%u > sd_opencnt=%u",
		    sn->sn_opencnt, sd->sd_opencnt);
		sd->sd_opened = true;
	} else if (sd->sd_opencnt == 1 && sd->sd_opened) {
		/*
		 * We're the last reference to a _previous_ open even
		 * though this one failed, so we have to close it.
		 * Don't decrement the reference count here --
		 * spec_close will do that.
		 */
		KASSERT(sn->sn_opencnt == 1);
		needclose = true;
	} else {
		KASSERT(sd->sd_opencnt);
		KASSERT(sn->sn_opencnt);
		KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt,
		    "sn_opencnt=%u > sd_opencnt=%u",
		    sn->sn_opencnt, sd->sd_opencnt);
		sd->sd_opencnt--;
		sn->sn_opencnt--;
		if (vp->v_type == VBLK)
			sd->sd_bdevvp = NULL;
	}
	mutex_exit(&device_lock);

	/*
	 * If this open failed, but the device was previously opened,
	 * and another thread concurrently closed the vnode while we
	 * were in the middle of reopening it, the other thread will
	 * see sd_opencnt > 0 and thus decide not to call .d_close --
	 * it is now our responsibility to do so.
	 *
	 * XXX The flags passed to VOP_CLOSE here are wrong, but
	 * drivers can't rely on FREAD|FWRITE anyway -- e.g., consider
	 * a device opened by thread 0 with O_READ, then opened by
	 * thread 1 with O_WRITE, then closed by thread 0, and finally
	 * closed by thread 1; the last .d_close call will have FWRITE
	 * but not FREAD.  We should just eliminate the FREAD/FWRITE
	 * parameter to .d_close altogether.
	 */
	if (needclose) {
		KASSERT(error);
		VOP_CLOSE(vp, FNONBLOCK, NOCRED);
	}

	/* If anything went wrong, we're done. */
	if (error)
		return error;

	/*
	 * For disk devices, automagically set the vnode size to the
	 * partition size, if we can.  This applies to block devices
	 * and character devices alike -- every block device must have
	 * a corresponding character device.  And if the module is
	 * loaded it will remain loaded until we're done here (it is
	 * forbidden to devsw_detach until closed).  So it is safe to
	 * query cdev_type unconditionally here.
	 */
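	/*
	 * E.g., a disk reporting pi_secsize = 512 and pi_size = 1048576
	 * sectors below gets a vnode size of 512 * 1048576 bytes =
	 * 512 MiB (numbers illustrative only).
	 */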
	if (cdev_type(dev) == D_DISK) {
		ioctl = vp->v_type == VCHR ? cdev_ioctl : bdev_ioctl;
		if ((*ioctl)(dev, DIOCGPARTINFO, &pi, FREAD, curlwp) == 0)
			uvm_vnp_setsize(vp,
			    (voff_t)pi.pi_secsize * pi.pi_size);
	}

	/* Success! */
	return 0;
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct specnode *sn;
	dev_t dev;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo pi;
	int n, on;
	int error = 0;
	int i, nra;
	daddr_t lastbn, *rablks;
	int *rasizes;
	int nrablks, ratogo;

	KASSERT(uio->uio_rw == UIO_READ);
	KASSERTMSG(VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
	    uio->uio_vmspace == curproc->p_vmspace,
	    "vmspace belongs to neither kernel nor curproc");

	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Release the lock while we sleep -- possibly
		 * indefinitely, if this is, e.g., a tty -- in
		 * cdev_read, so we don't hold up everything else that
		 * might want access to the vnode.
		 *
		 * But before we issue the read, take an I/O reference
		 * to the specnode so close will know when we're done
		 * reading.  Note that the moment we release the lock,
		 * the vnode's identity may change; hence spec_io_enter
		 * may fail, and the caller may have a dead vnode on
		 * their hands, if the file system on which vp lived
		 * has been unmounted.
		 */
		VOP_UNLOCK(vp);
		error = spec_io_enter(vp, &sn, &dev);
		if (error)
			goto out;
		error = cdev_read(dev, uio, ap->a_ioflag);
		spec_io_exit(vp, sn);
out:		vn_lock(vp, LK_SHARED | LK_RETRY);
		return (error);

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		if (uio->uio_offset < 0)
			return (EINVAL);

		if (bdev_ioctl(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, l) == 0)
			bsize = imin(imax(pi.pi_bsize, DEV_BSIZE), MAXBSIZE);
		else
			bsize = BLKDEV_IOSIZE;

		bscale = bsize >> DEV_BSHIFT;

		nra = uimin(16 * MAXPHYS / bsize - 1, 511);
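		/*
		 * E.g., on a port with MAXPHYS = 64 KiB: bsize = 64 KiB
		 * gives nra = uimin(16*64/64 - 1, 511) = 15 read-ahead
		 * slots, while bsize = 2 KiB hits the 511 cap
		 * (illustrative numbers only).
		 */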
		rablks = kmem_alloc(nra * sizeof(*rablks), KM_SLEEP);
		rasizes = kmem_alloc(nra * sizeof(*rasizes), KM_SLEEP);
		lastbn = ((uio->uio_offset + uio->uio_resid - 1) >> DEV_BSHIFT)
		    &~ (bscale - 1);
		nrablks = ratogo = 0;
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = uimin((unsigned)(bsize - on), uio->uio_resid);

			if (ratogo == 0) {
				nrablks = uimin((lastbn - bn) / bscale, nra);
				ratogo = nrablks;

				for (i = 0; i < nrablks; ++i) {
					rablks[i] = bn + (i+1) * bscale;
					rasizes[i] = bsize;
				}

				error = breadn(vp, bn, bsize,
				    rablks, rasizes, nrablks,
				    0, &bp);
			} else {
				if (ratogo > 0)
					--ratogo;
				error = bread(vp, bn, bsize, 0, &bp);
			}
			if (error)
				break;
			n = uimin(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp, 0);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);

		kmem_free(rablks, nra * sizeof(*rablks));
		kmem_free(rasizes, nra * sizeof(*rasizes));

		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct specnode *sn;
	dev_t dev;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo pi;
	int n, on;
	int error = 0;

	KASSERT(uio->uio_rw == UIO_WRITE);
	KASSERTMSG(VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
	    uio->uio_vmspace == curproc->p_vmspace,
	    "vmspace belongs to neither kernel nor curproc");

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Release the lock while we sleep -- possibly
		 * indefinitely, if this is, e.g., a tty -- in
		 * cdev_write, so we don't hold up everything else that
		 * might want access to the vnode.
		 *
		 * But before we issue the write, take an I/O reference
		 * to the specnode so close will know when we're done
		 * writing.  Note that the moment we release the lock,
		 * the vnode's identity may change; hence spec_io_enter
		 * may fail, and the caller may have a dead vnode on
		 * their hands, if the file system on which vp lived
		 * has been unmounted.
		 */
		VOP_UNLOCK(vp);
		error = spec_io_enter(vp, &sn, &dev);
		if (error)
			goto out;
		error = cdev_write(dev, uio, ap->a_ioflag);
		spec_io_exit(vp, sn);
out:		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);

		if (bdev_ioctl(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, l) == 0)
			bsize = imin(imax(pi.pi_bsize, DEV_BSIZE), MAXBSIZE);
		else
			bsize = BLKDEV_IOSIZE;

		bscale = bsize >> DEV_BSHIFT;
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = uimin((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, B_MODIFY, &bp);
			if (error) {
				return (error);
			}
			n = uimin(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp, 0);
			else {
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * fdiscard, which on disk devices becomes TRIM.
 */
int
spec_fdiscard(void *v)
{
	struct vop_fdiscard_args /* {
		struct vnode *a_vp;
		off_t a_pos;
		off_t a_len;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	dev_t dev;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);

	dev = vp->v_rdev;

	switch (vp->v_type) {
	case VCHR:
		// this is not stored for character devices
		//KASSERT(vp == vp->v_specnode->sn_dev->sd_cdevvp);
		return cdev_discard(dev, ap->a_pos, ap->a_len);
	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		return bdev_discard(dev, ap->a_pos, ap->a_len);
	default:
		panic("spec_fdiscard: not a device\n");
	}
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(void *v)
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		void *a_data;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct specnode *sn;
	dev_t dev;
	int error;

	error = spec_io_enter(vp, &sn, &dev);
	if (error)
		return error;

	switch (vp->v_type) {
	case VCHR:
		error = cdev_ioctl(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, curlwp);
		break;
	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		error = bdev_ioctl(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, curlwp);
		break;
	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}

	spec_io_exit(vp, sn);
	return error;
}

/* ARGSUSED */
int
spec_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct specnode *sn;
	dev_t dev;
	int revents;

	if (spec_io_enter(vp, &sn, &dev) != 0)
		return POLLERR;

	switch (vp->v_type) {
	case VCHR:
		revents = cdev_poll(dev, ap->a_events, curlwp);
		break;
	default:
		revents = genfs_poll(v);
		break;
	}

	spec_io_exit(vp, sn);
	return revents;
}

/* ARGSUSED */
int
spec_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct specnode *sn;
	dev_t dev;
	int error;

	error = spec_io_enter(vp, &sn, &dev);
	if (error)
		return error;

	switch (vp->v_type) {
	case VCHR:
		error = cdev_kqfilter(dev, ap->a_kn);
		break;
	default:
		/*
		 * Block devices don't support kqfilter, and refuse it
		 * for any other files (like those vflush()ed) too.
		 */
		error = EOPNOTSUPP;
		break;
	}

	spec_io_exit(vp, sn);
	return error;
}

/*
 * Allow mapping of only D_DISK.  This is called only for VBLK.
 */
int
spec_mmap(void *v)
{
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		vm_prot_t a_prot;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct specnode *sn;
	dev_t dev;
	int error;

	KASSERT(vp->v_type == VBLK);

	error = spec_io_enter(vp, &sn, &dev);
	if (error)
		return error;

	error = bdev_type(dev) == D_DISK ? 0 : EINVAL;

	spec_io_exit(vp, sn);
	return error;
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t offlo;
		off_t offhi;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mount *mp;
	int error;

	if (vp->v_type == VBLK) {
		if ((mp = spec_node_getmountedfs(vp)) != NULL) {
			error = VFS_FSYNC(mp, vp, ap->a_flags);
			if (error != EOPNOTSUPP)
				return error;
		}
		return vflushbuf(vp, ap->a_flags);
	}
	return (0);
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp = ap->a_bp;
	struct specnode *sn = NULL;
	dev_t dev;
	int error;

	error = spec_io_enter(vp, &sn, &dev);
	if (error)
		goto out;

	bp->b_dev = dev;

	if (!(bp->b_flags & B_READ)) {
#ifdef DIAGNOSTIC
		if (bp->b_vp && bp->b_vp->v_type == VBLK) {
			struct mount *mp = spec_node_getmountedfs(bp->b_vp);

			if (mp && (mp->mnt_flag & MNT_RDONLY)) {
				printf("%s blk %"PRId64" written while ro!\n",
				    mp->mnt_stat.f_mntonname, bp->b_blkno);
			}
		}
#endif /* DIAGNOSTIC */
		error = fscow_run(bp, false);
		if (error)
			goto out;
	}
	bdev_strategy(bp);

	error = 0;

out:	if (sn)
		spec_io_exit(vp, sn);
	if (error) {
		bp->b_error = error;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	return error;
}

int
spec_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;

	KASSERT(ap->a_vp->v_mount == dead_rootmount);
	*ap->a_recycle = true;

	return 0;
}

int
spec_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_specnode->sn_opencnt == 0);

	VOP_UNLOCK(vp);

	KASSERT(vp->v_mount == dead_rootmount);
	return 0;
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXBSIZE >> DEV_BSHIFT) - 1;
	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct session *sess;
	dev_t dev;
	int flags = ap->a_fflag;
	int mode, error, count;
	specnode_t *sn;
	specdev_t *sd;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);

	mutex_enter(vp->v_interlock);
	sn = vp->v_specnode;
	dev = vp->v_rdev;
	sd = sn->sn_dev;
	/*
	 * If we're going away soon, make this non-blocking.
	 * Also ensures that we won't wedge in vn_lock below.
	 */
	if (vdead_check(vp, VDEAD_NOWAIT) != 0)
		flags |= FNONBLOCK;
	mutex_exit(vp->v_interlock);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.  We
		 * cannot easily tell that a character device is a
		 * controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case, if the
		 * open count is 1 release the reference from the
		 * session.  Also, remove the link from the tty back to
		 * the session and pgrp.
		 *
		 * XXX V. fishy.
		 */
		mutex_enter(&proc_lock);
		sess = curlwp->l_proc->p_session;
		if (sn->sn_opencnt == 1 && vp == sess->s_ttyvp) {
			mutex_spin_enter(&tty_lock);
			sess->s_ttyvp = NULL;
			if (sess->s_ttyp->t_session != NULL) {
				sess->s_ttyp->t_pgrp = NULL;
				sess->s_ttyp->t_session = NULL;
				mutex_spin_exit(&tty_lock);
				/* Releases proc_lock. */
				proc_sessrele(sess);
			} else {
				mutex_spin_exit(&tty_lock);
				if (sess->s_ttyp->t_pgrp != NULL)
					panic("spec_close: spurious pgrp ref");
				mutex_exit(&proc_lock);
			}
			vrele(vp);
		} else
			mutex_exit(&proc_lock);

		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		mode = S_IFCHR;
		break;

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/*
	 * Decrement the open reference count of this node and the
	 * device.  For block devices, the open reference count must be
	 * 1 at this point.  If the device's open reference count goes
	 * to zero, we're the last one out so get the lights.
	 *
	 * We may find --sd->sd_opencnt gives zero, and yet
	 * sd->sd_opened is false.  This happens if the vnode is
	 * revoked at the same time as it is being opened, which can
	 * happen when opening a tty blocks indefinitely.  In that
	 * case, we still must call close -- it is the job of close to
	 * interrupt the open.  Either way, the device will be no
	 * longer opened, so we have to clear sd->sd_opened; subsequent
	 * opens will have responsibility for issuing close.
	 *
	 * This has the side effect that the sequence of opens might
	 * happen out of order -- we might end up doing open, open,
	 * close, close, instead of open, close, open, close.  This is
	 * unavoidable with the current devsw API, where open is
	 * allowed to block and close must be able to run concurrently
	 * to interrupt it.  It is the driver's responsibility to
	 * ensure that close is idempotent so that this works.  Drivers
	 * requiring per-open state and exact 1:1 correspondence
	 * between open and close can use fd_clone.
	 */
	mutex_enter(&device_lock);
	KASSERT(sn->sn_opencnt);
	KASSERT(sd->sd_opencnt);
	KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt,
	    "sn_opencnt=%u > sd_opencnt=%u",
	    sn->sn_opencnt, sd->sd_opencnt);
	sn->sn_opencnt--;
	count = --sd->sd_opencnt;
	if (vp->v_type == VBLK) {
		KASSERTMSG(count == 0, "block device with %u opens",
		    count + 1);
		sd->sd_bdevvp = NULL;
	}
	if (count == 0) {
		KASSERTMSG(sn->sn_opencnt == 0, "sn_opencnt=%u",
		    sn->sn_opencnt);
		KASSERT(!sd->sd_closing);
		sd->sd_opened = false;
		sd->sd_closing = true;
	}
	mutex_exit(&device_lock);

	if (count != 0)
		return 0;

	/*
	 * If we're able to block, release the vnode lock & reacquire.  We
	 * might end up sleeping for someone else who wants our queues.  They
	 * won't get them if we hold the vnode locked.
	 */
	if (!(flags & FNONBLOCK))
		VOP_UNLOCK(vp);

	/*
	 * If we can cancel all outstanding I/O, then wait for it to
	 * drain before we call .d_close.  Drivers that split up
	 * .d_cancel and .d_close this way need not have any internal
	 * mechanism for waiting in .d_close for I/O to drain.
	 */
	if (vp->v_type == VBLK)
		error = bdev_cancel(dev, flags, mode, curlwp);
	else
		error = cdev_cancel(dev, flags, mode, curlwp);
	if (error == 0)
		spec_io_drain(sd);
	else
		KASSERTMSG(error == ENODEV, "cancel dev=0x%lx failed with %d",
		    (unsigned long)dev, error);

	if (vp->v_type == VBLK)
		error = bdev_close(dev, flags, mode, curlwp);
	else
		error = cdev_close(dev, flags, mode, curlwp);

	/*
	 * Wait for all other devsw operations to drain.  After this
	 * point, no bdev/cdev_* can be active for this specdev.
	 */
	spec_io_drain(sd);

	/*
	 * Wake any spec_open calls waiting for close to finish -- do
	 * this before reacquiring the vnode lock, because spec_open
	 * holds the vnode lock while waiting, so doing this after
	 * reacquiring the lock would deadlock.
	 */
	mutex_enter(&device_lock);
	KASSERT(!sd->sd_opened);
	KASSERT(sd->sd_closing);
	sd->sd_closing = false;
	cv_broadcast(&specfs_iocv);
	mutex_exit(&device_lock);

	if (!(flags & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("dev %llu, %llu\n", (unsigned long long)major(ap->a_vp->v_rdev),
	    (unsigned long long)minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(void *v)
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return genfs_pathconf(ap);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 */
int
spec_advlock(void *v)
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		void *a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return lf_advlock(ap, &vp->v_speclockf, (off_t)0);
}