1 1.219 mlelstv /* $NetBSD: spec_vnops.c,v 1.219 2025/01/06 09:45:49 mlelstv Exp $ */ 2 1.112 ad 3 1.112 ad /*- 4 1.112 ad * Copyright (c) 2008 The NetBSD Foundation, Inc. 5 1.112 ad * All rights reserved. 6 1.112 ad * 7 1.112 ad * Redistribution and use in source and binary forms, with or without 8 1.112 ad * modification, are permitted provided that the following conditions 9 1.112 ad * are met: 10 1.112 ad * 1. Redistributions of source code must retain the above copyright 11 1.112 ad * notice, this list of conditions and the following disclaimer. 12 1.112 ad * 2. Redistributions in binary form must reproduce the above copyright 13 1.112 ad * notice, this list of conditions and the following disclaimer in the 14 1.112 ad * documentation and/or other materials provided with the distribution. 15 1.112 ad * 16 1.112 ad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 17 1.112 ad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 1.112 ad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 1.112 ad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 20 1.112 ad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 1.112 ad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 1.112 ad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 1.112 ad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 1.112 ad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 1.112 ad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 1.112 ad * POSSIBILITY OF SUCH DAMAGE. 27 1.112 ad */ 28 1.16 cgd 29 1.1 cgd /* 30 1.15 mycroft * Copyright (c) 1989, 1993 31 1.15 mycroft * The Regents of the University of California. All rights reserved. 32 1.1 cgd * 33 1.1 cgd * Redistribution and use in source and binary forms, with or without 34 1.1 cgd * modification, are permitted provided that the following conditions 35 1.1 cgd * are met: 36 1.1 cgd * 1. Redistributions of source code must retain the above copyright 37 1.1 cgd * notice, this list of conditions and the following disclaimer. 38 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright 39 1.1 cgd * notice, this list of conditions and the following disclaimer in the 40 1.1 cgd * documentation and/or other materials provided with the distribution. 41 1.69 agc * 3. Neither the name of the University nor the names of its contributors 42 1.1 cgd * may be used to endorse or promote products derived from this software 43 1.1 cgd * without specific prior written permission. 44 1.1 cgd * 45 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 46 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 1.1 cgd * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 49 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 1.1 cgd * SUCH DAMAGE. 56 1.1 cgd * 57 1.39 fvdl * @(#)spec_vnops.c 8.15 (Berkeley) 7/14/95 58 1.1 cgd */ 59 1.60 lukem 60 1.60 lukem #include <sys/cdefs.h> 61 1.219 mlelstv __KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.219 2025/01/06 09:45:49 mlelstv Exp $"); 62 1.216 riastrad 63 1.216 riastrad #ifdef _KERNEL_OPT 64 1.216 riastrad #include "opt_ddb.h" 65 1.216 riastrad #endif 66 1.1 cgd 67 1.9 mycroft #include <sys/param.h> 68 1.9 mycroft #include <sys/proc.h> 69 1.9 mycroft #include <sys/systm.h> 70 1.9 mycroft #include <sys/kernel.h> 71 1.9 mycroft #include <sys/conf.h> 72 1.9 mycroft #include <sys/buf.h> 73 1.9 mycroft #include <sys/mount.h> 74 1.9 mycroft #include <sys/namei.h> 75 1.168 hannken #include <sys/vnode_impl.h> 76 1.9 mycroft #include <sys/stat.h> 77 1.9 mycroft #include <sys/errno.h> 78 1.9 mycroft #include <sys/ioctl.h> 79 1.81 ws #include <sys/poll.h> 80 1.9 mycroft #include <sys/file.h> 81 1.9 mycroft #include <sys/disklabel.h> 82 1.176 christos #include <sys/disk.h> 83 1.35 kleink #include <sys/lockf.h> 84 1.71 dsl #include <sys/tty.h> 85 1.87 elad #include <sys/kauth.h> 86 1.106 hannken #include <sys/fstrans.h> 87 1.122 haad #include <sys/module.h> 88 1.202 riastrad #include <sys/atomic.h> 89 1.28 christos 90 1.30 mycroft #include <miscfs/genfs/genfs.h> 91 1.15 mycroft #include <miscfs/specfs/specdev.h> 92 1.1 cgd 93 1.216 riastrad #ifdef DDB 94 1.216 riastrad #include <ddb/ddb.h> 95 1.216 riastrad #endif 96 1.216 riastrad 97 1.186 riastrad /* 98 1.186 riastrad * Lock order: 99 1.186 riastrad * 100 1.186 riastrad * vnode lock 101 1.186 riastrad * -> device_lock 102 1.186 riastrad * -> struct vnode::v_interlock 103 1.186 riastrad */ 104 1.186 riastrad 105 1.1 cgd /* symbolic sleep message strings for devices */ 106 1.37 mycroft const char devopn[] = "devopn"; 107 1.37 mycroft const char devio[] = "devio"; 108 1.37 mycroft const char devwait[] = "devwait"; 109 1.37 mycroft const char devin[] = "devin"; 110 1.37 mycroft const char devout[] = "devout"; 111 1.37 mycroft const char devioc[] = "devioc"; 112 1.37 mycroft const char devcls[] = "devcls"; 113 1.61 matt 114 1.137 hannken #define SPECHSZ 64 115 1.137 hannken #if ((SPECHSZ&(SPECHSZ-1)) == 0) 116 1.137 hannken #define SPECHASH(rdev) (((rdev>>5)+(rdev))&(SPECHSZ-1)) 117 1.137 hannken #else 118 1.137 hannken #define SPECHASH(rdev) (((unsigned)((rdev>>5)+(rdev)))%SPECHSZ) 119 1.137 hannken #endif 120 1.137 hannken 121 1.137 hannken static vnode_t *specfs_hash[SPECHSZ]; 122 1.148 hannken extern struct mount *dead_rootmount; 123 1.46 sommerfe 124 1.46 sommerfe /* 125 1.112 ad * This vnode operations vector is used for special device nodes 126 1.112 ad * created from whole cloth by the kernel. For the ops vector for 127 1.112 ad * vnodes built from special devices found in a filesystem, see (e.g) 128 1.112 ad * ffs_specop_entries[] in ffs_vnops.c or the equivalent for other 129 1.112 ad * filesystems. 
130 1.46 sommerfe */ 131 1.1 cgd 132 1.82 xtraeme int (**spec_vnodeop_p)(void *); 133 1.53 jdolecek const struct vnodeopv_entry_desc spec_vnodeop_entries[] = { 134 1.15 mycroft { &vop_default_desc, vn_default_error }, 135 1.182 dholland { &vop_parsepath_desc, genfs_parsepath }, /* parsepath */ 136 1.15 mycroft { &vop_lookup_desc, spec_lookup }, /* lookup */ 137 1.183 dholland { &vop_create_desc, genfs_badop }, /* create */ 138 1.183 dholland { &vop_mknod_desc, genfs_badop }, /* mknod */ 139 1.15 mycroft { &vop_open_desc, spec_open }, /* open */ 140 1.15 mycroft { &vop_close_desc, spec_close }, /* close */ 141 1.183 dholland { &vop_access_desc, genfs_ebadf }, /* access */ 142 1.183 dholland { &vop_accessx_desc, genfs_ebadf }, /* accessx */ 143 1.183 dholland { &vop_getattr_desc, genfs_ebadf }, /* getattr */ 144 1.183 dholland { &vop_setattr_desc, genfs_ebadf }, /* setattr */ 145 1.15 mycroft { &vop_read_desc, spec_read }, /* read */ 146 1.15 mycroft { &vop_write_desc, spec_write }, /* write */ 147 1.183 dholland { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */ 148 1.145 dholland { &vop_fdiscard_desc, spec_fdiscard }, /* fdiscard */ 149 1.183 dholland { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 150 1.15 mycroft { &vop_ioctl_desc, spec_ioctl }, /* ioctl */ 151 1.32 mycroft { &vop_poll_desc, spec_poll }, /* poll */ 152 1.65 jdolecek { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */ 153 1.183 dholland { &vop_revoke_desc, genfs_revoke }, /* revoke */ 154 1.15 mycroft { &vop_mmap_desc, spec_mmap }, /* mmap */ 155 1.15 mycroft { &vop_fsync_desc, spec_fsync }, /* fsync */ 156 1.15 mycroft { &vop_seek_desc, spec_seek }, /* seek */ 157 1.183 dholland { &vop_remove_desc, genfs_badop }, /* remove */ 158 1.183 dholland { &vop_link_desc, genfs_badop }, /* link */ 159 1.183 dholland { &vop_rename_desc, genfs_badop }, /* rename */ 160 1.183 dholland { &vop_mkdir_desc, genfs_badop }, /* mkdir */ 161 1.183 dholland { &vop_rmdir_desc, genfs_badop }, /* rmdir */ 162 1.183 dholland { &vop_symlink_desc, genfs_badop }, /* symlink */ 163 1.183 dholland { &vop_readdir_desc, genfs_badop }, /* readdir */ 164 1.183 dholland { &vop_readlink_desc, genfs_badop }, /* readlink */ 165 1.183 dholland { &vop_abortop_desc, genfs_badop }, /* abortop */ 166 1.15 mycroft { &vop_inactive_desc, spec_inactive }, /* inactive */ 167 1.15 mycroft { &vop_reclaim_desc, spec_reclaim }, /* reclaim */ 168 1.184 hannken { &vop_lock_desc, genfs_lock }, /* lock */ 169 1.184 hannken { &vop_unlock_desc, genfs_unlock }, /* unlock */ 170 1.15 mycroft { &vop_bmap_desc, spec_bmap }, /* bmap */ 171 1.15 mycroft { &vop_strategy_desc, spec_strategy }, /* strategy */ 172 1.15 mycroft { &vop_print_desc, spec_print }, /* print */ 173 1.184 hannken { &vop_islocked_desc, genfs_islocked }, /* islocked */ 174 1.15 mycroft { &vop_pathconf_desc, spec_pathconf }, /* pathconf */ 175 1.15 mycroft { &vop_advlock_desc, spec_advlock }, /* advlock */ 176 1.183 dholland { &vop_bwrite_desc, vn_bwrite }, /* bwrite */ 177 1.183 dholland { &vop_getpages_desc, genfs_getpages }, /* getpages */ 178 1.183 dholland { &vop_putpages_desc, genfs_putpages }, /* putpages */ 179 1.55 chs { NULL, NULL } 180 1.1 cgd }; 181 1.53 jdolecek const struct vnodeopv_desc spec_vnodeop_opv_desc = 182 1.15 mycroft { &spec_vnodeop_p, spec_vnodeop_entries }; 183 1.1 cgd 184 1.127 elad static kauth_listener_t rawio_listener; 185 1.202 riastrad static struct kcondvar specfs_iocv; 186 1.127 elad 187 1.218 riastrad /* 188 1.218 riastrad * Returns true if vnode is /dev/mem or /dev/kmem. 
189 1.218 riastrad */ 190 1.126 elad bool 191 1.126 elad iskmemvp(struct vnode *vp) 192 1.126 elad { 193 1.126 elad return ((vp->v_type == VCHR) && iskmemdev(vp->v_rdev)); 194 1.126 elad } 195 1.126 elad 196 1.1 cgd /* 197 1.112 ad * Returns true if dev is /dev/mem or /dev/kmem. 198 1.112 ad */ 199 1.112 ad int 200 1.112 ad iskmemdev(dev_t dev) 201 1.112 ad { 202 1.112 ad /* mem_no is emitted by config(8) to generated devsw.c */ 203 1.112 ad extern const int mem_no; 204 1.112 ad 205 1.112 ad /* minor 14 is /dev/io on i386 with COMPAT_10 */ 206 1.112 ad return (major(dev) == mem_no && (minor(dev) < 2 || minor(dev) == 14)); 207 1.112 ad } 208 1.112 ad 209 1.127 elad static int 210 1.127 elad rawio_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, 211 1.127 elad void *arg0, void *arg1, void *arg2, void *arg3) 212 1.127 elad { 213 1.127 elad int result; 214 1.127 elad 215 1.127 elad result = KAUTH_RESULT_DEFER; 216 1.127 elad 217 1.127 elad if ((action != KAUTH_DEVICE_RAWIO_SPEC) && 218 1.127 elad (action != KAUTH_DEVICE_RAWIO_PASSTHRU)) 219 1.127 elad return result; 220 1.127 elad 221 1.127 elad /* Access is mandated by permissions. */ 222 1.127 elad result = KAUTH_RESULT_ALLOW; 223 1.127 elad 224 1.127 elad return result; 225 1.127 elad } 226 1.127 elad 227 1.127 elad void 228 1.127 elad spec_init(void) 229 1.127 elad { 230 1.127 elad 231 1.127 elad rawio_listener = kauth_listen_scope(KAUTH_SCOPE_DEVICE, 232 1.127 elad rawio_listener_cb, NULL); 233 1.202 riastrad cv_init(&specfs_iocv, "specio"); 234 1.202 riastrad } 235 1.202 riastrad 236 1.202 riastrad /* 237 1.202 riastrad * spec_io_enter(vp, &sn, &dev) 238 1.202 riastrad * 239 1.202 riastrad * Enter an operation that may not hold vp's vnode lock or an 240 1.202 riastrad * fstrans on vp's mount. Until spec_io_exit, the vnode will not 241 1.202 riastrad * be revoked. 242 1.202 riastrad * 243 1.202 riastrad * On success, set sn to the specnode pointer and dev to the dev_t 244 1.202 riastrad * number and return zero. Caller must later call spec_io_exit 245 1.202 riastrad * when done. 246 1.202 riastrad * 247 1.202 riastrad * On failure, return ENXIO -- the device has been revoked and no 248 1.202 riastrad * longer exists. 249 1.202 riastrad */ 250 1.202 riastrad static int 251 1.202 riastrad spec_io_enter(struct vnode *vp, struct specnode **snp, dev_t *devp) 252 1.202 riastrad { 253 1.202 riastrad dev_t dev; 254 1.202 riastrad struct specnode *sn; 255 1.202 riastrad unsigned iocnt; 256 1.202 riastrad int error = 0; 257 1.202 riastrad 258 1.202 riastrad mutex_enter(vp->v_interlock); 259 1.202 riastrad 260 1.202 riastrad /* 261 1.202 riastrad * Extract all the info we need from the vnode, unless the 262 1.202 riastrad * vnode has already been reclaimed. This can happen if the 263 1.202 riastrad * underlying device has been removed and all the device nodes 264 1.202 riastrad * for it have been revoked. The caller may not hold a vnode 265 1.202 riastrad * lock or fstrans to prevent this from happening before it has 266 1.202 riastrad * had an opportunity to notice the vnode is dead. 
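	 *
	 * To illustrate the contract (a sketch, not verbatim from any
	 * caller): a typical user of this pair -- compare spec_ioctl
	 * below -- brackets one device call, without the vnode lock,
	 * where com/data/fflag stand for the ioctl arguments:
	 *
	 *	error = spec_io_enter(vp, &sn, &dev);
	 *	if (error)
	 *		return error;
	 *	error = cdev_ioctl(dev, com, data, fflag, curlwp);
	 *	spec_io_exit(vp, sn);
	 *	return error;
	 *
	 * A concurrent revoke either causes spec_io_enter to fail with
	 * ENXIO, or waits in spec_io_drain until the spec_io_exit.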
267 1.202 riastrad 	 */
268 1.202 riastrad 	if (vdead_check(vp, VDEAD_NOWAIT) != 0 ||
269 1.202 riastrad 	    (sn = vp->v_specnode) == NULL ||
270 1.202 riastrad 	    (dev = vp->v_rdev) == NODEV) {
271 1.202 riastrad 		error = ENXIO;
272 1.202 riastrad 		goto out;
273 1.202 riastrad 	}
274 1.202 riastrad 
275 1.202 riastrad 	/*
276 1.202 riastrad 	 * Notify spec_close that we are doing an I/O operation which
277 1.202 riastrad 	 * may not be bracketed by fstrans(9) and thus is not
278 1.202 riastrad 	 * blocked by vfs suspension.
279 1.202 riastrad 	 *
280 1.202 riastrad 	 * We could hold this reference with psref(9) instead, but we
281 1.202 riastrad 	 * already have to take the interlock for vdead_check, so
282 1.202 riastrad 	 * there's not much more cost here to another atomic operation.
283 1.202 riastrad 	 */
284 1.202 riastrad 	do {
285 1.202 riastrad 		iocnt = atomic_load_relaxed(&sn->sn_dev->sd_iocnt);
286 1.202 riastrad 		if (__predict_false(iocnt == UINT_MAX)) {
287 1.202 riastrad 			/*
288 1.202 riastrad 			 * The I/O count is limited by the number of
289 1.202 riastrad 			 * LWPs (which will never overflow this) --
290 1.202 riastrad 			 * unless one driver uses another driver via
291 1.202 riastrad 			 * specfs, which is rather unusual, but which
292 1.202 riastrad 			 * could happen via pud(4) userspace drivers.
293 1.202 riastrad 			 * We could use a 64-bit count, but can't use
294 1.202 riastrad 			 * atomics for that on all platforms.
295 1.202 riastrad 			 * (Probably better to switch to psref or
296 1.202 riastrad 			 * localcount instead.)
297 1.202 riastrad 			 */
298 1.202 riastrad 			error = EBUSY;
299 1.202 riastrad 			goto out;
300 1.202 riastrad 		}
301 1.202 riastrad 	} while (atomic_cas_uint(&sn->sn_dev->sd_iocnt, iocnt, iocnt + 1)
302 1.202 riastrad 	    != iocnt);
303 1.202 riastrad 
304 1.202 riastrad 	/* Success! */
305 1.202 riastrad 	*snp = sn;
306 1.202 riastrad 	*devp = dev;
307 1.202 riastrad 	error = 0;
308 1.202 riastrad 
309 1.202 riastrad out:	mutex_exit(vp->v_interlock);
310 1.202 riastrad 	return error;
311 1.202 riastrad }
312 1.202 riastrad 
313 1.202 riastrad /*
314 1.202 riastrad  * spec_io_exit(vp, sn)
315 1.202 riastrad  *
316 1.202 riastrad  *	Exit an operation entered with a successful spec_io_enter --
317 1.202 riastrad  *	allow concurrent spec_node_revoke to proceed.  The argument sn
318 1.202 riastrad  *	must match the struct specnode pointer returned by spec_io_enter
319 1.202 riastrad  *	for vp.
320 1.202 riastrad  */
321 1.202 riastrad static void
322 1.202 riastrad spec_io_exit(struct vnode *vp, struct specnode *sn)
323 1.202 riastrad {
324 1.202 riastrad 	struct specdev *sd = sn->sn_dev;
325 1.202 riastrad 	unsigned iocnt;
326 1.202 riastrad 
327 1.202 riastrad 	KASSERT(vp->v_specnode == sn);
328 1.202 riastrad 
329 1.202 riastrad 	/*
330 1.202 riastrad 	 * We are done.  Notify spec_close if appropriate.  The
331 1.202 riastrad 	 * transition of 1 -> 0 must happen under device_lock so
332 1.202 riastrad 	 * spec_close doesn't miss a wakeup.
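	 *
	 * Holding device_lock across the final decrement pairs with
	 * spec_io_drain, which checks sd_iocnt and calls cv_wait with
	 * device_lock held: since the broadcast below also happens
	 * under device_lock, it cannot fire between spec_io_drain's
	 * check of sd_iocnt and its cv_wait, so the wakeup is never
	 * lost.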
333 1.202 riastrad */ 334 1.202 riastrad do { 335 1.202 riastrad iocnt = atomic_load_relaxed(&sd->sd_iocnt); 336 1.202 riastrad KASSERT(iocnt > 0); 337 1.202 riastrad if (iocnt == 1) { 338 1.202 riastrad mutex_enter(&device_lock); 339 1.202 riastrad if (atomic_dec_uint_nv(&sd->sd_iocnt) == 0) 340 1.202 riastrad cv_broadcast(&specfs_iocv); 341 1.202 riastrad mutex_exit(&device_lock); 342 1.202 riastrad break; 343 1.202 riastrad } 344 1.202 riastrad } while (atomic_cas_uint(&sd->sd_iocnt, iocnt, iocnt - 1) != iocnt); 345 1.202 riastrad } 346 1.202 riastrad 347 1.202 riastrad /* 348 1.202 riastrad * spec_io_drain(sd) 349 1.202 riastrad * 350 1.202 riastrad * Wait for all existing spec_io_enter/exit sections to complete. 351 1.202 riastrad * Caller must ensure spec_io_enter will fail at this point. 352 1.202 riastrad */ 353 1.202 riastrad static void 354 1.202 riastrad spec_io_drain(struct specdev *sd) 355 1.202 riastrad { 356 1.202 riastrad 357 1.202 riastrad /* 358 1.202 riastrad * I/O at the same time as closing is unlikely -- it often 359 1.202 riastrad * indicates an application bug. 360 1.202 riastrad */ 361 1.202 riastrad if (__predict_true(atomic_load_relaxed(&sd->sd_iocnt) == 0)) 362 1.202 riastrad return; 363 1.202 riastrad 364 1.202 riastrad mutex_enter(&device_lock); 365 1.202 riastrad while (atomic_load_relaxed(&sd->sd_iocnt) > 0) 366 1.202 riastrad cv_wait(&specfs_iocv, &device_lock); 367 1.202 riastrad mutex_exit(&device_lock); 368 1.127 elad } 369 1.127 elad 370 1.112 ad /* 371 1.112 ad * Initialize a vnode that represents a device. 372 1.112 ad */ 373 1.112 ad void 374 1.112 ad spec_node_init(vnode_t *vp, dev_t rdev) 375 1.112 ad { 376 1.112 ad specnode_t *sn; 377 1.112 ad specdev_t *sd; 378 1.112 ad vnode_t *vp2; 379 1.112 ad vnode_t **vpp; 380 1.112 ad 381 1.112 ad KASSERT(vp->v_type == VBLK || vp->v_type == VCHR); 382 1.112 ad KASSERT(vp->v_specnode == NULL); 383 1.112 ad 384 1.112 ad /* 385 1.112 ad * Search the hash table for this device. If known, add a 386 1.112 ad * reference to the device structure. If not known, create 387 1.112 ad * a new entry to represent the device. In all cases add 388 1.112 ad * the vnode to the hash table. 389 1.112 ad */ 390 1.112 ad sn = kmem_alloc(sizeof(*sn), KM_SLEEP); 391 1.112 ad sd = kmem_alloc(sizeof(*sd), KM_SLEEP); 392 1.120 pooka mutex_enter(&device_lock); 393 1.112 ad vpp = &specfs_hash[SPECHASH(rdev)]; 394 1.112 ad for (vp2 = *vpp; vp2 != NULL; vp2 = vp2->v_specnext) { 395 1.112 ad KASSERT(vp2->v_specnode != NULL); 396 1.112 ad if (rdev == vp2->v_rdev && vp->v_type == vp2->v_type) { 397 1.112 ad break; 398 1.112 ad } 399 1.112 ad } 400 1.112 ad if (vp2 == NULL) { 401 1.112 ad /* No existing record, create a new one. */ 402 1.112 ad sd->sd_mountpoint = NULL; 403 1.112 ad sd->sd_lockf = NULL; 404 1.112 ad sd->sd_refcnt = 1; 405 1.112 ad sd->sd_opencnt = 0; 406 1.112 ad sd->sd_bdevvp = NULL; 407 1.202 riastrad sd->sd_iocnt = 0; 408 1.201 riastrad sd->sd_opened = false; 409 1.204 riastrad sd->sd_closing = false; 410 1.112 ad sn->sn_dev = sd; 411 1.112 ad sd = NULL; 412 1.112 ad } else { 413 1.112 ad /* Use the existing record. */ 414 1.112 ad sn->sn_dev = vp2->v_specnode->sn_dev; 415 1.112 ad sn->sn_dev->sd_refcnt++; 416 1.112 ad } 417 1.112 ad /* Insert vnode into the hash chain. 
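	 * The chain is the singly-linked v_specnext list rooted at
	 * specfs_hash[SPECHASH(rdev)]; spec_node_lookup_by_dev scans
	 * it under device_lock.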
*/ 418 1.112 ad sn->sn_opencnt = 0; 419 1.112 ad sn->sn_rdev = rdev; 420 1.112 ad sn->sn_gone = false; 421 1.112 ad vp->v_specnode = sn; 422 1.112 ad vp->v_specnext = *vpp; 423 1.112 ad *vpp = vp; 424 1.120 pooka mutex_exit(&device_lock); 425 1.112 ad 426 1.112 ad /* Free the record we allocated if unused. */ 427 1.112 ad if (sd != NULL) { 428 1.112 ad kmem_free(sd, sizeof(*sd)); 429 1.112 ad } 430 1.112 ad } 431 1.112 ad 432 1.112 ad /* 433 1.137 hannken * Lookup a vnode by device number and return it referenced. 434 1.137 hannken */ 435 1.137 hannken int 436 1.208 riastrad spec_node_lookup_by_dev(enum vtype type, dev_t dev, int flags, vnode_t **vpp) 437 1.137 hannken { 438 1.137 hannken int error; 439 1.137 hannken vnode_t *vp; 440 1.137 hannken 441 1.208 riastrad top: mutex_enter(&device_lock); 442 1.137 hannken for (vp = specfs_hash[SPECHASH(dev)]; vp; vp = vp->v_specnext) { 443 1.137 hannken if (type == vp->v_type && dev == vp->v_rdev) { 444 1.137 hannken mutex_enter(vp->v_interlock); 445 1.137 hannken /* If clean or being cleaned, then ignore it. */ 446 1.143 hannken if (vdead_check(vp, VDEAD_NOWAIT) == 0) 447 1.137 hannken break; 448 1.208 riastrad if ((flags & VDEAD_NOWAIT) == 0) { 449 1.208 riastrad mutex_exit(&device_lock); 450 1.208 riastrad /* 451 1.208 riastrad * It may be being revoked as we speak, 452 1.208 riastrad * and the caller wants to wait until 453 1.208 riastrad * all revocation has completed. Let 454 1.208 riastrad * vcache_vget wait for it to finish 455 1.208 riastrad * dying; as a side effect, vcache_vget 456 1.208 riastrad * releases vp->v_interlock. Note that 457 1.208 riastrad * vcache_vget cannot succeed at this 458 1.208 riastrad * point because vdead_check already 459 1.208 riastrad * failed. 460 1.208 riastrad */ 461 1.208 riastrad error = vcache_vget(vp); 462 1.208 riastrad KASSERT(error); 463 1.208 riastrad goto top; 464 1.208 riastrad } 465 1.137 hannken mutex_exit(vp->v_interlock); 466 1.137 hannken } 467 1.137 hannken } 468 1.137 hannken KASSERT(vp == NULL || mutex_owned(vp->v_interlock)); 469 1.137 hannken if (vp == NULL) { 470 1.137 hannken mutex_exit(&device_lock); 471 1.137 hannken return ENOENT; 472 1.137 hannken } 473 1.137 hannken /* 474 1.137 hannken * If it is an opened block device return the opened vnode. 475 1.137 hannken */ 476 1.137 hannken if (type == VBLK && vp->v_specnode->sn_dev->sd_bdevvp != NULL) { 477 1.137 hannken mutex_exit(vp->v_interlock); 478 1.137 hannken vp = vp->v_specnode->sn_dev->sd_bdevvp; 479 1.137 hannken mutex_enter(vp->v_interlock); 480 1.137 hannken } 481 1.137 hannken mutex_exit(&device_lock); 482 1.168 hannken error = vcache_vget(vp); 483 1.218 riastrad if (error) 484 1.137 hannken return error; 485 1.137 hannken *vpp = vp; 486 1.137 hannken 487 1.137 hannken return 0; 488 1.137 hannken } 489 1.137 hannken 490 1.137 hannken /* 491 1.137 hannken * Lookup a vnode by file system mounted on and return it referenced. 
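 *
 * A caller might use it as follows (a sketch; the vnode comes back
 * referenced but unlocked, so it is released with vrele):
 *
 *	vnode_t *vp;
 *
 *	if (spec_node_lookup_by_mount(mp, &vp) == 0) {
 *		...examine the device vnode...
 *		vrele(vp);
 *	}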
492 1.137 hannken  */
493 1.137 hannken int
494 1.137 hannken spec_node_lookup_by_mount(struct mount *mp, vnode_t **vpp)
495 1.137 hannken {
496 1.137 hannken 	int i, error;
497 1.137 hannken 	vnode_t *vp, *vq;
498 1.137 hannken 
499 1.137 hannken 	mutex_enter(&device_lock);
500 1.137 hannken 	for (i = 0, vq = NULL; i < SPECHSZ && vq == NULL; i++) {
501 1.137 hannken 		for (vp = specfs_hash[i]; vp; vp = vp->v_specnext) {
502 1.137 hannken 			if (vp->v_type != VBLK)
503 1.137 hannken 				continue;
504 1.137 hannken 			vq = vp->v_specnode->sn_dev->sd_bdevvp;
505 1.141 hannken 			if (vq != NULL &&
506 1.141 hannken 			    vq->v_specnode->sn_dev->sd_mountpoint == mp)
507 1.137 hannken 				break;
508 1.137 hannken 			vq = NULL;
509 1.137 hannken 		}
510 1.137 hannken 	}
511 1.137 hannken 	if (vq == NULL) {
512 1.137 hannken 		mutex_exit(&device_lock);
513 1.137 hannken 		return ENOENT;
514 1.137 hannken 	}
515 1.137 hannken 	mutex_enter(vq->v_interlock);
516 1.137 hannken 	mutex_exit(&device_lock);
517 1.168 hannken 	error = vcache_vget(vq);
518 1.218 riastrad 	if (error)
519 1.137 hannken 		return error;
520 1.137 hannken 	*vpp = vq;
521 1.137 hannken 
522 1.137 hannken 	return 0;
523 1.137 hannken 
524 1.137 hannken }
525 1.137 hannken 
526 1.137 hannken /*
527 1.141 hannken  * Get the file system mounted on this block device.
528 1.203 riastrad  *
529 1.203 riastrad  * XXX Caller should hold the vnode lock -- shared or exclusive -- so
530 1.203 riastrad  * that this can't be changed, and the vnode can't be revoked while we
531 1.203 riastrad  * examine it.  But not all callers do, and they're scattered through a
532 1.203 riastrad  * lot of file systems, so we can't assert this yet.
533 1.141 hannken  */
534 1.141 hannken struct mount *
535 1.141 hannken spec_node_getmountedfs(vnode_t *devvp)
536 1.141 hannken {
537 1.141 hannken 	struct mount *mp;
538 1.141 hannken 
539 1.141 hannken 	KASSERT(devvp->v_type == VBLK);
540 1.141 hannken 	mp = devvp->v_specnode->sn_dev->sd_mountpoint;
541 1.141 hannken 
542 1.141 hannken 	return mp;
543 1.141 hannken }
544 1.141 hannken 
545 1.141 hannken /*
546 1.141 hannken  * Set the file system mounted on this block device.
547 1.203 riastrad  *
548 1.203 riastrad  * XXX Caller should hold the vnode lock exclusively so this can't be
549 1.203 riastrad  * changed or assumed by spec_node_getmountedfs while we change it, and
550 1.203 riastrad  * the vnode can't be revoked while we handle it.  But not all callers
551 1.203 riastrad  * do, and they're scattered through a lot of file systems, so we can't
552 1.203 riastrad  * assert this yet.  Instead, for now, we'll take an I/O reference so
553 1.203 riastrad  * at least the ioctl doesn't race with revoke/detach.
554 1.203 riastrad  *
555 1.203 riastrad  * If you do change this to assert an exclusive vnode lock, you must
556 1.203 riastrad  * also do vdead_check before trying bdev_ioctl, because the vnode may
557 1.203 riastrad  * have been revoked by the time the caller locked it, and this is
558 1.203 riastrad  * _not_ a vop -- calls to spec_node_setmountedfs don't go through
559 1.203 riastrad  * v_op, so revoking the vnode doesn't prevent further calls.
560 1.203 riastrad  *
561 1.203 riastrad  * XXX Caller should additionally have the vnode open, at least if mp
562 1.203 riastrad  * is nonnull, but I'm not sure all callers do that -- need to audit.
563 1.203 riastrad  * Currently udf closes the vnode before clearing the mount.
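 *
 * The intended pattern, which the KASSERT on sd_mountpoint below
 * enforces, is to set the mounted file system only when none is set,
 * and to clear it only by passing NULL (sketch):
 *
 *	spec_node_setmountedfs(devvp, mp);	at mount time
 *	...
 *	spec_node_setmountedfs(devvp, NULL);	at unmount time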
564 1.141 hannken */ 565 1.141 hannken void 566 1.141 hannken spec_node_setmountedfs(vnode_t *devvp, struct mount *mp) 567 1.141 hannken { 568 1.176 christos struct dkwedge_info dkw; 569 1.203 riastrad struct specnode *sn; 570 1.203 riastrad dev_t dev; 571 1.203 riastrad int error; 572 1.141 hannken 573 1.141 hannken KASSERT(devvp->v_type == VBLK); 574 1.203 riastrad 575 1.203 riastrad error = spec_io_enter(devvp, &sn, &dev); 576 1.203 riastrad if (error) 577 1.203 riastrad return; 578 1.203 riastrad 579 1.203 riastrad KASSERT(sn->sn_dev->sd_mountpoint == NULL || mp == NULL); 580 1.203 riastrad sn->sn_dev->sd_mountpoint = mp; 581 1.176 christos if (mp == NULL) 582 1.203 riastrad goto out; 583 1.176 christos 584 1.203 riastrad error = bdev_ioctl(dev, DIOCGWEDGEINFO, &dkw, FREAD, curlwp); 585 1.203 riastrad if (error) 586 1.203 riastrad goto out; 587 1.176 christos 588 1.176 christos strlcpy(mp->mnt_stat.f_mntfromlabel, dkw.dkw_wname, 589 1.176 christos sizeof(mp->mnt_stat.f_mntfromlabel)); 590 1.203 riastrad 591 1.203 riastrad out: spec_io_exit(devvp, sn); 592 1.141 hannken } 593 1.141 hannken 594 1.141 hannken /* 595 1.112 ad * A vnode representing a special device is going away. Close 596 1.112 ad * the device if the vnode holds it open. 597 1.112 ad */ 598 1.112 ad void 599 1.112 ad spec_node_revoke(vnode_t *vp) 600 1.112 ad { 601 1.112 ad specnode_t *sn; 602 1.112 ad specdev_t *sd; 603 1.209 riastrad struct vnode **vpp; 604 1.112 ad 605 1.195 riastrad KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE); 606 1.195 riastrad 607 1.112 ad sn = vp->v_specnode; 608 1.112 ad sd = sn->sn_dev; 609 1.112 ad 610 1.112 ad KASSERT(vp->v_type == VBLK || vp->v_type == VCHR); 611 1.112 ad KASSERT(vp->v_specnode != NULL); 612 1.112 ad KASSERT(sn->sn_gone == false); 613 1.112 ad 614 1.120 pooka mutex_enter(&device_lock); 615 1.211 riastrad KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt, 616 1.211 riastrad "sn_opencnt=%u > sd_opencnt=%u", 617 1.211 riastrad sn->sn_opencnt, sd->sd_opencnt); 618 1.209 riastrad sn->sn_gone = true; 619 1.112 ad if (sn->sn_opencnt != 0) { 620 1.112 ad sd->sd_opencnt -= (sn->sn_opencnt - 1); 621 1.112 ad sn->sn_opencnt = 1; 622 1.120 pooka mutex_exit(&device_lock); 623 1.112 ad 624 1.112 ad VOP_CLOSE(vp, FNONBLOCK, NOCRED); 625 1.112 ad 626 1.120 pooka mutex_enter(&device_lock); 627 1.112 ad KASSERT(sn->sn_opencnt == 0); 628 1.112 ad } 629 1.205 riastrad 630 1.205 riastrad /* 631 1.205 riastrad * We may have revoked the vnode in this thread while another 632 1.205 riastrad * thread was in the middle of spec_close, in the window when 633 1.205 riastrad * spec_close releases the vnode lock to call .d_close for the 634 1.205 riastrad * last close. In that case, wait for the concurrent 635 1.205 riastrad * spec_close to complete. 636 1.205 riastrad */ 637 1.205 riastrad while (sd->sd_closing) 638 1.205 riastrad cv_wait(&specfs_iocv, &device_lock); 639 1.209 riastrad 640 1.209 riastrad /* 641 1.209 riastrad * Remove from the hash so lookups stop returning this 642 1.209 riastrad * specnode. We will dissociate it from the specdev -- and 643 1.209 riastrad * possibly free the specdev -- in spec_node_destroy. 
644 1.209 riastrad */ 645 1.209 riastrad KASSERT(sn->sn_gone); 646 1.209 riastrad KASSERT(sn->sn_opencnt == 0); 647 1.209 riastrad for (vpp = &specfs_hash[SPECHASH(vp->v_rdev)];; 648 1.209 riastrad vpp = &(*vpp)->v_specnext) { 649 1.209 riastrad if (*vpp == vp) { 650 1.209 riastrad *vpp = vp->v_specnext; 651 1.209 riastrad vp->v_specnext = NULL; 652 1.209 riastrad break; 653 1.209 riastrad } 654 1.209 riastrad } 655 1.120 pooka mutex_exit(&device_lock); 656 1.112 ad } 657 1.112 ad 658 1.112 ad /* 659 1.112 ad * A vnode representing a special device is being recycled. 660 1.112 ad * Destroy the specfs component. 661 1.112 ad */ 662 1.112 ad void 663 1.112 ad spec_node_destroy(vnode_t *vp) 664 1.112 ad { 665 1.112 ad specnode_t *sn; 666 1.112 ad specdev_t *sd; 667 1.112 ad int refcnt; 668 1.112 ad 669 1.112 ad sn = vp->v_specnode; 670 1.112 ad sd = sn->sn_dev; 671 1.112 ad 672 1.112 ad KASSERT(vp->v_type == VBLK || vp->v_type == VCHR); 673 1.112 ad KASSERT(vp->v_specnode != NULL); 674 1.112 ad KASSERT(sn->sn_opencnt == 0); 675 1.112 ad 676 1.120 pooka mutex_enter(&device_lock); 677 1.112 ad sn = vp->v_specnode; 678 1.112 ad vp->v_specnode = NULL; 679 1.112 ad refcnt = sd->sd_refcnt--; 680 1.112 ad KASSERT(refcnt > 0); 681 1.120 pooka mutex_exit(&device_lock); 682 1.112 ad 683 1.112 ad /* If the device is no longer in use, destroy our record. */ 684 1.112 ad if (refcnt == 1) { 685 1.202 riastrad KASSERT(sd->sd_iocnt == 0); 686 1.112 ad KASSERT(sd->sd_opencnt == 0); 687 1.112 ad KASSERT(sd->sd_bdevvp == NULL); 688 1.112 ad kmem_free(sd, sizeof(*sd)); 689 1.112 ad } 690 1.112 ad kmem_free(sn, sizeof(*sn)); 691 1.112 ad } 692 1.112 ad 693 1.112 ad /* 694 1.1 cgd * Trivial lookup routine that always fails. 695 1.1 cgd */ 696 1.4 andrew int 697 1.104 pooka spec_lookup(void *v) 698 1.28 christos { 699 1.142 hannken struct vop_lookup_v2_args /* { 700 1.15 mycroft struct vnode *a_dvp; 701 1.15 mycroft struct vnode **a_vpp; 702 1.15 mycroft struct componentname *a_cnp; 703 1.28 christos } */ *ap = v; 704 1.1 cgd 705 1.15 mycroft *ap->a_vpp = NULL; 706 1.218 riastrad return ENOTDIR; 707 1.66 jdolecek } 708 1.66 jdolecek 709 1.154 christos typedef int (*spec_ioctl_t)(dev_t, u_long, void *, int, struct lwp *); 710 1.154 christos 711 1.66 jdolecek /* 712 1.15 mycroft * Open a special file. 713 1.1 cgd */ 714 1.1 cgd /* ARGSUSED */ 715 1.28 christos int 716 1.104 pooka spec_open(void *v) 717 1.28 christos { 718 1.15 mycroft struct vop_open_args /* { 719 1.15 mycroft struct vnode *a_vp; 720 1.15 mycroft int a_mode; 721 1.87 elad kauth_cred_t a_cred; 722 1.28 christos } */ *ap = v; 723 1.202 riastrad struct lwp *l = curlwp; 724 1.202 riastrad struct vnode *vp = ap->a_vp; 725 1.206 riastrad dev_t dev, dev1; 726 1.1 cgd int error; 727 1.96 elad enum kauth_device_req req; 728 1.206 riastrad specnode_t *sn, *sn1; 729 1.112 ad specdev_t *sd; 730 1.219 mlelstv int dtype; 731 1.154 christos spec_ioctl_t ioctl; 732 1.202 riastrad u_int gen = 0; 733 1.202 riastrad const char *name = NULL; 734 1.201 riastrad bool needclose = false; 735 1.202 riastrad 736 1.202 riastrad KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE); 737 1.202 riastrad KASSERTMSG(vp->v_type == VBLK || vp->v_type == VCHR, "type=%d", 738 1.202 riastrad vp->v_type); 739 1.202 riastrad 740 1.112 ad dev = vp->v_rdev; 741 1.112 ad sn = vp->v_specnode; 742 1.112 ad sd = sn->sn_dev; 743 1.188 riastrad 744 1.15 mycroft /* 745 1.15 mycroft * Don't allow open if fs is mounted -nodev. 
746 1.15 mycroft */ 747 1.1 cgd if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) 748 1.218 riastrad return ENXIO; 749 1.1 cgd 750 1.112 ad switch (ap->a_mode & (FREAD | FWRITE)) { 751 1.112 ad case FREAD | FWRITE: 752 1.112 ad req = KAUTH_REQ_DEVICE_RAWIO_SPEC_RW; 753 1.112 ad break; 754 1.112 ad case FWRITE: 755 1.112 ad req = KAUTH_REQ_DEVICE_RAWIO_SPEC_WRITE; 756 1.112 ad break; 757 1.112 ad default: 758 1.112 ad req = KAUTH_REQ_DEVICE_RAWIO_SPEC_READ; 759 1.112 ad break; 760 1.112 ad } 761 1.189 riastrad error = kauth_authorize_device_spec(ap->a_cred, req, vp); 762 1.218 riastrad if (error) 763 1.218 riastrad return error; 764 1.89 elad 765 1.190 riastrad /* 766 1.190 riastrad * Acquire an open reference -- as long as we hold onto it, and 767 1.195 riastrad * the vnode isn't revoked, it can't be closed, and the vnode 768 1.195 riastrad * can't be revoked until we release the vnode lock. 769 1.190 riastrad */ 770 1.192 riastrad mutex_enter(&device_lock); 771 1.196 riastrad KASSERT(!sn->sn_gone); 772 1.1 cgd switch (vp->v_type) { 773 1.1 cgd case VCHR: 774 1.112 ad /* 775 1.112 ad * Character devices can accept opens from multiple 776 1.204 riastrad * vnodes. But first, wait for any close to finish. 777 1.204 riastrad * Wait under the vnode lock so we don't have to worry 778 1.204 riastrad * about the vnode being revoked while we wait. 779 1.112 ad */ 780 1.204 riastrad while (sd->sd_closing) { 781 1.204 riastrad error = cv_wait_sig(&specfs_iocv, &device_lock); 782 1.204 riastrad if (error) 783 1.204 riastrad break; 784 1.204 riastrad } 785 1.204 riastrad if (error) 786 1.204 riastrad break; 787 1.112 ad sd->sd_opencnt++; 788 1.112 ad sn->sn_opencnt++; 789 1.211 riastrad KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt, 790 1.211 riastrad "sn_opencnt=%u > sd_opencnt=%u", 791 1.211 riastrad sn->sn_opencnt, sd->sd_opencnt); 792 1.190 riastrad break; 793 1.190 riastrad case VBLK: 794 1.190 riastrad /* 795 1.190 riastrad * For block devices, permit only one open. The buffer 796 1.190 riastrad * cache cannot remain self-consistent with multiple 797 1.190 riastrad * vnodes holding a block device open. 798 1.190 riastrad * 799 1.190 riastrad * Treat zero opencnt with non-NULL mountpoint as open. 800 1.190 riastrad * This may happen after forced detach of a mounted device. 801 1.214 riastrad * 802 1.214 riastrad * Also treat sd_closing, meaning there is a concurrent 803 1.214 riastrad * close in progress, as still open. 804 1.190 riastrad */ 805 1.214 riastrad if (sd->sd_opencnt != 0 || 806 1.214 riastrad sd->sd_mountpoint != NULL || 807 1.214 riastrad sd->sd_closing) { 808 1.192 riastrad error = EBUSY; 809 1.192 riastrad break; 810 1.190 riastrad } 811 1.211 riastrad KASSERTMSG(sn->sn_opencnt == 0, "sn_opencnt=%u", 812 1.211 riastrad sn->sn_opencnt); 813 1.190 riastrad sn->sn_opencnt = 1; 814 1.190 riastrad sd->sd_opencnt = 1; 815 1.190 riastrad sd->sd_bdevvp = vp; 816 1.190 riastrad break; 817 1.190 riastrad default: 818 1.190 riastrad panic("invalid specfs vnode type: %d", vp->v_type); 819 1.190 riastrad } 820 1.192 riastrad mutex_exit(&device_lock); 821 1.192 riastrad if (error) 822 1.192 riastrad return error; 823 1.190 riastrad 824 1.190 riastrad /* 825 1.190 riastrad * Set VV_ISTTY if this is a tty cdev. 826 1.190 riastrad * 827 1.190 riastrad * XXX This does the wrong thing if the module has to be 828 1.190 riastrad * autoloaded. 
We should maybe set this after autoloading 829 1.190 riastrad * modules and calling .d_open successfully, except (a) we need 830 1.190 riastrad * the vnode lock to touch it, and (b) once we acquire the 831 1.190 riastrad * vnode lock again, the vnode may have been revoked, and 832 1.190 riastrad * deadfs's dead_read needs VV_ISTTY to be already set in order 833 1.190 riastrad * to return the right answer. So this needs some additional 834 1.190 riastrad * synchronization to be made to work correctly with tty driver 835 1.190 riastrad * module autoload. For now, let's just hope it doesn't cause 836 1.190 riastrad * too much trouble for a tty from an autoloaded driver module 837 1.190 riastrad * to fail with EIO instead of returning EOF. 838 1.190 riastrad */ 839 1.190 riastrad if (vp->v_type == VCHR) { 840 1.100 ad if (cdev_type(dev) == D_TTY) 841 1.108 ad vp->v_vflag |= VV_ISTTY; 842 1.190 riastrad } 843 1.190 riastrad 844 1.190 riastrad /* 845 1.190 riastrad * Because opening the device may block indefinitely, e.g. when 846 1.190 riastrad * opening a tty, and loading a module may cross into many 847 1.190 riastrad * other subsystems, we must not hold the vnode lock while 848 1.190 riastrad * calling .d_open, so release it now and reacquire it when 849 1.190 riastrad * done. 850 1.206 riastrad * 851 1.206 riastrad * Take an I/O reference so that any concurrent spec_close via 852 1.206 riastrad * spec_node_revoke will wait for us to finish calling .d_open. 853 1.206 riastrad * The vnode can't be dead at this point because we have it 854 1.206 riastrad * locked. Note that if revoked, the driver must interrupt 855 1.206 riastrad * .d_open before spec_close starts waiting for I/O to drain so 856 1.206 riastrad * this doesn't deadlock. 857 1.190 riastrad */ 858 1.193 riastrad VOP_UNLOCK(vp); 859 1.206 riastrad error = spec_io_enter(vp, &sn1, &dev1); 860 1.206 riastrad if (error) { 861 1.206 riastrad vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 862 1.206 riastrad return error; 863 1.206 riastrad } 864 1.206 riastrad KASSERT(sn1 == sn); 865 1.206 riastrad KASSERT(dev1 == dev); 866 1.206 riastrad 867 1.206 riastrad /* 868 1.206 riastrad * Open the device. If .d_open returns ENXIO (device not 869 1.206 riastrad * configured), the driver may not be loaded, so try 870 1.206 riastrad * autoloading a module and then try .d_open again if anything 871 1.206 riastrad * got loaded. 
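	 * (module_gen changes whenever the set of loaded modules
	 * changes, so the do/while loops below retry .d_open only when
	 * the module_autoload call may actually have installed a new
	 * driver.)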
872 1.206 riastrad */ 873 1.190 riastrad switch (vp->v_type) { 874 1.190 riastrad case VCHR: 875 1.122 haad do { 876 1.125 tsutsui const struct cdevsw *cdev; 877 1.125 tsutsui 878 1.122 haad gen = module_gen; 879 1.122 haad error = cdev_open(dev, ap->a_mode, S_IFCHR, l); 880 1.122 haad if (error != ENXIO) 881 1.122 haad break; 882 1.218 riastrad 883 1.125 tsutsui /* Check if we already have a valid driver */ 884 1.125 tsutsui mutex_enter(&device_lock); 885 1.125 tsutsui cdev = cdevsw_lookup(dev); 886 1.125 tsutsui mutex_exit(&device_lock); 887 1.125 tsutsui if (cdev != NULL) 888 1.125 tsutsui break; 889 1.125 tsutsui 890 1.122 haad /* Get device name from devsw_conv array */ 891 1.122 haad if ((name = cdevsw_getname(major(dev))) == NULL) 892 1.122 haad break; 893 1.218 riastrad 894 1.122 haad /* Try to autoload device module */ 895 1.218 riastrad (void)module_autoload(name, MODULE_CLASS_DRIVER); 896 1.122 haad } while (gen != module_gen); 897 1.70 dsl break; 898 1.1 cgd 899 1.1 cgd case VBLK: 900 1.122 haad do { 901 1.125 tsutsui const struct bdevsw *bdev; 902 1.125 tsutsui 903 1.122 haad gen = module_gen; 904 1.122 haad error = bdev_open(dev, ap->a_mode, S_IFBLK, l); 905 1.122 haad if (error != ENXIO) 906 1.122 haad break; 907 1.122 haad 908 1.125 tsutsui /* Check if we already have a valid driver */ 909 1.125 tsutsui mutex_enter(&device_lock); 910 1.125 tsutsui bdev = bdevsw_lookup(dev); 911 1.125 tsutsui mutex_exit(&device_lock); 912 1.125 tsutsui if (bdev != NULL) 913 1.125 tsutsui break; 914 1.125 tsutsui 915 1.122 haad /* Get device name from devsw_conv array */ 916 1.122 haad if ((name = bdevsw_getname(major(dev))) == NULL) 917 1.122 haad break; 918 1.122 haad 919 1.218 riastrad /* Try to autoload device module */ 920 1.218 riastrad (void)module_autoload(name, MODULE_CLASS_DRIVER); 921 1.122 haad } while (gen != module_gen); 922 1.70 dsl break; 923 1.55 chs 924 1.70 dsl default: 925 1.190 riastrad __unreachable(); 926 1.1 cgd } 927 1.206 riastrad 928 1.206 riastrad /* 929 1.206 riastrad * Release the I/O reference now that we have called .d_open, 930 1.206 riastrad * and reacquire the vnode lock. At this point, the device may 931 1.206 riastrad * have been revoked, so we must tread carefully. However, sn 932 1.206 riastrad * and sd remain valid pointers until we drop our reference. 933 1.206 riastrad */ 934 1.206 riastrad spec_io_exit(vp, sn); 935 1.193 riastrad vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 936 1.206 riastrad KASSERT(vp->v_specnode == sn); 937 1.70 dsl 938 1.190 riastrad /* 939 1.190 riastrad * If it has been revoked since we released the vnode lock and 940 1.190 riastrad * reacquired it, then spec_node_revoke has closed it, and we 941 1.190 riastrad * must fail with EBADF. 942 1.190 riastrad * 943 1.190 riastrad * Otherwise, if opening it failed, back out and release the 944 1.201 riastrad * open reference. If it was ever successfully opened and we 945 1.201 riastrad * got the last reference this way, it's now our job to close 946 1.201 riastrad * it. This might happen in the following scenario: 947 1.190 riastrad * 948 1.190 riastrad * Thread 1 Thread 2 949 1.190 riastrad * VOP_OPEN 950 1.190 riastrad * ... 951 1.190 riastrad * .d_open -> 0 (success) 952 1.190 riastrad * acquire vnode lock 953 1.190 riastrad * do stuff VOP_OPEN 954 1.190 riastrad * release vnode lock ... 
955 1.190 riastrad * .d_open -> EBUSY 956 1.190 riastrad * VOP_CLOSE 957 1.190 riastrad * acquire vnode lock 958 1.190 riastrad * --sd_opencnt != 0 959 1.190 riastrad * => no .d_close 960 1.190 riastrad * release vnode lock 961 1.190 riastrad * acquire vnode lock 962 1.190 riastrad * --sd_opencnt == 0 963 1.201 riastrad * 964 1.201 riastrad * We can't resolve this by making spec_close wait for .d_open 965 1.201 riastrad * to complete before examining sd_opencnt, because .d_open can 966 1.201 riastrad * hang indefinitely, e.g. for a tty. 967 1.190 riastrad */ 968 1.120 pooka mutex_enter(&device_lock); 969 1.112 ad if (sn->sn_gone) { 970 1.112 ad if (error == 0) 971 1.112 ad error = EBADF; 972 1.201 riastrad } else if (error == 0) { 973 1.212 riastrad /* 974 1.212 riastrad * Device has not been revoked, so our opencnt can't 975 1.212 riastrad * have gone away at this point -- transition to 976 1.212 riastrad * sn_gone=true happens before transition to 977 1.212 riastrad * sn_opencnt=0 in spec_node_revoke. 978 1.212 riastrad */ 979 1.212 riastrad KASSERT(sd->sd_opencnt); 980 1.212 riastrad KASSERT(sn->sn_opencnt); 981 1.212 riastrad KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt, 982 1.212 riastrad "sn_opencnt=%u > sd_opencnt=%u", 983 1.212 riastrad sn->sn_opencnt, sd->sd_opencnt); 984 1.213 riastrad KASSERT(!sd->sd_closing); 985 1.201 riastrad sd->sd_opened = true; 986 1.201 riastrad } else if (sd->sd_opencnt == 1 && sd->sd_opened) { 987 1.201 riastrad /* 988 1.201 riastrad * We're the last reference to a _previous_ open even 989 1.201 riastrad * though this one failed, so we have to close it. 990 1.201 riastrad * Don't decrement the reference count here -- 991 1.201 riastrad * spec_close will do that. 992 1.201 riastrad */ 993 1.201 riastrad KASSERT(sn->sn_opencnt == 1); 994 1.201 riastrad needclose = true; 995 1.201 riastrad } else { 996 1.207 riastrad KASSERT(sd->sd_opencnt); 997 1.207 riastrad KASSERT(sn->sn_opencnt); 998 1.211 riastrad KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt, 999 1.211 riastrad "sn_opencnt=%u > sd_opencnt=%u", 1000 1.211 riastrad sn->sn_opencnt, sd->sd_opencnt); 1001 1.112 ad sd->sd_opencnt--; 1002 1.112 ad sn->sn_opencnt--; 1003 1.115 hannken if (vp->v_type == VBLK) 1004 1.115 hannken sd->sd_bdevvp = NULL; 1005 1.201 riastrad } 1006 1.201 riastrad mutex_exit(&device_lock); 1007 1.115 hannken 1008 1.201 riastrad /* 1009 1.201 riastrad * If this open failed, but the device was previously opened, 1010 1.201 riastrad * and another thread concurrently closed the vnode while we 1011 1.201 riastrad * were in the middle of reopening it, the other thread will 1012 1.201 riastrad * see sd_opencnt > 0 and thus decide not to call .d_close -- 1013 1.201 riastrad * it is now our responsibility to do so. 1014 1.201 riastrad * 1015 1.201 riastrad * XXX The flags passed to VOP_CLOSE here are wrong, but 1016 1.201 riastrad * drivers can't rely on FREAD|FWRITE anyway -- e.g., consider 1017 1.201 riastrad * a device opened by thread 0 with O_READ, then opened by 1018 1.201 riastrad * thread 1 with O_WRITE, then closed by thread 0, and finally 1019 1.201 riastrad * closed by thread 1; the last .d_close call will have FWRITE 1020 1.201 riastrad * but not FREAD. We should just eliminate the FREAD/FWRITE 1021 1.201 riastrad * parameter to .d_close altogether. 
1022 1.201 riastrad */ 1023 1.201 riastrad if (needclose) { 1024 1.201 riastrad KASSERT(error); 1025 1.201 riastrad VOP_CLOSE(vp, FNONBLOCK, NOCRED); 1026 1.112 ad } 1027 1.89 elad 1028 1.194 riastrad /* If anything went wrong, we're done. */ 1029 1.194 riastrad if (error) 1030 1.70 dsl return error; 1031 1.112 ad 1032 1.194 riastrad /* 1033 1.194 riastrad * For disk devices, automagically set the vnode size to the 1034 1.194 riastrad * partition size, if we can. This applies to block devices 1035 1.194 riastrad * and character devices alike -- every block device must have 1036 1.194 riastrad * a corresponding character device. And if the module is 1037 1.194 riastrad * loaded it will remain loaded until we're done here (it is 1038 1.194 riastrad * forbidden to devsw_detach until closed). So it is safe to 1039 1.194 riastrad * query cdev_type unconditionally here. 1040 1.194 riastrad */ 1041 1.219 mlelstv switch (vp->v_type) { 1042 1.219 mlelstv case VCHR: 1043 1.219 mlelstv ioctl = cdev_ioctl; 1044 1.219 mlelstv dtype = cdev_type(dev); 1045 1.219 mlelstv break; 1046 1.219 mlelstv default: 1047 1.219 mlelstv ioctl = bdev_ioctl; 1048 1.219 mlelstv dtype = bdev_type(dev); 1049 1.219 mlelstv break; 1050 1.219 mlelstv } 1051 1.219 mlelstv if (dtype == D_DISK) { 1052 1.219 mlelstv struct partinfo pi; 1053 1.219 mlelstv off_t sz; 1054 1.219 mlelstv 1055 1.219 mlelstv error = (*ioctl)(dev, DIOCGPARTINFO, &pi, FREAD, curlwp); 1056 1.219 mlelstv if (error == 0) 1057 1.219 mlelstv sz = (off_t)pi.pi_size * pi.pi_secsize; 1058 1.219 mlelstv else if (error == ENOTTY) 1059 1.219 mlelstv error = (*ioctl)(dev, DIOCGMEDIASIZE, &sz, FREAD, curlwp); 1060 1.219 mlelstv 1061 1.219 mlelstv if (error == 0) 1062 1.219 mlelstv uvm_vnp_setsize(vp, (voff_t)sz); 1063 1.194 riastrad } 1064 1.154 christos 1065 1.194 riastrad /* Success! 
*/ 1066 1.70 dsl return 0; 1067 1.1 cgd } 1068 1.1 cgd 1069 1.1 cgd /* 1070 1.1 cgd * Vnode op for read 1071 1.1 cgd */ 1072 1.1 cgd /* ARGSUSED */ 1073 1.28 christos int 1074 1.104 pooka spec_read(void *v) 1075 1.28 christos { 1076 1.15 mycroft struct vop_read_args /* { 1077 1.15 mycroft struct vnode *a_vp; 1078 1.15 mycroft struct uio *a_uio; 1079 1.15 mycroft int a_ioflag; 1080 1.87 elad kauth_cred_t a_cred; 1081 1.28 christos } */ *ap = v; 1082 1.48 augustss struct vnode *vp = ap->a_vp; 1083 1.48 augustss struct uio *uio = ap->a_uio; 1084 1.218 riastrad struct lwp *l = curlwp; 1085 1.202 riastrad struct specnode *sn; 1086 1.202 riastrad dev_t dev; 1087 1.56 chs struct buf *bp; 1088 1.57 chs daddr_t bn; 1089 1.59 chs int bsize, bscale; 1090 1.157 christos struct partinfo pi; 1091 1.64 gehenna int n, on; 1092 1.1 cgd int error = 0; 1093 1.181 mlelstv int i, nra; 1094 1.181 mlelstv daddr_t lastbn, *rablks; 1095 1.181 mlelstv int *rasizes; 1096 1.181 mlelstv int nrablks, ratogo; 1097 1.1 cgd 1098 1.160 pgoyette KASSERT(uio->uio_rw == UIO_READ); 1099 1.218 riastrad KASSERTMSG((VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 1100 1.218 riastrad uio->uio_vmspace == curproc->p_vmspace), 1101 1.218 riastrad "vmspace belongs to neither kernel nor curproc"); 1102 1.160 pgoyette 1103 1.1 cgd if (uio->uio_resid == 0) 1104 1.218 riastrad return 0; 1105 1.1 cgd 1106 1.56 chs switch (vp->v_type) { 1107 1.56 chs 1108 1.56 chs case VCHR: 1109 1.202 riastrad /* 1110 1.202 riastrad * Release the lock while we sleep -- possibly 1111 1.202 riastrad * indefinitely, if this is, e.g., a tty -- in 1112 1.202 riastrad * cdev_read, so we don't hold up everything else that 1113 1.202 riastrad * might want access to the vnode. 1114 1.202 riastrad * 1115 1.202 riastrad * But before we issue the read, take an I/O reference 1116 1.202 riastrad * to the specnode so close will know when we're done 1117 1.202 riastrad * reading. Note that the moment we release the lock, 1118 1.202 riastrad * the vnode's identity may change; hence spec_io_enter 1119 1.202 riastrad * may fail, and the caller may have a dead vnode on 1120 1.202 riastrad * their hands, if the file system on which vp lived 1121 1.202 riastrad * has been unmounted. 1122 1.202 riastrad */ 1123 1.130 hannken VOP_UNLOCK(vp); 1124 1.202 riastrad error = spec_io_enter(vp, &sn, &dev); 1125 1.202 riastrad if (error) 1126 1.202 riastrad goto out; 1127 1.202 riastrad error = cdev_read(dev, uio, ap->a_ioflag); 1128 1.202 riastrad spec_io_exit(vp, sn); 1129 1.215 riastrad out: /* XXX What if the caller held an exclusive lock? 
*/ 1130 1.215 riastrad vn_lock(vp, LK_SHARED | LK_RETRY); 1131 1.218 riastrad return error; 1132 1.1 cgd 1133 1.56 chs case VBLK: 1134 1.112 ad KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp); 1135 1.56 chs if (uio->uio_offset < 0) 1136 1.218 riastrad return EINVAL; 1137 1.138 dholland 1138 1.157 christos if (bdev_ioctl(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, l) == 0) 1139 1.177 jdolecek bsize = imin(imax(pi.pi_bsize, DEV_BSIZE), MAXBSIZE); 1140 1.157 christos else 1141 1.157 christos bsize = BLKDEV_IOSIZE; 1142 1.138 dholland 1143 1.59 chs bscale = bsize >> DEV_BSHIFT; 1144 1.181 mlelstv 1145 1.181 mlelstv nra = uimax(16 * MAXPHYS / bsize - 1, 511); 1146 1.181 mlelstv rablks = kmem_alloc(nra * sizeof(*rablks), KM_SLEEP); 1147 1.181 mlelstv rasizes = kmem_alloc(nra * sizeof(*rasizes), KM_SLEEP); 1148 1.181 mlelstv lastbn = ((uio->uio_offset + uio->uio_resid - 1) >> DEV_BSHIFT) 1149 1.181 mlelstv &~ (bscale - 1); 1150 1.181 mlelstv nrablks = ratogo = 0; 1151 1.56 chs do { 1152 1.59 chs bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1); 1153 1.56 chs on = uio->uio_offset % bsize; 1154 1.175 riastrad n = uimin((unsigned)(bsize - on), uio->uio_resid); 1155 1.181 mlelstv 1156 1.181 mlelstv if (ratogo == 0) { 1157 1.181 mlelstv nrablks = uimin((lastbn - bn) / bscale, nra); 1158 1.181 mlelstv ratogo = nrablks; 1159 1.181 mlelstv 1160 1.181 mlelstv for (i = 0; i < nrablks; ++i) { 1161 1.181 mlelstv rablks[i] = bn + (i+1) * bscale; 1162 1.181 mlelstv rasizes[i] = bsize; 1163 1.181 mlelstv } 1164 1.181 mlelstv 1165 1.181 mlelstv error = breadn(vp, bn, bsize, 1166 1.218 riastrad rablks, rasizes, nrablks, 1167 1.218 riastrad 0, &bp); 1168 1.181 mlelstv } else { 1169 1.181 mlelstv if (ratogo > 0) 1170 1.181 mlelstv --ratogo; 1171 1.181 mlelstv error = bread(vp, bn, bsize, 0, &bp); 1172 1.56 chs } 1173 1.181 mlelstv if (error) 1174 1.181 mlelstv break; 1175 1.175 riastrad n = uimin(n, bsize - bp->b_resid); 1176 1.56 chs error = uiomove((char *)bp->b_data + on, n, uio); 1177 1.107 ad brelse(bp, 0); 1178 1.56 chs } while (error == 0 && uio->uio_resid > 0 && n != 0); 1179 1.181 mlelstv 1180 1.181 mlelstv kmem_free(rablks, nra * sizeof(*rablks)); 1181 1.181 mlelstv kmem_free(rasizes, nra * sizeof(*rasizes)); 1182 1.181 mlelstv 1183 1.218 riastrad return error; 1184 1.56 chs 1185 1.56 chs default: 1186 1.56 chs panic("spec_read type"); 1187 1.1 cgd } 1188 1.56 chs /* NOTREACHED */ 1189 1.1 cgd } 1190 1.1 cgd 1191 1.1 cgd /* 1192 1.1 cgd * Vnode op for write 1193 1.1 cgd */ 1194 1.1 cgd /* ARGSUSED */ 1195 1.28 christos int 1196 1.104 pooka spec_write(void *v) 1197 1.28 christos { 1198 1.15 mycroft struct vop_write_args /* { 1199 1.15 mycroft struct vnode *a_vp; 1200 1.15 mycroft struct uio *a_uio; 1201 1.15 mycroft int a_ioflag; 1202 1.87 elad kauth_cred_t a_cred; 1203 1.28 christos } */ *ap = v; 1204 1.48 augustss struct vnode *vp = ap->a_vp; 1205 1.48 augustss struct uio *uio = ap->a_uio; 1206 1.86 yamt struct lwp *l = curlwp; 1207 1.202 riastrad struct specnode *sn; 1208 1.202 riastrad dev_t dev; 1209 1.56 chs struct buf *bp; 1210 1.56 chs daddr_t bn; 1211 1.59 chs int bsize, bscale; 1212 1.157 christos struct partinfo pi; 1213 1.64 gehenna int n, on; 1214 1.1 cgd int error = 0; 1215 1.1 cgd 1216 1.160 pgoyette KASSERT(uio->uio_rw == UIO_WRITE); 1217 1.218 riastrad KASSERTMSG((VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 1218 1.218 riastrad uio->uio_vmspace == curproc->p_vmspace), 1219 1.218 riastrad "vmspace belongs to neither kernel nor curproc"); 1220 1.1 cgd 1221 1.56 chs switch (vp->v_type) { 1222 
1.56 chs 1223 1.56 chs case VCHR: 1224 1.202 riastrad /* 1225 1.202 riastrad * Release the lock while we sleep -- possibly 1226 1.202 riastrad * indefinitely, if this is, e.g., a tty -- in 1227 1.202 riastrad * cdev_write, so we don't hold up everything else that 1228 1.202 riastrad * might want access to the vnode. 1229 1.202 riastrad * 1230 1.202 riastrad * But before we issue the write, take an I/O reference 1231 1.202 riastrad * to the specnode so close will know when we're done 1232 1.202 riastrad * writing. Note that the moment we release the lock, 1233 1.202 riastrad * the vnode's identity may change; hence spec_io_enter 1234 1.202 riastrad * may fail, and the caller may have a dead vnode on 1235 1.202 riastrad * their hands, if the file system on which vp lived 1236 1.202 riastrad * has been unmounted. 1237 1.202 riastrad */ 1238 1.130 hannken VOP_UNLOCK(vp); 1239 1.202 riastrad error = spec_io_enter(vp, &sn, &dev); 1240 1.202 riastrad if (error) 1241 1.202 riastrad goto out; 1242 1.202 riastrad error = cdev_write(dev, uio, ap->a_ioflag); 1243 1.202 riastrad spec_io_exit(vp, sn); 1244 1.202 riastrad out: vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1245 1.218 riastrad return error; 1246 1.56 chs 1247 1.56 chs case VBLK: 1248 1.112 ad KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp); 1249 1.56 chs if (uio->uio_resid == 0) 1250 1.218 riastrad return 0; 1251 1.56 chs if (uio->uio_offset < 0) 1252 1.218 riastrad return EINVAL; 1253 1.157 christos 1254 1.157 christos if (bdev_ioctl(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, l) == 0) 1255 1.177 jdolecek bsize = imin(imax(pi.pi_bsize, DEV_BSIZE), MAXBSIZE); 1256 1.157 christos else 1257 1.157 christos bsize = BLKDEV_IOSIZE; 1258 1.157 christos 1259 1.59 chs bscale = bsize >> DEV_BSHIFT; 1260 1.56 chs do { 1261 1.59 chs bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1); 1262 1.56 chs on = uio->uio_offset % bsize; 1263 1.175 riastrad n = uimin((unsigned)(bsize - on), uio->uio_resid); 1264 1.56 chs if (n == bsize) 1265 1.56 chs bp = getblk(vp, bn, bsize, 0, 0); 1266 1.56 chs else 1267 1.146 maxv error = bread(vp, bn, bsize, B_MODIFY, &bp); 1268 1.56 chs if (error) { 1269 1.218 riastrad return error; 1270 1.56 chs } 1271 1.175 riastrad n = uimin(n, bsize - bp->b_resid); 1272 1.56 chs error = uiomove((char *)bp->b_data + on, n, uio); 1273 1.56 chs if (error) 1274 1.107 ad brelse(bp, 0); 1275 1.56 chs else { 1276 1.56 chs if (n + on == bsize) 1277 1.56 chs bawrite(bp); 1278 1.56 chs else 1279 1.56 chs bdwrite(bp); 1280 1.107 ad error = bp->b_error; 1281 1.56 chs } 1282 1.56 chs } while (error == 0 && uio->uio_resid > 0 && n != 0); 1283 1.218 riastrad return error; 1284 1.56 chs 1285 1.56 chs default: 1286 1.56 chs panic("spec_write type"); 1287 1.55 chs } 1288 1.56 chs /* NOTREACHED */ 1289 1.1 cgd } 1290 1.1 cgd 1291 1.1 cgd /* 1292 1.144 dholland * fdiscard, which on disk devices becomes TRIM. 
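 *
 * Reached from userland through fdiscard(2), e.g. (a sketch; the
 * device name is illustrative):
 *
 *	int fd = open("/dev/rwd0d", O_WRONLY);
 *	if (fdiscard(fd, 0, 1024 * 1024) == -1)
 *		err(1, "fdiscard");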
1293 1.144 dholland */ 1294 1.144 dholland int 1295 1.144 dholland spec_fdiscard(void *v) 1296 1.144 dholland { 1297 1.144 dholland struct vop_fdiscard_args /* { 1298 1.144 dholland struct vnode *a_vp; 1299 1.144 dholland off_t a_pos; 1300 1.144 dholland off_t a_len; 1301 1.144 dholland } */ *ap = v; 1302 1.202 riastrad struct vnode *vp = ap->a_vp; 1303 1.144 dholland dev_t dev; 1304 1.144 dholland 1305 1.202 riastrad KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE); 1306 1.202 riastrad 1307 1.199 riastrad dev = vp->v_rdev; 1308 1.144 dholland 1309 1.144 dholland switch (vp->v_type) { 1310 1.218 riastrad case VCHR: 1311 1.218 riastrad #if 0 /* This is not stored for character devices. */ 1312 1.218 riastrad KASSERT(vp == vp->v_specnode->sn_dev->sd_cdevvp); 1313 1.218 riastrad #endif 1314 1.144 dholland return cdev_discard(dev, ap->a_pos, ap->a_len); 1315 1.218 riastrad case VBLK: 1316 1.144 dholland KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp); 1317 1.144 dholland return bdev_discard(dev, ap->a_pos, ap->a_len); 1318 1.218 riastrad default: 1319 1.144 dholland panic("spec_fdiscard: not a device\n"); 1320 1.144 dholland } 1321 1.144 dholland } 1322 1.144 dholland 1323 1.144 dholland /* 1324 1.1 cgd * Device ioctl operation. 1325 1.1 cgd */ 1326 1.1 cgd /* ARGSUSED */ 1327 1.28 christos int 1328 1.104 pooka spec_ioctl(void *v) 1329 1.28 christos { 1330 1.15 mycroft struct vop_ioctl_args /* { 1331 1.15 mycroft struct vnode *a_vp; 1332 1.19 cgd u_long a_command; 1333 1.78 jrf void *a_data; 1334 1.15 mycroft int a_fflag; 1335 1.87 elad kauth_cred_t a_cred; 1336 1.28 christos } */ *ap = v; 1337 1.202 riastrad struct vnode *vp = ap->a_vp; 1338 1.202 riastrad struct specnode *sn; 1339 1.83 chs dev_t dev; 1340 1.202 riastrad int error; 1341 1.1 cgd 1342 1.202 riastrad error = spec_io_enter(vp, &sn, &dev); 1343 1.202 riastrad if (error) 1344 1.202 riastrad return error; 1345 1.83 chs 1346 1.83 chs switch (vp->v_type) { 1347 1.1 cgd case VCHR: 1348 1.202 riastrad error = cdev_ioctl(dev, ap->a_command, ap->a_data, 1349 1.109 pooka ap->a_fflag, curlwp); 1350 1.202 riastrad break; 1351 1.1 cgd case VBLK: 1352 1.112 ad KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp); 1353 1.202 riastrad error = bdev_ioctl(dev, ap->a_command, ap->a_data, 1354 1.218 riastrad ap->a_fflag, curlwp); 1355 1.202 riastrad break; 1356 1.1 cgd default: 1357 1.1 cgd panic("spec_ioctl"); 1358 1.1 cgd /* NOTREACHED */ 1359 1.1 cgd } 1360 1.202 riastrad 1361 1.202 riastrad spec_io_exit(vp, sn); 1362 1.202 riastrad return error; 1363 1.1 cgd } 1364 1.1 cgd 1365 1.1 cgd /* ARGSUSED */ 1366 1.28 christos int 1367 1.104 pooka spec_poll(void *v) 1368 1.28 christos { 1369 1.32 mycroft struct vop_poll_args /* { 1370 1.15 mycroft struct vnode *a_vp; 1371 1.32 mycroft int a_events; 1372 1.28 christos } */ *ap = v; 1373 1.202 riastrad struct vnode *vp = ap->a_vp; 1374 1.202 riastrad struct specnode *sn; 1375 1.48 augustss dev_t dev; 1376 1.202 riastrad int revents; 1377 1.1 cgd 1378 1.202 riastrad if (spec_io_enter(vp, &sn, &dev) != 0) 1379 1.92 jld return POLLERR; 1380 1.91 jld 1381 1.91 jld switch (vp->v_type) { 1382 1.1 cgd case VCHR: 1383 1.202 riastrad revents = cdev_poll(dev, ap->a_events, curlwp); 1384 1.202 riastrad break; 1385 1.30 mycroft default: 1386 1.202 riastrad revents = genfs_poll(v); 1387 1.202 riastrad break; 1388 1.15 mycroft } 1389 1.202 riastrad 1390 1.202 riastrad spec_io_exit(vp, sn); 1391 1.202 riastrad return revents; 1392 1.15 mycroft } 1393 1.65 jdolecek 1394 1.65 jdolecek /* ARGSUSED */ 1395 1.65 jdolecek int 1396 
1.104 pooka spec_kqfilter(void *v)
1397 1.65 jdolecek {
1398 1.65 jdolecek 	struct vop_kqfilter_args /* {
1399 1.65 jdolecek 		struct vnode *a_vp;
1400 1.65 jdolecek 		struct knote *a_kn;
1401 1.65 jdolecek 	} */ *ap = v;
1402 1.202 riastrad 	struct vnode *vp = ap->a_vp;
1403 1.202 riastrad 	struct specnode *sn;
1404 1.65 jdolecek 	dev_t dev;
1405 1.202 riastrad 	int error;
1406 1.65 jdolecek 
1407 1.202 riastrad 	error = spec_io_enter(vp, &sn, &dev);
1408 1.202 riastrad 	if (error)
1409 1.202 riastrad 		return error;
1410 1.65 jdolecek 
1411 1.202 riastrad 	switch (vp->v_type) {
1412 1.65 jdolecek 	case VCHR:
1413 1.202 riastrad 		error = cdev_kqfilter(dev, ap->a_kn);
1414 1.202 riastrad 		break;
1415 1.65 jdolecek 	default:
1416 1.65 jdolecek 		/*
1417 1.65 jdolecek 		 * Block devices don't support kqfilter, and refuse it
1418 1.65 jdolecek 		 * for any other files (like those vflush()ed) too.
1419 1.65 jdolecek 		 */
1420 1.202 riastrad 		error = EOPNOTSUPP;
1421 1.202 riastrad 		break;
1422 1.65 jdolecek 	}
1423 1.202 riastrad 
1424 1.202 riastrad 	spec_io_exit(vp, sn);
1425 1.202 riastrad 	return error;
1426 1.65 jdolecek }
1427 1.65 jdolecek 
1428 1.15 mycroft /*
1429 1.101 pooka  * Allow mapping of only D_DISK.  This is called only for VBLK.
1430 1.101 pooka  */
1431 1.101 pooka int
1432 1.104 pooka spec_mmap(void *v)
1433 1.101 pooka {
1434 1.101 pooka 	struct vop_mmap_args /* {
1435 1.101 pooka 		struct vnode *a_vp;
1436 1.102 pooka 		vm_prot_t a_prot;
1437 1.101 pooka 		kauth_cred_t a_cred;
1438 1.101 pooka 	} */ *ap = v;
1439 1.101 pooka 	struct vnode *vp = ap->a_vp;
1440 1.202 riastrad 	struct specnode *sn;
1441 1.202 riastrad 	dev_t dev;
1442 1.202 riastrad 	int error;
1443 1.101 pooka 
1444 1.101 pooka 	KASSERT(vp->v_type == VBLK);
1445 1.101 pooka 
1446 1.202 riastrad 	error = spec_io_enter(vp, &sn, &dev);
1447 1.202 riastrad 	if (error)
1448 1.202 riastrad 		return error;
1449 1.202 riastrad 
1450 1.202 riastrad 	error = bdev_type(dev) == D_DISK ?
 0 : EINVAL;
1451 1.202 riastrad 
1452 1.202 riastrad 	spec_io_exit(vp, sn);
1453 1.101 pooka 	return error;
1454 1.101 pooka }
1455 1.101 pooka 
1456 1.101 pooka /*
1457 1.15 mycroft  * Synch buffers associated with a block device
1458 1.15 mycroft  */
1459 1.15 mycroft /* ARGSUSED */
1460 1.15 mycroft int
1461 1.104 pooka spec_fsync(void *v)
1462 1.28 christos {
1463 1.15 mycroft 	struct vop_fsync_args /* {
1464 1.15 mycroft 		struct vnode *a_vp;
1465 1.87 elad 		kauth_cred_t a_cred;
1466 1.40 kleink 		int a_flags;
1467 1.50 fvdl 		off_t a_offlo;
1468 1.50 fvdl 		off_t a_offhi;
1469 1.28 christos 	} */ *ap = v;
1470 1.48 augustss 	struct vnode *vp = ap->a_vp;
1471 1.118 ad 	struct mount *mp;
1472 1.118 ad 	int error;
1473 1.15 mycroft 
1474 1.112 ad 	if (vp->v_type == VBLK) {
1475 1.141 hannken 		if ((mp = spec_node_getmountedfs(vp)) != NULL) {
1476 1.133 hannken 			error = VFS_FSYNC(mp, vp, ap->a_flags);
1477 1.118 ad 			if (error != EOPNOTSUPP)
1478 1.118 ad 				return error;
1479 1.118 ad 		}
1480 1.135 chs 		return vflushbuf(vp, ap->a_flags);
1481 1.112 ad 	}
1482 1.218 riastrad 	return 0;
1483 1.1 cgd }
1484 1.1 cgd 
1485 1.1 cgd /*
1486 1.1 cgd  * Just call the device strategy routine
1487 1.1 cgd  */
1488 1.28 christos int
1489 1.104 pooka spec_strategy(void *v)
1490 1.28 christos {
1491 1.15 mycroft 	struct vop_strategy_args /* {
1492 1.76 hannken 		struct vnode *a_vp;
1493 1.15 mycroft 		struct buf *a_bp;
1494 1.28 christos 	} */ *ap = v;
1495 1.76 hannken 	struct vnode *vp = ap->a_vp;
1496 1.76 hannken 	struct buf *bp = ap->a_bp;
1497 1.202 riastrad 	struct specnode *sn = NULL;
1498 1.161 hannken 	dev_t dev;
1499 1.106 hannken 	int error;
1500 1.1 cgd 
1501 1.202 riastrad 	error = spec_io_enter(vp, &sn, &dev);
1502 1.202 riastrad 	if (error)
1503 1.202 riastrad 		goto out;
1504 1.77 hannken 
1505 1.161 hannken 	bp->b_dev = dev;
1506 1.79 hannken 
1507 1.161 hannken 	if (!(bp->b_flags & B_READ)) {
1508 1.169 hannken #ifdef DIAGNOSTIC
1509 1.169 hannken 		if (bp->b_vp && bp->b_vp->v_type == VBLK) {
1510 1.169 hannken 			struct mount *mp = spec_node_getmountedfs(bp->b_vp);
1511 1.169 hannken 
1512 1.169 hannken 			if (mp && (mp->mnt_flag & MNT_RDONLY)) {
1513 1.169 hannken 				printf("%s blk %"PRId64" written while ro!\n",
1514 1.169 hannken 				    mp->mnt_stat.f_mntonname, bp->b_blkno);
1515 1.216 riastrad #ifdef DDB
1516 1.216 riastrad 				db_stacktrace();
1517 1.216 riastrad #endif
1518 1.169 hannken 			}
1519 1.169 hannken 		}
1520 1.169 hannken #endif /* DIAGNOSTIC */
1521 1.161 hannken 		error = fscow_run(bp, false);
1522 1.161 hannken 		if (error)
1523 1.161 hannken 			goto out;
1524 1.161 hannken 	}
1525 1.100 ad 	bdev_strategy(bp);
1526 1.76 hannken 
1527 1.202 riastrad 	error = 0;
1528 1.161 hannken 
1529 1.202 riastrad out:	if (sn)
1530 1.202 riastrad 		spec_io_exit(vp, sn);
1531 1.202 riastrad 	if (error) {
1532 1.202 riastrad 		bp->b_error = error;
1533 1.202 riastrad 		bp->b_resid = bp->b_bcount;
1534 1.202 riastrad 		biodone(bp);
1535 1.202 riastrad 	}
1536 1.161 hannken 	return error;
1537 1.1 cgd }
1538 1.1 cgd 
1539 1.39 fvdl int
1540 1.104 pooka spec_inactive(void *v)
1541 1.39 fvdl {
1542 1.170 riastrad 	struct vop_inactive_v2_args /* {
1543 1.39 fvdl 		struct vnode *a_vp;
1544 1.148 hannken 		bool *a_recycle;
1545 1.39 fvdl 	} */ *ap = v;
1546 1.148 hannken 
1547 1.171 martin 	KASSERT(ap->a_vp->v_mount == dead_rootmount);
1548 1.148 hannken 	*ap->a_recycle = true;
1549 1.170 riastrad 
1550 1.148 hannken 	return 0;
1551 1.148 hannken }
1552 1.148 hannken 
1553 1.148 hannken int
1554 1.148 hannken spec_reclaim(void *v)
1555 1.148 hannken {
1556 1.172 riastrad 	struct vop_reclaim_v2_args /* {
1557 1.148 hannken struct vnode *a_vp; 1558 1.148 hannken } */ *ap = v; 1559 1.172 riastrad struct vnode *vp = ap->a_vp; 1560 1.172 riastrad 1561 1.200 riastrad KASSERT(vp->v_specnode->sn_opencnt == 0); 1562 1.200 riastrad 1563 1.172 riastrad VOP_UNLOCK(vp); 1564 1.39 fvdl 1565 1.148 hannken KASSERT(vp->v_mount == dead_rootmount); 1566 1.148 hannken return 0; 1567 1.39 fvdl } 1568 1.39 fvdl 1569 1.1 cgd /* 1570 1.1 cgd * This is a noop, simply returning what one has been given. 1571 1.1 cgd */ 1572 1.28 christos int 1573 1.104 pooka spec_bmap(void *v) 1574 1.28 christos { 1575 1.15 mycroft struct vop_bmap_args /* { 1576 1.15 mycroft struct vnode *a_vp; 1577 1.15 mycroft daddr_t a_bn; 1578 1.15 mycroft struct vnode **a_vpp; 1579 1.15 mycroft daddr_t *a_bnp; 1580 1.39 fvdl int *a_runp; 1581 1.28 christos } */ *ap = v; 1582 1.1 cgd 1583 1.15 mycroft if (ap->a_vpp != NULL) 1584 1.15 mycroft *ap->a_vpp = ap->a_vp; 1585 1.15 mycroft if (ap->a_bnp != NULL) 1586 1.15 mycroft *ap->a_bnp = ap->a_bn; 1587 1.39 fvdl if (ap->a_runp != NULL) 1588 1.55 chs *ap->a_runp = (MAXBSIZE >> DEV_BSHIFT) - 1; 1589 1.218 riastrad return 0; 1590 1.1 cgd } 1591 1.1 cgd 1592 1.1 cgd /* 1593 1.1 cgd * Device close routine 1594 1.1 cgd */ 1595 1.1 cgd /* ARGSUSED */ 1596 1.28 christos int 1597 1.104 pooka spec_close(void *v) 1598 1.28 christos { 1599 1.15 mycroft struct vop_close_args /* { 1600 1.15 mycroft struct vnode *a_vp; 1601 1.15 mycroft int a_fflag; 1602 1.87 elad kauth_cred_t a_cred; 1603 1.28 christos } */ *ap = v; 1604 1.48 augustss struct vnode *vp = ap->a_vp; 1605 1.71 dsl struct session *sess; 1606 1.202 riastrad dev_t dev; 1607 1.143 hannken int flags = ap->a_fflag; 1608 1.143 hannken int mode, error, count; 1609 1.112 ad specnode_t *sn; 1610 1.112 ad specdev_t *sd; 1611 1.44 wrstuden 1612 1.202 riastrad KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE); 1613 1.202 riastrad 1614 1.143 hannken mutex_enter(vp->v_interlock); 1615 1.112 ad sn = vp->v_specnode; 1616 1.202 riastrad dev = vp->v_rdev; 1617 1.112 ad sd = sn->sn_dev; 1618 1.143 hannken /* 1619 1.143 hannken * If we're going away soon, make this non-blocking. 1620 1.143 hannken * Also ensures that we won't wedge in vn_lock below. 1621 1.143 hannken */ 1622 1.143 hannken if (vdead_check(vp, VDEAD_NOWAIT) != 0) 1623 1.143 hannken flags |= FNONBLOCK; 1624 1.143 hannken mutex_exit(vp->v_interlock); 1625 1.1 cgd 1626 1.1 cgd switch (vp->v_type) { 1627 1.1 cgd 1628 1.1 cgd case VCHR: 1629 1.11 cgd /* 1630 1.11 cgd * Hack: a tty device that is a controlling terminal 1631 1.112 ad * has a reference from the session structure. We 1632 1.112 ad * cannot easily tell that a character device is a 1633 1.112 ad * controlling terminal, unless it is the closing 1634 1.112 ad * process' controlling terminal. In that case, if the 1635 1.112 ad * open count is 1 release the reference from the 1636 1.112 ad * session. Also, remove the link from the tty back to 1637 1.112 ad * the session and pgrp. 1638 1.112 ad * 1639 1.112 ad * XXX V. fishy. 1640 1.11 cgd */ 1641 1.179 ad mutex_enter(&proc_lock); 1642 1.112 ad sess = curlwp->l_proc->p_session; 1643 1.112 ad if (sn->sn_opencnt == 1 && vp == sess->s_ttyvp) { 1644 1.112 ad mutex_spin_enter(&tty_lock); 1645 1.71 dsl sess->s_ttyvp = NULL; 1646 1.72 pk if (sess->s_ttyp->t_session != NULL) { 1647 1.72 pk sess->s_ttyp->t_pgrp = NULL; 1648 1.72 pk sess->s_ttyp->t_session = NULL; 1649 1.112 ad mutex_spin_exit(&tty_lock); 1650 1.124 rmind /* Releases proc_lock. 
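				 * Also drops the session reference that
				 * was held through t_session.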
 */
1651 1.124 rmind 				proc_sessrele(sess);
1652 1.100 ad 			} else {
1653 1.112 ad 				mutex_spin_exit(&tty_lock);
1654 1.100 ad 				if (sess->s_ttyp->t_pgrp != NULL)
1655 1.100 ad 					panic("spec_close: spurious pgrp ref");
1656 1.179 ad 				mutex_exit(&proc_lock);
1657 1.100 ad 			}
1658 1.11 cgd 			vrele(vp);
1659 1.100 ad 		} else
1660 1.179 ad 			mutex_exit(&proc_lock);
1661 1.100 ad 
1662 1.1 cgd 		/*
1663 1.1 cgd 		 * If the vnode is locked, then we are in the midst
1664 1.1 cgd 		 * of forcibly closing the device, otherwise we only
1665 1.1 cgd 		 * close on last reference.
1666 1.1 cgd 		 */
1667 1.1 cgd 		mode = S_IFCHR;
1668 1.1 cgd 		break;
1669 1.1 cgd 
1670 1.1 cgd 	case VBLK:
1671 1.112 ad 		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
1672 1.1 cgd 		/*
1673 1.1 cgd 		 * On last close of a block device (that isn't mounted)
1674 1.1 cgd 		 * we must invalidate any in core blocks, so that
1675 1.1 cgd 		 * we can, for instance, change floppy disks.
1676 1.1 cgd 		 */
1677 1.109 pooka 		error = vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 0, 0);
1678 1.28 christos 		if (error)
1679 1.218 riastrad 			return error;
1680 1.1 cgd 		/*
1681 1.1 cgd 		 * We do not want to really close the device if it
1682 1.1 cgd 		 * is still in use unless we are trying to close it
1683 1.1 cgd 		 * forcibly. Since every use (buffer, vnode, swap, cmap)
1684 1.1 cgd 		 * holds a reference to the vnode, and because we mark
1685 1.1 cgd 		 * any other vnodes that alias this device, when the
1686 1.1 cgd 		 * sum of the reference counts on all the aliased
1687 1.1 cgd 		 * vnodes descends to one, we are on last close.
1688 1.1 cgd 		 */
1689 1.1 cgd 		mode = S_IFBLK;
1690 1.1 cgd 		break;
1691 1.5 cgd 
1692 1.1 cgd 	default:
1693 1.1 cgd 		panic("spec_close: not special");
1694 1.1 cgd 	}
1695 1.1 cgd 
1696 1.198 riastrad 	/*
1697 1.198 riastrad 	 * Decrement the open reference count of this node and the
1698 1.198 riastrad 	 * device.  For block devices, the open reference count must be
1699 1.198 riastrad 	 * 1 at this point.  If the device's open reference count goes
1700 1.198 riastrad 	 * to zero, we're the last one out so get the lights.
1701 1.201 riastrad 	 *
1702 1.201 riastrad 	 * We may find --sd->sd_opencnt gives zero, and yet
1703 1.201 riastrad 	 * sd->sd_opened is false.  This happens if the vnode is
1704 1.201 riastrad 	 * revoked at the same time as it is being opened, which can
1705 1.201 riastrad 	 * happen when opening a tty blocks indefinitely.  In that
1706 1.201 riastrad 	 * case, we still must call close -- it is the job of close to
1707 1.201 riastrad 	 * interrupt the open.  Either way, the device will no
1708 1.201 riastrad 	 * longer be open, so we have to clear sd->sd_opened; subsequent
1709 1.201 riastrad 	 * opens will have responsibility for issuing close.
1710 1.201 riastrad 	 *
1711 1.201 riastrad 	 * This has the side effect that the sequence of opens might
1712 1.201 riastrad 	 * happen out of order -- we might end up doing open, open,
1713 1.201 riastrad 	 * close, close, instead of open, close, open, close.  This is
1714 1.201 riastrad 	 * unavoidable with the current devsw API, where open is
1715 1.201 riastrad 	 * allowed to block and close must be able to run concurrently
1716 1.201 riastrad 	 * to interrupt it.  It is the driver's responsibility to
1717 1.201 riastrad 	 * ensure that close is idempotent so that this works.  Drivers
1718 1.201 riastrad 	 * requiring per-open state and exact 1:1 correspondence
1719 1.201 riastrad 	 * between open and close can use fd_clone.
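	 * (Concretely: one thread's open of a tty may block in d_open;
	 * revoking the vnode runs d_close to interrupt it, and a fresh
	 * open of the same device may begin before the blocked open has
	 * returned -- giving open, open, close, close.)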
1720 1.198 riastrad */ 1721 1.120 pooka mutex_enter(&device_lock); 1722 1.207 riastrad KASSERT(sn->sn_opencnt); 1723 1.207 riastrad KASSERT(sd->sd_opencnt); 1724 1.211 riastrad KASSERTMSG(sn->sn_opencnt <= sd->sd_opencnt, 1725 1.211 riastrad "sn_opencnt=%u > sd_opencnt=%u", 1726 1.211 riastrad sn->sn_opencnt, sd->sd_opencnt); 1727 1.112 ad sn->sn_opencnt--; 1728 1.112 ad count = --sd->sd_opencnt; 1729 1.198 riastrad if (vp->v_type == VBLK) { 1730 1.198 riastrad KASSERTMSG(count == 0, "block device with %u opens", 1731 1.198 riastrad count + 1); 1732 1.112 ad sd->sd_bdevvp = NULL; 1733 1.198 riastrad } 1734 1.204 riastrad if (count == 0) { 1735 1.211 riastrad KASSERTMSG(sn->sn_opencnt == 0, "sn_opencnt=%u", 1736 1.211 riastrad sn->sn_opencnt); 1737 1.211 riastrad KASSERT(!sd->sd_closing); 1738 1.201 riastrad sd->sd_opened = false; 1739 1.204 riastrad sd->sd_closing = true; 1740 1.204 riastrad } 1741 1.120 pooka mutex_exit(&device_lock); 1742 1.112 ad 1743 1.185 riastrad if (count != 0) 1744 1.112 ad return 0; 1745 1.112 ad 1746 1.44 wrstuden /* 1747 1.62 wiz * If we're able to block, release the vnode lock & reacquire. We 1748 1.72 pk * might end up sleeping for someone else who wants our queues. They 1749 1.143 hannken * won't get them if we hold the vnode locked. 1750 1.44 wrstuden */ 1751 1.143 hannken if (!(flags & FNONBLOCK)) 1752 1.130 hannken VOP_UNLOCK(vp); 1753 1.44 wrstuden 1754 1.210 riastrad /* 1755 1.210 riastrad * If we can cancel all outstanding I/O, then wait for it to 1756 1.210 riastrad * drain before we call .d_close. Drivers that split up 1757 1.210 riastrad * .d_cancel and .d_close this way need not have any internal 1758 1.210 riastrad * mechanism for waiting in .d_close for I/O to drain. 1759 1.210 riastrad */ 1760 1.210 riastrad if (vp->v_type == VBLK) 1761 1.210 riastrad error = bdev_cancel(dev, flags, mode, curlwp); 1762 1.210 riastrad else 1763 1.210 riastrad error = cdev_cancel(dev, flags, mode, curlwp); 1764 1.210 riastrad if (error == 0) 1765 1.210 riastrad spec_io_drain(sd); 1766 1.210 riastrad else 1767 1.210 riastrad KASSERTMSG(error == ENODEV, "cancel dev=0x%lx failed with %d", 1768 1.210 riastrad (unsigned long)dev, error); 1769 1.210 riastrad 1770 1.100 ad if (vp->v_type == VBLK) 1771 1.143 hannken error = bdev_close(dev, flags, mode, curlwp); 1772 1.64 gehenna else 1773 1.143 hannken error = cdev_close(dev, flags, mode, curlwp); 1774 1.44 wrstuden 1775 1.202 riastrad /* 1776 1.202 riastrad * Wait for all other devsw operations to drain. After this 1777 1.202 riastrad * point, no bdev/cdev_* can be active for this specdev. 1778 1.202 riastrad */ 1779 1.202 riastrad spec_io_drain(sd); 1780 1.202 riastrad 1781 1.204 riastrad /* 1782 1.204 riastrad * Wake any spec_open calls waiting for close to finish -- do 1783 1.204 riastrad * this before reacquiring the vnode lock, because spec_open 1784 1.204 riastrad * holds the vnode lock while waiting, so doing this after 1785 1.204 riastrad * reacquiring the lock would deadlock. 
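	 * (The waiters in spec_open sleep on specfs_iocv, under
	 * device_lock, until sd_closing is cleared.)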
1786 1.204 riastrad */ 1787 1.204 riastrad mutex_enter(&device_lock); 1788 1.211 riastrad KASSERT(!sd->sd_opened); 1789 1.204 riastrad KASSERT(sd->sd_closing); 1790 1.204 riastrad sd->sd_closing = false; 1791 1.204 riastrad cv_broadcast(&specfs_iocv); 1792 1.204 riastrad mutex_exit(&device_lock); 1793 1.204 riastrad 1794 1.143 hannken if (!(flags & FNONBLOCK)) 1795 1.44 wrstuden vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1796 1.44 wrstuden 1797 1.218 riastrad return error; 1798 1.1 cgd } 1799 1.1 cgd 1800 1.1 cgd /* 1801 1.1 cgd * Print out the contents of a special device vnode. 1802 1.1 cgd */ 1803 1.28 christos int 1804 1.104 pooka spec_print(void *v) 1805 1.28 christos { 1806 1.15 mycroft struct vop_print_args /* { 1807 1.15 mycroft struct vnode *a_vp; 1808 1.28 christos } */ *ap = v; 1809 1.15 mycroft 1810 1.121 christos printf("dev %llu, %llu\n", (unsigned long long)major(ap->a_vp->v_rdev), 1811 1.121 christos (unsigned long long)minor(ap->a_vp->v_rdev)); 1812 1.28 christos return 0; 1813 1.15 mycroft } 1814 1.15 mycroft 1815 1.15 mycroft /* 1816 1.15 mycroft * Return POSIX pathconf information applicable to special devices. 1817 1.15 mycroft */ 1818 1.28 christos int 1819 1.104 pooka spec_pathconf(void *v) 1820 1.28 christos { 1821 1.15 mycroft struct vop_pathconf_args /* { 1822 1.15 mycroft struct vnode *a_vp; 1823 1.15 mycroft int a_name; 1824 1.18 cgd register_t *a_retval; 1825 1.28 christos } */ *ap = v; 1826 1.1 cgd 1827 1.15 mycroft switch (ap->a_name) { 1828 1.15 mycroft case _PC_LINK_MAX: 1829 1.15 mycroft *ap->a_retval = LINK_MAX; 1830 1.218 riastrad return 0; 1831 1.15 mycroft case _PC_MAX_CANON: 1832 1.15 mycroft *ap->a_retval = MAX_CANON; 1833 1.218 riastrad return 0; 1834 1.15 mycroft case _PC_MAX_INPUT: 1835 1.15 mycroft *ap->a_retval = MAX_INPUT; 1836 1.218 riastrad return 0; 1837 1.15 mycroft case _PC_PIPE_BUF: 1838 1.15 mycroft *ap->a_retval = PIPE_BUF; 1839 1.218 riastrad return 0; 1840 1.15 mycroft case _PC_CHOWN_RESTRICTED: 1841 1.15 mycroft *ap->a_retval = 1; 1842 1.218 riastrad return 0; 1843 1.15 mycroft case _PC_VDISABLE: 1844 1.15 mycroft *ap->a_retval = _POSIX_VDISABLE; 1845 1.218 riastrad return 0; 1846 1.41 kleink case _PC_SYNC_IO: 1847 1.41 kleink *ap->a_retval = 1; 1848 1.218 riastrad return 0; 1849 1.15 mycroft default: 1850 1.180 christos return genfs_pathconf(ap); 1851 1.15 mycroft } 1852 1.1 cgd /* NOTREACHED */ 1853 1.35 kleink } 1854 1.35 kleink 1855 1.80 perry /* 1856 1.35 kleink * Advisory record locking support. 1857 1.35 kleink */ 1858 1.35 kleink int 1859 1.104 pooka spec_advlock(void *v) 1860 1.35 kleink { 1861 1.35 kleink struct vop_advlock_args /* { 1862 1.35 kleink struct vnode *a_vp; 1863 1.78 jrf void *a_id; 1864 1.35 kleink int a_op; 1865 1.35 kleink struct flock *a_fl; 1866 1.35 kleink int a_flags; 1867 1.35 kleink } */ *ap = v; 1868 1.48 augustss struct vnode *vp = ap->a_vp; 1869 1.35 kleink 1870 1.49 jdolecek return lf_advlock(ap, &vp->v_speclockf, (off_t)0); 1871 1.1 cgd } 1872