subr_devsw.c revision 1.48 1 1.48 riastrad /* $NetBSD: subr_devsw.c,v 1.48 2022/08/28 12:24:39 riastradh Exp $ */
2 1.11 ad
3 1.2 gehenna /*-
4 1.20 ad * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 1.2 gehenna * All rights reserved.
6 1.2 gehenna *
7 1.2 gehenna * This code is derived from software contributed to The NetBSD Foundation
8 1.11 ad * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 1.2 gehenna *
10 1.2 gehenna * Redistribution and use in source and binary forms, with or without
11 1.2 gehenna * modification, are permitted provided that the following conditions
12 1.2 gehenna * are met:
13 1.2 gehenna * 1. Redistributions of source code must retain the above copyright
14 1.2 gehenna * notice, this list of conditions and the following disclaimer.
15 1.2 gehenna * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 gehenna * notice, this list of conditions and the following disclaimer in the
17 1.2 gehenna * documentation and/or other materials provided with the distribution.
18 1.2 gehenna *
19 1.2 gehenna * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 gehenna * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 gehenna * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 gehenna * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 gehenna * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 gehenna * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 gehenna * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 gehenna * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 gehenna * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 gehenna * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 gehenna * POSSIBILITY OF SUCH DAMAGE.
30 1.2 gehenna */
31 1.45 riastrad
32 1.11 ad /*
33 1.11 ad * Overview
34 1.11 ad *
35 1.11 ad * subr_devsw.c: registers device drivers by name and by major
36 1.11 ad * number, and provides wrapper methods for performing I/O and
37 1.11 ad * other tasks on device drivers, keying on the device number
38 1.11 ad * (dev_t).
39 1.11 ad *
40 1.11 ad * When the system is built, the config(8) command generates
41 1.11 ad * static tables of device drivers built into the kernel image
42 1.11 ad * along with their associated methods. These are recorded in
43 1.11 ad * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 1.11 ad * and removed from the system dynamically.
45 1.11 ad *
46 1.11 ad * Allocation
47 1.11 ad *
48 1.11 ad * When the system initially boots only the statically allocated
49 1.11 ad * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 1.11 ad * allocation, we allocate a fixed block of memory to hold the new,
51 1.11 ad * expanded index. This "fork" of the table is only ever performed
52 1.11 ad * once in order to guarantee that other threads may safely access
53 1.11 ad * the device tables:
54 1.11 ad *
55 1.11 ad * o Once a thread has a "reference" to the table via an earlier
56 1.11 ad * open() call, we know that the entry in the table must exist
57 1.11 ad * and so it is safe to access it.
58 1.11 ad *
59 1.11 ad * o Regardless of whether other threads see the old or new
60 1.11 ad * pointers, they will point to a correct device switch
61 1.11 ad * structure for the operation being performed.
62 1.11 ad *
63 1.11 ad * XXX Currently, the wrapper methods such as cdev_read() verify
64 1.11 ad * that a device driver does in fact exist before calling the
65 1.11 ad * associated driver method. This should be changed so that
 66 1.11       ad  *	once the device has been referenced by a vnode (opened),
67 1.11 ad * calling the other methods should be valid until that reference
68 1.11 ad * is dropped.
69 1.11 ad */
70 1.7 lukem
71 1.7 lukem #include <sys/cdefs.h>
72 1.48 riastrad __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.48 2022/08/28 12:24:39 riastradh Exp $");
73 1.34 riz
74 1.34 riz #ifdef _KERNEL_OPT
75 1.34 riz #include "opt_dtrace.h"
76 1.34 riz #endif
77 1.2 gehenna
78 1.2 gehenna #include <sys/param.h>
79 1.2 gehenna #include <sys/conf.h>
80 1.11 ad #include <sys/kmem.h>
81 1.2 gehenna #include <sys/systm.h>
82 1.11 ad #include <sys/poll.h>
83 1.11 ad #include <sys/tty.h>
84 1.15 matt #include <sys/cpu.h>
85 1.11 ad #include <sys/buf.h>
86 1.29 mrg #include <sys/reboot.h>
87 1.34 riz #include <sys/sdt.h>
88 1.40 riastrad #include <sys/atomic.h>
89 1.40 riastrad #include <sys/localcount.h>
90 1.40 riastrad #include <sys/pserialize.h>
91 1.40 riastrad #include <sys/xcall.h>
92 1.41 riastrad #include <sys/device.h>
93 1.2 gehenna
94 1.2 gehenna #ifdef DEVSW_DEBUG
95 1.2 gehenna #define DPRINTF(x) printf x
96 1.2 gehenna #else /* DEVSW_DEBUG */
97 1.2 gehenna #define DPRINTF(x)
98 1.2 gehenna #endif /* DEVSW_DEBUG */
99 1.2 gehenna
#define	MAXDEVSW	512	/* the maximum of major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

/*
 * Per-major reference record, kept in tables parallel to bdevsw and
 * cdevsw.  The localcount keeps the driver's switch entry alive between
 * *_lookup_acquire and *_release while devsw_detach waits for all
 * references to drain.
 */
struct devswref {
	struct localcount *dr_lc;
};

/* XXX bdevsw, cdevsw, max_bdevsws, and max_cdevsws should be volatile */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

/* Reference tables parallel to cdevsw/bdevsw, allocated lazily. */
static struct devswref *cdevswref;
static struct devswref *bdevswref;
static kcondvar_t devsw_cv;	/* signalled as localcount refs drain */

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

kmutex_t device_lock;		/* serializes devsw/autoconf state */

void (*biodone_vfs)(buf_t *) = (void *)nullop;
127 1.31 pooka
/*
 * devsw_init:
 *
 *	One-time initialization of the device switch machinery: the
 *	global device_lock and the condition variable used to wait for
 *	device references to drain.  Called early at boot, before any
 *	dynamic devsw_attach.
 */
void
devsw_init(void)
{

	/* The static tables must leave room to grow to MAXDEVSW. */
	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);

	cv_init(&devsw_cv, "devsw");
}
138 1.2 gehenna
/*
 * devsw_attach:
 *
 *	Register a character (mandatory) and block (optional) device
 *	switch under the given driver name.  On entry, *bmajor/*cmajor
 *	may be -1 to request dynamic major allocation; on success they
 *	hold the majors actually in use.  Records the name<->major
 *	binding in the devsw_conv table.  Returns 0 or an errno.
 */
int
devsw_attach(const char *devname,
    const struct bdevsw *bdev, devmajor_t *bmajor,
    const struct cdevsw *cdev, devmajor_t *cmajor)
{
	struct devsw_conv *conv;
	char *name;
	int error, i;

	if (devname == NULL || cdev == NULL)
		return EINVAL;

	mutex_enter(&device_lock);

	/*
	 * If a binding for this name already exists (e.g. the driver
	 * was attached before), reuse its majors and validate that the
	 * caller's request is consistent with it.
	 */
	for (i = 0; i < max_devsw_convs; i++) {
		conv = &devsw_conv[i];
		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
			continue;

		/* Caller may inherit the previously recorded majors. */
		if (*bmajor < 0)
			*bmajor = conv->d_bmajor;
		if (*cmajor < 0)
			*cmajor = conv->d_cmajor;

		/* Explicitly requested majors must match the record. */
		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
			error = EINVAL;
			goto out;
		}
		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
			error = EINVAL;
			goto out;
		}

		/* The recorded slots must not be occupied right now. */
		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
		    cdevsw[*cmajor] != NULL) {
			error = EEXIST;
			goto out;
		}
		break;
	}

	/*
	 * XXX This should allocate what it needs up front so we never
	 * need to flail around trying to unwind.
	 */
	error = bdevsw_attach(bdev, bmajor);
	if (error != 0)
		goto out;
	error = cdevsw_attach(cdev, cmajor);
	if (error != 0) {
		/* Unwind the block-side attach done just above. */
		devsw_detach_locked(bdev, NULL);
		goto out;
	}

	/*
	 * If we already found a conv, we're done.  Otherwise, find an
	 * empty slot or extend the table.
	 */
	if (i < max_devsw_convs) {
		error = 0;
		goto out;
	}

	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_name == NULL)
			break;
	}
	if (i == max_devsw_convs) {
		/* No free slot: grow the conversion table by one. */
		struct devsw_conv *newptr;
		int old_convs, new_convs;

		old_convs = max_devsw_convs;
		new_convs = old_convs + 1;

		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
		if (newptr == NULL) {
			devsw_detach_locked(bdev, cdev);
			error = ENOMEM;
			goto out;
		}
		/* Initialize the new tail entry, then copy the rest. */
		newptr[old_convs].d_name = NULL;
		newptr[old_convs].d_bmajor = -1;
		newptr[old_convs].d_cmajor = -1;
		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
		/* The static bootstrap table is never freed. */
		if (devsw_conv != devsw_conv0)
			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
		devsw_conv = newptr;
		max_devsw_convs = new_convs;
	}

	name = kmem_strdupsize(devname, NULL, KM_NOSLEEP);
	if (name == NULL) {
		devsw_detach_locked(bdev, cdev);
		error = ENOMEM;
		goto out;
	}

	/* Record the new name<->major binding. */
	devsw_conv[i].d_name = name;
	devsw_conv[i].d_bmajor = *bmajor;
	devsw_conv[i].d_cmajor = *cmajor;
	error = 0;
out:
	mutex_exit(&device_lock);
	return error;
}
244 1.2 gehenna
/*
 * bdevsw_attach:
 *
 *	Install a block device switch at *devmajor, choosing a free
 *	dynamic major if *devmajor is -1.  Grows the global bdevsw
 *	table to MAXDEVSW entries (once, ever) when a major beyond the
 *	static table is needed, and sets up the localcount used by
 *	bdevsw_lookup_acquire.  Caller holds device_lock.  Returns 0
 *	or an errno.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newbdevsw = NULL;
	struct devswref *newbdevswref = NULL;
	struct localcount *lc;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	/* A block devsw is optional; nothing to do without one. */
	if (devsw == NULL)
		return 0;

	if (*devmajor < 0) {
		/*
		 * Pick the first dynamic major that is neither in use
		 * in the table nor reserved by a devsw_conv binding.
		 */
		for (bmajor = sys_bdevsws; bmajor < max_bdevsws; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (bdevswref == NULL) {
		/* Lazily allocate the parallel reference table. */
		newbdevswref = kmem_zalloc(MAXDEVSW * sizeof(newbdevswref[0]),
		    KM_NOSLEEP);
		if (newbdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&bdevswref, newbdevswref);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		/*
		 * "Fork" the static table into the full-sized one.
		 * Readers holding the old pointer remain valid; the
		 * release ordering publishes the copy before the new
		 * bound becomes visible.
		 */
		newbdevsw = kmem_zalloc(MAXDEVSW * sizeof(newbdevsw[0]),
		    KM_NOSLEEP);
		if (newbdevsw == NULL)
			return ENOMEM;
		memcpy(newbdevsw, bdevsw, max_bdevsws * sizeof(bdevsw[0]));
		atomic_store_release(&bdevsw, newbdevsw);
		atomic_store_release(&max_bdevsws, MAXDEVSW);
	}

	if (bdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(bdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	bdevswref[*devmajor].dr_lc = lc;

	/* Publish; pairs with atomic_load_consume in the lookups. */
	atomic_store_release(&bdevsw[*devmajor], devsw);

	return 0;
}
310 1.2 gehenna
/*
 * cdevsw_attach:
 *
 *	Install a character device switch at *devmajor, choosing a
 *	free dynamic major if *devmajor is -1.  Grows the global
 *	cdevsw table to MAXDEVSW entries (once, ever) when needed, and
 *	sets up the localcount used by cdevsw_lookup_acquire.  Caller
 *	holds device_lock.  Returns 0 or an errno.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newcdevsw = NULL;
	struct devswref *newcdevswref = NULL;
	struct localcount *lc;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/*
		 * Pick the first dynamic major that is neither in use
		 * in the table nor reserved by a devsw_conv binding.
		 */
		for (cmajor = sys_cdevsws; cmajor < max_cdevsws; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0; i < max_devsw_convs; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted\n", __func__);
		return ENOMEM;
	}

	if (cdevswref == NULL) {
		/* Lazily allocate the parallel reference table. */
		newcdevswref = kmem_zalloc(MAXDEVSW * sizeof(newcdevswref[0]),
		    KM_NOSLEEP);
		if (newcdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&cdevswref, newcdevswref);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		/*
		 * "Fork" the static table into the full-sized one.
		 * Readers holding the old pointer remain valid; the
		 * release ordering publishes the copy before the new
		 * bound becomes visible.
		 */
		newcdevsw = kmem_zalloc(MAXDEVSW * sizeof(newcdevsw[0]),
		    KM_NOSLEEP);
		if (newcdevsw == NULL)
			return ENOMEM;
		memcpy(newcdevsw, cdevsw, max_cdevsws * sizeof(cdevsw[0]));
		atomic_store_release(&cdevsw, newcdevsw);
		atomic_store_release(&max_cdevsws, MAXDEVSW);
	}

	if (cdevsw[*devmajor] != NULL)
		return EEXIST;

	KASSERT(cdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	cdevswref[*devmajor].dr_lc = lc;

	/* Publish; pairs with atomic_load_consume in the lookups. */
	atomic_store_release(&cdevsw[*devmajor], devsw);

	return 0;
}
373 1.2 gehenna
/*
 * devsw_detach_locked:
 *
 *	Remove bdev and/or cdev from the switch tables and wait for
 *	all acquired references to drain.  Caller holds device_lock;
 *	note that localcount_drain may drop and retake it.
 */
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int bi, ci = -1/*XXXGCC*/, di;
	struct cfdriver *cd;
	device_t dv;

	KASSERT(mutex_owned(&device_lock));

	/*
	 * If this is wired to an autoconf device, make sure the device
	 * has no more instances.  No locking here because under
	 * correct use of devsw_detach, none of this state can change
	 * at this point.
	 */
	if (cdev != NULL && (cd = cdev->d_cfdriver) != NULL) {
		for (di = 0; di < cd->cd_ndevs; di++) {
			KASSERTMSG((dv = cd->cd_devs[di]) == NULL,
			    "detaching character device driver %s"
			    " still has attached unit %s",
			    cd->cd_name, device_xname(dv));
		}
	}
	if (bdev != NULL && (cd = bdev->d_cfdriver) != NULL) {
		for (di = 0; di < cd->cd_ndevs; di++) {
			KASSERTMSG((dv = cd->cd_devs[di]) == NULL,
			    "detaching block device driver %s"
			    " still has attached unit %s",
			    cd->cd_name, device_xname(dv));
		}
	}

	/* Prevent new references. */
	if (bdev != NULL) {
		for (bi = 0; bi < max_bdevsws; bi++) {
			if (bdevsw[bi] != bdev)
				continue;
			atomic_store_relaxed(&bdevsw[bi], NULL);
			break;
		}
		KASSERT(bi < max_bdevsws);
	}
	if (cdev != NULL) {
		for (ci = 0; ci < max_cdevsws; ci++) {
			if (cdevsw[ci] != cdev)
				continue;
			atomic_store_relaxed(&cdevsw[ci], NULL);
			break;
		}
		KASSERT(ci < max_cdevsws);
	}

	if (bdev == NULL && cdev == NULL) /* XXX possible? */
		return;

	/*
	 * Wait for all bdevsw_lookup_acquire, cdevsw_lookup_acquire
	 * calls to notice that the devsw is gone.
	 *
	 * XXX Despite the use of the pserialize_read_enter/exit API
	 * elsewhere in this file, we use xc_barrier here instead of
	 * pserialize_perform -- because devsw_init is too early for
	 * pserialize_create.  Either pserialize_create should be made
	 * to work earlier, or it should be nixed altogether.  Until
	 * that is fixed, xc_barrier will serve the same purpose.
	 */
	xc_barrier(0);

	/*
	 * Wait for all references to drain.  It is the caller's
	 * responsibility to ensure that at this point, there are no
	 * extant open instances and all new d_open calls will fail.
	 *
	 * Note that localcount_drain may release and reacquire
	 * device_lock.
	 */
	if (bdev != NULL) {
		localcount_drain(bdevswref[bi].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(bdevswref[bi].dr_lc);
		kmem_free(bdevswref[bi].dr_lc, sizeof(*bdevswref[bi].dr_lc));
		bdevswref[bi].dr_lc = NULL;
	}
	if (cdev != NULL) {
		localcount_drain(cdevswref[ci].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(cdevswref[ci].dr_lc);
		kmem_free(cdevswref[ci].dr_lc, sizeof(*cdevswref[ci].dr_lc));
		cdevswref[ci].dr_lc = NULL;
	}
}
465 1.2 gehenna
/*
 * devsw_detach:
 *
 *	Remove a block and/or character device switch registered with
 *	devsw_attach.  Blocks until all acquired references have
 *	drained; caller must guarantee no open instances remain.
 */
void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
}
474 1.11 ad
475 1.11 ad /*
476 1.11 ad * Look up a block device by number.
477 1.11 ad *
478 1.11 ad * => Caller must ensure that the device is attached.
479 1.11 ad */
480 1.2 gehenna const struct bdevsw *
481 1.2 gehenna bdevsw_lookup(dev_t dev)
482 1.2 gehenna {
483 1.24 drochner devmajor_t bmajor;
484 1.2 gehenna
485 1.2 gehenna if (dev == NODEV)
486 1.45 riastrad return NULL;
487 1.2 gehenna bmajor = major(dev);
488 1.40 riastrad if (bmajor < 0 || bmajor >= atomic_load_relaxed(&max_bdevsws))
489 1.45 riastrad return NULL;
490 1.2 gehenna
491 1.40 riastrad return atomic_load_consume(&bdevsw)[bmajor];
492 1.40 riastrad }
493 1.40 riastrad
/*
 * bdevsw_lookup_acquire:
 *
 *	Look up a block device by number and take a reference that
 *	keeps devsw_detach from completing until bdevsw_release is
 *	called.  On success, *lcp holds the localcount to release
 *	(may be NULL when no reference table exists yet).
 */
static const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL, *const *curbdevsw;
	struct devswref *curbdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_bdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_bdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (bmajor >= sys_bdevsws &&
	    bmajor >= atomic_load_acquire(&max_bdevsws))
		goto out;
	curbdevsw = atomic_load_consume(&bdevsw);
	if ((bdev = atomic_load_consume(&curbdevsw[bmajor])) == NULL)
		goto out;

	curbdevswref = atomic_load_consume(&bdevswref);
	if (curbdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curbdevswref[bmajor].dr_lc) != NULL) {
		/* Pins the entry until bdevsw_release. */
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return bdev;
}
533 1.40 riastrad
534 1.40 riastrad static void
535 1.40 riastrad bdevsw_release(const struct bdevsw *bdev, struct localcount *lc)
536 1.40 riastrad {
537 1.40 riastrad
538 1.40 riastrad if (lc == NULL)
539 1.40 riastrad return;
540 1.40 riastrad localcount_release(lc, &devsw_cv, &device_lock);
541 1.2 gehenna }
542 1.2 gehenna
543 1.11 ad /*
544 1.11 ad * Look up a character device by number.
545 1.11 ad *
546 1.11 ad * => Caller must ensure that the device is attached.
547 1.11 ad */
548 1.2 gehenna const struct cdevsw *
549 1.2 gehenna cdevsw_lookup(dev_t dev)
550 1.2 gehenna {
551 1.24 drochner devmajor_t cmajor;
552 1.2 gehenna
553 1.2 gehenna if (dev == NODEV)
554 1.45 riastrad return NULL;
555 1.2 gehenna cmajor = major(dev);
556 1.40 riastrad if (cmajor < 0 || cmajor >= atomic_load_relaxed(&max_cdevsws))
557 1.45 riastrad return NULL;
558 1.2 gehenna
559 1.40 riastrad return atomic_load_consume(&cdevsw)[cmajor];
560 1.40 riastrad }
561 1.40 riastrad
/*
 * cdevsw_lookup_acquire:
 *
 *	Look up a character device by number and take a reference that
 *	keeps devsw_detach from completing until cdevsw_release is
 *	called.  On success, *lcp holds the localcount to release
 *	(may be NULL when no reference table exists yet).
 */
static const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL, *const *curcdevsw;
	struct devswref *curcdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0)
		return NULL;

	s = pserialize_read_enter();

	/*
	 * max_cdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_cdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (cmajor >= sys_cdevsws &&
	    cmajor >= atomic_load_acquire(&max_cdevsws))
		goto out;
	curcdevsw = atomic_load_consume(&cdevsw);
	if ((cdev = atomic_load_consume(&curcdevsw[cmajor])) == NULL)
		goto out;

	curcdevswref = atomic_load_consume(&cdevswref);
	if (curcdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curcdevswref[cmajor].dr_lc) != NULL) {
		/* Pins the entry until cdevsw_release. */
		localcount_acquire(*lcp);
	}
out:
	pserialize_read_exit(s);
	return cdev;
}
601 1.40 riastrad
602 1.40 riastrad static void
603 1.40 riastrad cdevsw_release(const struct cdevsw *cdev, struct localcount *lc)
604 1.40 riastrad {
605 1.40 riastrad
606 1.40 riastrad if (lc == NULL)
607 1.40 riastrad return;
608 1.40 riastrad localcount_release(lc, &devsw_cv, &device_lock);
609 1.2 gehenna }
610 1.2 gehenna
611 1.11 ad /*
612 1.11 ad * Look up a block device by reference to its operations set.
613 1.11 ad *
614 1.11 ad * => Caller must ensure that the device is not detached, and therefore
615 1.11 ad * that the returned major is still valid when dereferenced.
616 1.11 ad */
617 1.24 drochner devmajor_t
618 1.2 gehenna bdevsw_lookup_major(const struct bdevsw *bdev)
619 1.2 gehenna {
620 1.40 riastrad const struct bdevsw *const *curbdevsw;
621 1.40 riastrad devmajor_t bmajor, bmax;
622 1.2 gehenna
623 1.40 riastrad bmax = atomic_load_acquire(&max_bdevsws);
624 1.40 riastrad curbdevsw = atomic_load_consume(&bdevsw);
625 1.40 riastrad for (bmajor = 0; bmajor < bmax; bmajor++) {
626 1.40 riastrad if (atomic_load_relaxed(&curbdevsw[bmajor]) == bdev)
627 1.45 riastrad return bmajor;
628 1.2 gehenna }
629 1.2 gehenna
630 1.45 riastrad return NODEVMAJOR;
631 1.2 gehenna }
632 1.2 gehenna
633 1.11 ad /*
634 1.11 ad * Look up a character device by reference to its operations set.
635 1.11 ad *
636 1.11 ad * => Caller must ensure that the device is not detached, and therefore
637 1.11 ad * that the returned major is still valid when dereferenced.
638 1.11 ad */
639 1.24 drochner devmajor_t
640 1.2 gehenna cdevsw_lookup_major(const struct cdevsw *cdev)
641 1.2 gehenna {
642 1.40 riastrad const struct cdevsw *const *curcdevsw;
643 1.40 riastrad devmajor_t cmajor, cmax;
644 1.2 gehenna
645 1.40 riastrad cmax = atomic_load_acquire(&max_cdevsws);
646 1.40 riastrad curcdevsw = atomic_load_consume(&cdevsw);
647 1.40 riastrad for (cmajor = 0; cmajor < cmax; cmajor++) {
648 1.40 riastrad if (atomic_load_relaxed(&curcdevsw[cmajor]) == cdev)
649 1.45 riastrad return cmajor;
650 1.2 gehenna }
651 1.2 gehenna
652 1.45 riastrad return NODEVMAJOR;
653 1.2 gehenna }
654 1.2 gehenna
/*
 * Convert from block major number to name.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the name pointer is still valid when dereferenced.
 */
const char *
devsw_blk2name(devmajor_t bmajor)
{
	const char *name;
	devmajor_t cmajor;
	int i;

	name = NULL;
	cmajor = -1;

	mutex_enter(&device_lock);
	/* The block major must be in range and currently attached. */
	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
		mutex_exit(&device_lock);
		return NULL;
	}
	/* Find the name binding that records this block major. */
	for (i = 0; i < max_devsw_convs; i++) {
		if (devsw_conv[i].d_bmajor == bmajor) {
			cmajor = devsw_conv[i].d_cmajor;
			break;
		}
	}
	/* Report a name only if the paired char device is attached. */
	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
		name = devsw_conv[i].d_name;
	mutex_exit(&device_lock);

	return name;
}
688 1.2 gehenna
689 1.2 gehenna /*
690 1.26 haad * Convert char major number to device driver name.
691 1.26 haad */
692 1.27 yamt const char *
693 1.26 haad cdevsw_getname(devmajor_t major)
694 1.26 haad {
695 1.26 haad const char *name;
696 1.26 haad int i;
697 1.26 haad
698 1.26 haad name = NULL;
699 1.26 haad
700 1.26 haad if (major < 0)
701 1.45 riastrad return NULL;
702 1.45 riastrad
703 1.26 haad mutex_enter(&device_lock);
704 1.45 riastrad for (i = 0; i < max_devsw_convs; i++) {
705 1.26 haad if (devsw_conv[i].d_cmajor == major) {
706 1.26 haad name = devsw_conv[i].d_name;
707 1.26 haad break;
708 1.26 haad }
709 1.26 haad }
710 1.26 haad mutex_exit(&device_lock);
711 1.45 riastrad return name;
712 1.26 haad }
713 1.26 haad
714 1.26 haad /*
715 1.26 haad * Convert block major number to device driver name.
716 1.26 haad */
717 1.27 yamt const char *
718 1.26 haad bdevsw_getname(devmajor_t major)
719 1.26 haad {
720 1.26 haad const char *name;
721 1.26 haad int i;
722 1.26 haad
723 1.26 haad name = NULL;
724 1.26 haad
725 1.26 haad if (major < 0)
726 1.45 riastrad return NULL;
727 1.45 riastrad
728 1.26 haad mutex_enter(&device_lock);
729 1.45 riastrad for (i = 0; i < max_devsw_convs; i++) {
730 1.26 haad if (devsw_conv[i].d_bmajor == major) {
731 1.26 haad name = devsw_conv[i].d_name;
732 1.26 haad break;
733 1.26 haad }
734 1.26 haad }
735 1.26 haad mutex_exit(&device_lock);
736 1.45 riastrad return name;
737 1.26 haad }
738 1.26 haad
/*
 * Convert from device name to block major number.
 *
 * => Caller must ensure that the device is not detached, and therefore
 *    that the major number is still valid when dereferenced.
 */
devmajor_t
devsw_name2blk(const char *name, char *devname, size_t devnamelen)
{
	struct devsw_conv *conv;
	devmajor_t bmajor;
	int i;

	if (name == NULL)
		return NODEVMAJOR;

	mutex_enter(&device_lock);
	for (i = 0; i < max_devsw_convs; i++) {
		size_t len;

		conv = &devsw_conv[i];
		if (conv->d_name == NULL)
			continue;
		len = strlen(conv->d_name);
		if (strncmp(conv->d_name, name, len) != 0)
			continue;
		/*
		 * Accept either the bare driver name or the name
		 * followed by a unit number (e.g. "wd0").
		 */
		if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
			continue;
		bmajor = conv->d_bmajor;
		/* Name matched but driver not attached: fail. */
		if (bmajor < 0 || bmajor >= max_bdevsws ||
		    bdevsw[bmajor] == NULL)
			break;
		if (devname != NULL) {
#ifdef DEVSW_DEBUG
			if (strlen(conv->d_name) >= devnamelen)
				printf("%s: too short buffer\n", __func__);
#endif /* DEVSW_DEBUG */
			/* Copy back the canonical (bare) driver name. */
			strncpy(devname, conv->d_name, devnamelen);
			devname[devnamelen - 1] = '\0';
		}
		mutex_exit(&device_lock);
		return bmajor;
	}

	mutex_exit(&device_lock);
	return NODEVMAJOR;
}
786 1.2 gehenna
787 1.2 gehenna /*
788 1.16 plunky * Convert from device name to char major number.
789 1.16 plunky *
790 1.16 plunky * => Caller must ensure that the device is not detached, and therefore
791 1.16 plunky * that the major number is still valid when dereferenced.
792 1.16 plunky */
793 1.24 drochner devmajor_t
794 1.16 plunky devsw_name2chr(const char *name, char *devname, size_t devnamelen)
795 1.16 plunky {
796 1.16 plunky struct devsw_conv *conv;
797 1.24 drochner devmajor_t cmajor;
798 1.24 drochner int i;
799 1.16 plunky
800 1.16 plunky if (name == NULL)
801 1.45 riastrad return NODEVMAJOR;
802 1.16 plunky
803 1.23 pooka mutex_enter(&device_lock);
804 1.45 riastrad for (i = 0; i < max_devsw_convs; i++) {
805 1.16 plunky size_t len;
806 1.16 plunky
807 1.16 plunky conv = &devsw_conv[i];
808 1.16 plunky if (conv->d_name == NULL)
809 1.16 plunky continue;
810 1.16 plunky len = strlen(conv->d_name);
811 1.16 plunky if (strncmp(conv->d_name, name, len) != 0)
812 1.16 plunky continue;
813 1.45 riastrad if (name[len] != '\0' && !isdigit((unsigned char)name[len]))
814 1.16 plunky continue;
815 1.16 plunky cmajor = conv->d_cmajor;
816 1.16 plunky if (cmajor < 0 || cmajor >= max_cdevsws ||
817 1.16 plunky cdevsw[cmajor] == NULL)
818 1.16 plunky break;
819 1.16 plunky if (devname != NULL) {
820 1.16 plunky #ifdef DEVSW_DEBUG
821 1.16 plunky if (strlen(conv->d_name) >= devnamelen)
822 1.37 pgoyette printf("%s: too short buffer", __func__);
823 1.16 plunky #endif /* DEVSW_DEBUG */
824 1.16 plunky strncpy(devname, conv->d_name, devnamelen);
825 1.16 plunky devname[devnamelen - 1] = '\0';
826 1.16 plunky }
827 1.23 pooka mutex_exit(&device_lock);
828 1.45 riastrad return cmajor;
829 1.16 plunky }
830 1.16 plunky
831 1.23 pooka mutex_exit(&device_lock);
832 1.45 riastrad return NODEVMAJOR;
833 1.16 plunky }
834 1.16 plunky
835 1.16 plunky /*
836 1.2 gehenna * Convert from character dev_t to block dev_t.
837 1.11 ad *
838 1.11 ad * => Caller must ensure that the device is not detached, and therefore
839 1.11 ad * that the major number is still valid when dereferenced.
840 1.2 gehenna */
841 1.2 gehenna dev_t
842 1.2 gehenna devsw_chr2blk(dev_t cdev)
843 1.2 gehenna {
844 1.24 drochner devmajor_t bmajor, cmajor;
845 1.24 drochner int i;
846 1.11 ad dev_t rv;
847 1.2 gehenna
848 1.2 gehenna cmajor = major(cdev);
849 1.24 drochner bmajor = NODEVMAJOR;
850 1.11 ad rv = NODEV;
851 1.2 gehenna
852 1.23 pooka mutex_enter(&device_lock);
853 1.11 ad if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
854 1.23 pooka mutex_exit(&device_lock);
855 1.45 riastrad return NODEV;
856 1.11 ad }
857 1.45 riastrad for (i = 0; i < max_devsw_convs; i++) {
858 1.11 ad if (devsw_conv[i].d_cmajor == cmajor) {
859 1.11 ad bmajor = devsw_conv[i].d_bmajor;
860 1.11 ad break;
861 1.11 ad }
862 1.2 gehenna }
863 1.11 ad if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
864 1.11 ad rv = makedev(bmajor, minor(cdev));
865 1.23 pooka mutex_exit(&device_lock);
866 1.2 gehenna
867 1.45 riastrad return rv;
868 1.2 gehenna }
869 1.2 gehenna
870 1.2 gehenna /*
871 1.2 gehenna * Convert from block dev_t to character dev_t.
872 1.11 ad *
873 1.11 ad * => Caller must ensure that the device is not detached, and therefore
874 1.11 ad * that the major number is still valid when dereferenced.
875 1.2 gehenna */
876 1.2 gehenna dev_t
877 1.2 gehenna devsw_blk2chr(dev_t bdev)
878 1.2 gehenna {
879 1.24 drochner devmajor_t bmajor, cmajor;
880 1.24 drochner int i;
881 1.11 ad dev_t rv;
882 1.2 gehenna
883 1.11 ad bmajor = major(bdev);
884 1.24 drochner cmajor = NODEVMAJOR;
885 1.11 ad rv = NODEV;
886 1.11 ad
887 1.23 pooka mutex_enter(&device_lock);
888 1.11 ad if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
889 1.23 pooka mutex_exit(&device_lock);
890 1.45 riastrad return NODEV;
891 1.11 ad }
892 1.45 riastrad for (i = 0; i < max_devsw_convs; i++) {
893 1.11 ad if (devsw_conv[i].d_bmajor == bmajor) {
894 1.11 ad cmajor = devsw_conv[i].d_cmajor;
895 1.11 ad break;
896 1.11 ad }
897 1.11 ad }
898 1.11 ad if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
899 1.11 ad rv = makedev(cmajor, minor(bdev));
900 1.23 pooka mutex_exit(&device_lock);
901 1.2 gehenna
902 1.45 riastrad return rv;
903 1.11 ad }
904 1.11 ad
905 1.11 ad /*
906 1.11 ad * Device access methods.
907 1.11 ad */
908 1.11 ad
909 1.11 ad #define DEV_LOCK(d) \
910 1.17 ad if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) { \
911 1.17 ad KERNEL_LOCK(1, NULL); \
912 1.11 ad }
913 1.2 gehenna
914 1.11 ad #define DEV_UNLOCK(d) \
915 1.17 ad if (mpflag == 0) { \
916 1.17 ad KERNEL_UNLOCK_ONE(NULL); \
917 1.2 gehenna }
918 1.2 gehenna
919 1.11 ad int
920 1.11 ad bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
921 1.11 ad {
922 1.11 ad const struct bdevsw *d;
923 1.40 riastrad struct localcount *lc;
924 1.41 riastrad device_t dv = NULL/*XXXGCC*/;
925 1.41 riastrad int unit, rv, mpflag;
926 1.11 ad
927 1.40 riastrad d = bdevsw_lookup_acquire(dev, &lc);
928 1.11 ad if (d == NULL)
929 1.11 ad return ENXIO;
930 1.11 ad
931 1.41 riastrad if (d->d_devtounit) {
932 1.41 riastrad /*
933 1.41 riastrad * If the device node corresponds to an autoconf device
934 1.41 riastrad * instance, acquire a reference to it so that during
935 1.41 riastrad * d_open, device_lookup is stable.
936 1.41 riastrad *
937 1.41 riastrad * XXX This should also arrange to instantiate cloning
938 1.41 riastrad * pseudo-devices if appropriate, but that requires
939 1.41 riastrad * reviewing them all to find and verify a common
940 1.41 riastrad * pattern.
941 1.41 riastrad */
942 1.41 riastrad if ((unit = (*d->d_devtounit)(dev)) == -1)
943 1.41 riastrad return ENXIO;
944 1.41 riastrad if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL)
945 1.41 riastrad return ENXIO;
946 1.41 riastrad }
947 1.41 riastrad
948 1.11 ad DEV_LOCK(d);
949 1.11 ad rv = (*d->d_open)(dev, flag, devtype, l);
950 1.11 ad DEV_UNLOCK(d);
951 1.11 ad
952 1.41 riastrad if (d->d_devtounit) {
953 1.41 riastrad device_release(dv);
954 1.41 riastrad }
955 1.41 riastrad
956 1.40 riastrad bdevsw_release(d, lc);
957 1.40 riastrad
958 1.11 ad return rv;
959 1.11 ad }
960 1.11 ad
961 1.11 ad int
962 1.44 riastrad bdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
963 1.44 riastrad {
964 1.44 riastrad const struct bdevsw *d;
965 1.44 riastrad int rv, mpflag;
966 1.44 riastrad
967 1.44 riastrad if ((d = bdevsw_lookup(dev)) == NULL)
968 1.44 riastrad return ENXIO;
969 1.44 riastrad if (d->d_cancel == NULL)
970 1.44 riastrad return ENODEV;
971 1.44 riastrad
972 1.44 riastrad DEV_LOCK(d);
973 1.44 riastrad rv = (*d->d_cancel)(dev, flag, devtype, l);
974 1.44 riastrad DEV_UNLOCK(d);
975 1.44 riastrad
976 1.44 riastrad return rv;
977 1.44 riastrad }
978 1.44 riastrad
979 1.44 riastrad int
980 1.11 ad bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
981 1.11 ad {
982 1.11 ad const struct bdevsw *d;
983 1.17 ad int rv, mpflag;
984 1.11 ad
985 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
986 1.11 ad return ENXIO;
987 1.11 ad
988 1.11 ad DEV_LOCK(d);
989 1.11 ad rv = (*d->d_close)(dev, flag, devtype, l);
990 1.11 ad DEV_UNLOCK(d);
991 1.11 ad
992 1.11 ad return rv;
993 1.11 ad }
994 1.11 ad
995 1.34 riz SDT_PROVIDER_DECLARE(io);
996 1.34 riz SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
997 1.34 riz
998 1.11 ad void
999 1.11 ad bdev_strategy(struct buf *bp)
1000 1.11 ad {
1001 1.11 ad const struct bdevsw *d;
1002 1.17 ad int mpflag;
1003 1.11 ad
1004 1.34 riz SDT_PROBE1(io, kernel, , start, bp);
1005 1.34 riz
1006 1.28 jmcneill if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
1007 1.28 jmcneill bp->b_error = ENXIO;
1008 1.28 jmcneill bp->b_resid = bp->b_bcount;
1009 1.31 pooka biodone_vfs(bp); /* biodone() iff vfs present */
1010 1.28 jmcneill return;
1011 1.28 jmcneill }
1012 1.11 ad
1013 1.11 ad DEV_LOCK(d);
1014 1.11 ad (*d->d_strategy)(bp);
1015 1.11 ad DEV_UNLOCK(d);
1016 1.11 ad }
1017 1.11 ad
1018 1.11 ad int
1019 1.11 ad bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
1020 1.11 ad {
1021 1.11 ad const struct bdevsw *d;
1022 1.17 ad int rv, mpflag;
1023 1.11 ad
1024 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
1025 1.11 ad return ENXIO;
1026 1.11 ad
1027 1.11 ad DEV_LOCK(d);
1028 1.11 ad rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
1029 1.11 ad DEV_UNLOCK(d);
1030 1.11 ad
1031 1.11 ad return rv;
1032 1.11 ad }
1033 1.11 ad
1034 1.11 ad int
1035 1.11 ad bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
1036 1.11 ad {
1037 1.11 ad const struct bdevsw *d;
1038 1.11 ad int rv;
1039 1.11 ad
1040 1.11 ad /*
1041 1.11 ad * Dump can be called without the device open. Since it can
1042 1.11 ad * currently only be called with the system paused (and in a
1043 1.11 ad * potentially unstable state), we don't perform any locking.
1044 1.11 ad */
1045 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
1046 1.11 ad return ENXIO;
1047 1.11 ad
1048 1.11 ad /* DEV_LOCK(d); */
1049 1.11 ad rv = (*d->d_dump)(dev, addr, data, sz);
1050 1.11 ad /* DEV_UNLOCK(d); */
1051 1.11 ad
1052 1.11 ad return rv;
1053 1.11 ad }
1054 1.11 ad
1055 1.11 ad int
1056 1.35 nat bdev_flags(dev_t dev)
1057 1.35 nat {
1058 1.35 nat const struct bdevsw *d;
1059 1.35 nat
1060 1.35 nat if ((d = bdevsw_lookup(dev)) == NULL)
1061 1.35 nat return 0;
1062 1.35 nat return d->d_flag & ~D_TYPEMASK;
1063 1.35 nat }
1064 1.35 nat
1065 1.35 nat int
1066 1.11 ad bdev_type(dev_t dev)
1067 1.11 ad {
1068 1.11 ad const struct bdevsw *d;
1069 1.11 ad
1070 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
1071 1.11 ad return D_OTHER;
1072 1.11 ad return d->d_flag & D_TYPEMASK;
1073 1.11 ad }
1074 1.11 ad
1075 1.11 ad int
1076 1.29 mrg bdev_size(dev_t dev)
1077 1.29 mrg {
1078 1.29 mrg const struct bdevsw *d;
1079 1.29 mrg int rv, mpflag = 0;
1080 1.29 mrg
1081 1.29 mrg if ((d = bdevsw_lookup(dev)) == NULL ||
1082 1.29 mrg d->d_psize == NULL)
1083 1.29 mrg return -1;
1084 1.29 mrg
1085 1.29 mrg /*
1086 1.29 mrg * Don't to try lock the device if we're dumping.
1087 1.30 mrg * XXX: is there a better way to test this?
1088 1.29 mrg */
1089 1.29 mrg if ((boothowto & RB_DUMP) == 0)
1090 1.29 mrg DEV_LOCK(d);
1091 1.29 mrg rv = (*d->d_psize)(dev);
1092 1.29 mrg if ((boothowto & RB_DUMP) == 0)
1093 1.29 mrg DEV_UNLOCK(d);
1094 1.29 mrg
1095 1.29 mrg return rv;
1096 1.29 mrg }
1097 1.29 mrg
1098 1.29 mrg int
1099 1.32 dholland bdev_discard(dev_t dev, off_t pos, off_t len)
1100 1.32 dholland {
1101 1.32 dholland const struct bdevsw *d;
1102 1.32 dholland int rv, mpflag;
1103 1.32 dholland
1104 1.32 dholland if ((d = bdevsw_lookup(dev)) == NULL)
1105 1.32 dholland return ENXIO;
1106 1.32 dholland
1107 1.32 dholland DEV_LOCK(d);
1108 1.32 dholland rv = (*d->d_discard)(dev, pos, len);
1109 1.32 dholland DEV_UNLOCK(d);
1110 1.32 dholland
1111 1.32 dholland return rv;
1112 1.32 dholland }
1113 1.32 dholland
1114 1.43 riastrad void
1115 1.43 riastrad bdev_detached(dev_t dev)
1116 1.43 riastrad {
1117 1.43 riastrad const struct bdevsw *d;
1118 1.43 riastrad device_t dv;
1119 1.43 riastrad int unit;
1120 1.43 riastrad
1121 1.43 riastrad if ((d = bdevsw_lookup(dev)) == NULL)
1122 1.43 riastrad return;
1123 1.43 riastrad if (d->d_devtounit == NULL)
1124 1.43 riastrad return;
1125 1.43 riastrad if ((unit = (*d->d_devtounit)(dev)) == -1)
1126 1.43 riastrad return;
1127 1.43 riastrad if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
1128 1.43 riastrad return;
1129 1.43 riastrad config_detach_commit(dv);
1130 1.43 riastrad }
1131 1.43 riastrad
1132 1.32 dholland int
1133 1.11 ad cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
1134 1.11 ad {
1135 1.11 ad const struct cdevsw *d;
1136 1.40 riastrad struct localcount *lc;
1137 1.41 riastrad device_t dv = NULL/*XXXGCC*/;
1138 1.41 riastrad int unit, rv, mpflag;
1139 1.11 ad
1140 1.40 riastrad d = cdevsw_lookup_acquire(dev, &lc);
1141 1.11 ad if (d == NULL)
1142 1.11 ad return ENXIO;
1143 1.11 ad
1144 1.41 riastrad if (d->d_devtounit) {
1145 1.41 riastrad /*
1146 1.41 riastrad * If the device node corresponds to an autoconf device
1147 1.41 riastrad * instance, acquire a reference to it so that during
1148 1.41 riastrad * d_open, device_lookup is stable.
1149 1.41 riastrad *
1150 1.41 riastrad * XXX This should also arrange to instantiate cloning
1151 1.41 riastrad * pseudo-devices if appropriate, but that requires
1152 1.41 riastrad * reviewing them all to find and verify a common
1153 1.41 riastrad * pattern.
1154 1.41 riastrad */
1155 1.41 riastrad if ((unit = (*d->d_devtounit)(dev)) == -1)
1156 1.41 riastrad return ENXIO;
1157 1.41 riastrad if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL)
1158 1.41 riastrad return ENXIO;
1159 1.41 riastrad }
1160 1.41 riastrad
1161 1.11 ad DEV_LOCK(d);
1162 1.11 ad rv = (*d->d_open)(dev, flag, devtype, l);
1163 1.11 ad DEV_UNLOCK(d);
1164 1.11 ad
1165 1.41 riastrad if (d->d_devtounit) {
1166 1.41 riastrad device_release(dv);
1167 1.41 riastrad }
1168 1.41 riastrad
1169 1.40 riastrad cdevsw_release(d, lc);
1170 1.40 riastrad
1171 1.11 ad return rv;
1172 1.11 ad }
1173 1.11 ad
1174 1.11 ad int
1175 1.44 riastrad cdev_cancel(dev_t dev, int flag, int devtype, struct lwp *l)
1176 1.44 riastrad {
1177 1.44 riastrad const struct cdevsw *d;
1178 1.44 riastrad int rv, mpflag;
1179 1.44 riastrad
1180 1.44 riastrad if ((d = cdevsw_lookup(dev)) == NULL)
1181 1.44 riastrad return ENXIO;
1182 1.44 riastrad if (d->d_cancel == NULL)
1183 1.44 riastrad return ENODEV;
1184 1.44 riastrad
1185 1.44 riastrad DEV_LOCK(d);
1186 1.44 riastrad rv = (*d->d_cancel)(dev, flag, devtype, l);
1187 1.44 riastrad DEV_UNLOCK(d);
1188 1.44 riastrad
1189 1.44 riastrad return rv;
1190 1.44 riastrad }
1191 1.44 riastrad
1192 1.44 riastrad int
1193 1.11 ad cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
1194 1.11 ad {
1195 1.11 ad const struct cdevsw *d;
1196 1.17 ad int rv, mpflag;
1197 1.11 ad
1198 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1199 1.11 ad return ENXIO;
1200 1.11 ad
1201 1.11 ad DEV_LOCK(d);
1202 1.11 ad rv = (*d->d_close)(dev, flag, devtype, l);
1203 1.11 ad DEV_UNLOCK(d);
1204 1.11 ad
1205 1.11 ad return rv;
1206 1.11 ad }
1207 1.11 ad
1208 1.11 ad int
1209 1.11 ad cdev_read(dev_t dev, struct uio *uio, int flag)
1210 1.11 ad {
1211 1.11 ad const struct cdevsw *d;
1212 1.17 ad int rv, mpflag;
1213 1.11 ad
1214 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1215 1.11 ad return ENXIO;
1216 1.11 ad
1217 1.11 ad DEV_LOCK(d);
1218 1.11 ad rv = (*d->d_read)(dev, uio, flag);
1219 1.11 ad DEV_UNLOCK(d);
1220 1.11 ad
1221 1.11 ad return rv;
1222 1.11 ad }
1223 1.11 ad
1224 1.11 ad int
1225 1.11 ad cdev_write(dev_t dev, struct uio *uio, int flag)
1226 1.11 ad {
1227 1.11 ad const struct cdevsw *d;
1228 1.17 ad int rv, mpflag;
1229 1.11 ad
1230 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1231 1.11 ad return ENXIO;
1232 1.11 ad
1233 1.11 ad DEV_LOCK(d);
1234 1.11 ad rv = (*d->d_write)(dev, uio, flag);
1235 1.11 ad DEV_UNLOCK(d);
1236 1.11 ad
1237 1.11 ad return rv;
1238 1.11 ad }
1239 1.11 ad
1240 1.11 ad int
1241 1.11 ad cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
1242 1.11 ad {
1243 1.11 ad const struct cdevsw *d;
1244 1.17 ad int rv, mpflag;
1245 1.11 ad
1246 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1247 1.11 ad return ENXIO;
1248 1.11 ad
1249 1.11 ad DEV_LOCK(d);
1250 1.11 ad rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
1251 1.11 ad DEV_UNLOCK(d);
1252 1.11 ad
1253 1.11 ad return rv;
1254 1.11 ad }
1255 1.11 ad
1256 1.11 ad void
1257 1.11 ad cdev_stop(struct tty *tp, int flag)
1258 1.11 ad {
1259 1.11 ad const struct cdevsw *d;
1260 1.17 ad int mpflag;
1261 1.11 ad
1262 1.11 ad if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
1263 1.11 ad return;
1264 1.11 ad
1265 1.11 ad DEV_LOCK(d);
1266 1.11 ad (*d->d_stop)(tp, flag);
1267 1.11 ad DEV_UNLOCK(d);
1268 1.11 ad }
1269 1.11 ad
1270 1.11 ad struct tty *
1271 1.11 ad cdev_tty(dev_t dev)
1272 1.11 ad {
1273 1.11 ad const struct cdevsw *d;
1274 1.11 ad
1275 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1276 1.11 ad return NULL;
1277 1.11 ad
1278 1.12 ad /* XXX Check if necessary. */
1279 1.12 ad if (d->d_tty == NULL)
1280 1.12 ad return NULL;
1281 1.12 ad
1282 1.21 ad return (*d->d_tty)(dev);
1283 1.11 ad }
1284 1.11 ad
1285 1.11 ad int
1286 1.11 ad cdev_poll(dev_t dev, int flag, lwp_t *l)
1287 1.11 ad {
1288 1.11 ad const struct cdevsw *d;
1289 1.17 ad int rv, mpflag;
1290 1.11 ad
1291 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1292 1.11 ad return POLLERR;
1293 1.11 ad
1294 1.11 ad DEV_LOCK(d);
1295 1.11 ad rv = (*d->d_poll)(dev, flag, l);
1296 1.11 ad DEV_UNLOCK(d);
1297 1.11 ad
1298 1.11 ad return rv;
1299 1.11 ad }
1300 1.11 ad
1301 1.11 ad paddr_t
1302 1.11 ad cdev_mmap(dev_t dev, off_t off, int flag)
1303 1.11 ad {
1304 1.11 ad const struct cdevsw *d;
1305 1.11 ad paddr_t rv;
1306 1.17 ad int mpflag;
1307 1.11 ad
1308 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1309 1.11 ad return (paddr_t)-1LL;
1310 1.11 ad
1311 1.11 ad DEV_LOCK(d);
1312 1.11 ad rv = (*d->d_mmap)(dev, off, flag);
1313 1.11 ad DEV_UNLOCK(d);
1314 1.11 ad
1315 1.11 ad return rv;
1316 1.11 ad }
1317 1.11 ad
1318 1.11 ad int
1319 1.11 ad cdev_kqfilter(dev_t dev, struct knote *kn)
1320 1.11 ad {
1321 1.11 ad const struct cdevsw *d;
1322 1.17 ad int rv, mpflag;
1323 1.11 ad
1324 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1325 1.11 ad return ENXIO;
1326 1.11 ad
1327 1.11 ad DEV_LOCK(d);
1328 1.11 ad rv = (*d->d_kqfilter)(dev, kn);
1329 1.11 ad DEV_UNLOCK(d);
1330 1.11 ad
1331 1.11 ad return rv;
1332 1.11 ad }
1333 1.11 ad
1334 1.11 ad int
1335 1.32 dholland cdev_discard(dev_t dev, off_t pos, off_t len)
1336 1.32 dholland {
1337 1.32 dholland const struct cdevsw *d;
1338 1.32 dholland int rv, mpflag;
1339 1.32 dholland
1340 1.32 dholland if ((d = cdevsw_lookup(dev)) == NULL)
1341 1.32 dholland return ENXIO;
1342 1.32 dholland
1343 1.32 dholland DEV_LOCK(d);
1344 1.32 dholland rv = (*d->d_discard)(dev, pos, len);
1345 1.32 dholland DEV_UNLOCK(d);
1346 1.32 dholland
1347 1.32 dholland return rv;
1348 1.32 dholland }
1349 1.32 dholland
1350 1.32 dholland int
1351 1.35 nat cdev_flags(dev_t dev)
1352 1.35 nat {
1353 1.35 nat const struct cdevsw *d;
1354 1.35 nat
1355 1.35 nat if ((d = cdevsw_lookup(dev)) == NULL)
1356 1.35 nat return 0;
1357 1.35 nat return d->d_flag & ~D_TYPEMASK;
1358 1.35 nat }
1359 1.35 nat
1360 1.35 nat int
1361 1.11 ad cdev_type(dev_t dev)
1362 1.11 ad {
1363 1.11 ad const struct cdevsw *d;
1364 1.11 ad
1365 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1366 1.11 ad return D_OTHER;
1367 1.11 ad return d->d_flag & D_TYPEMASK;
1368 1.2 gehenna }
1369 1.36 riastrad
1370 1.43 riastrad void
1371 1.43 riastrad cdev_detached(dev_t dev)
1372 1.43 riastrad {
1373 1.43 riastrad const struct cdevsw *d;
1374 1.43 riastrad device_t dv;
1375 1.43 riastrad int unit;
1376 1.43 riastrad
1377 1.43 riastrad if ((d = cdevsw_lookup(dev)) == NULL)
1378 1.43 riastrad return;
1379 1.43 riastrad if (d->d_devtounit == NULL)
1380 1.43 riastrad return;
1381 1.43 riastrad if ((unit = (*d->d_devtounit)(dev)) == -1)
1382 1.43 riastrad return;
1383 1.43 riastrad if ((dv = device_lookup(d->d_cfdriver, unit)) == NULL)
1384 1.43 riastrad return;
1385 1.43 riastrad config_detach_commit(dv);
1386 1.43 riastrad }
1387 1.43 riastrad
1388 1.36 riastrad /*
1389 1.36 riastrad * nommap(dev, off, prot)
1390 1.36 riastrad *
1391 1.36 riastrad * mmap routine that always fails, for non-mmappable devices.
1392 1.36 riastrad */
1393 1.36 riastrad paddr_t
1394 1.36 riastrad nommap(dev_t dev, off_t off, int prot)
1395 1.36 riastrad {
1396 1.36 riastrad
1397 1.36 riastrad return (paddr_t)-1;
1398 1.36 riastrad }
1399 1.42 riastrad
1400 1.42 riastrad /*
1401 1.42 riastrad * dev_minor_unit(dev)
1402 1.42 riastrad *
1403 1.42 riastrad * Returns minor(dev) as an int. Intended for use with struct
1404 1.42 riastrad * bdevsw, cdevsw::d_devtounit for drivers whose /dev nodes are
1405 1.42 riastrad * implemented by reference to an autoconf instance with the minor
1406 1.42 riastrad * number.
1407 1.42 riastrad */
1408 1.42 riastrad int
1409 1.42 riastrad dev_minor_unit(dev_t dev)
1410 1.42 riastrad {
1411 1.42 riastrad
1412 1.42 riastrad return minor(dev);
1413 1.42 riastrad }
1414