subr_devsw.c revision 1.40 1 1.40 riastrad /* $NetBSD: subr_devsw.c,v 1.40 2022/03/28 12:33:32 riastradh Exp $ */
2 1.11 ad
3 1.2 gehenna /*-
4 1.20 ad * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 1.2 gehenna * All rights reserved.
6 1.2 gehenna *
7 1.2 gehenna * This code is derived from software contributed to The NetBSD Foundation
8 1.11 ad * by MAEKAWA Masahide <gehenna (at) NetBSD.org>, and by Andrew Doran.
9 1.2 gehenna *
10 1.2 gehenna * Redistribution and use in source and binary forms, with or without
11 1.2 gehenna * modification, are permitted provided that the following conditions
12 1.2 gehenna * are met:
13 1.2 gehenna * 1. Redistributions of source code must retain the above copyright
14 1.2 gehenna * notice, this list of conditions and the following disclaimer.
15 1.2 gehenna * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 gehenna * notice, this list of conditions and the following disclaimer in the
17 1.2 gehenna * documentation and/or other materials provided with the distribution.
18 1.2 gehenna *
19 1.2 gehenna * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 gehenna * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 gehenna * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 gehenna * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 gehenna * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 gehenna * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 gehenna * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 gehenna * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 gehenna * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 gehenna * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 gehenna * POSSIBILITY OF SUCH DAMAGE.
30 1.2 gehenna */
31 1.11 ad
32 1.11 ad /*
33 1.11 ad * Overview
34 1.11 ad *
35 1.11 ad * subr_devsw.c: registers device drivers by name and by major
36 1.11 ad * number, and provides wrapper methods for performing I/O and
37 1.11 ad * other tasks on device drivers, keying on the device number
38 1.11 ad * (dev_t).
39 1.11 ad *
40 1.11 ad * When the system is built, the config(8) command generates
41 1.11 ad * static tables of device drivers built into the kernel image
42 1.11 ad * along with their associated methods. These are recorded in
43 1.11 ad * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 1.11 ad * and removed from the system dynamically.
45 1.11 ad *
46 1.11 ad * Allocation
47 1.11 ad *
48 1.11 ad * When the system initially boots only the statically allocated
49 1.11 ad * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 1.11 ad * allocation, we allocate a fixed block of memory to hold the new,
51 1.11 ad * expanded index. This "fork" of the table is only ever performed
52 1.11 ad * once in order to guarantee that other threads may safely access
53 1.11 ad * the device tables:
54 1.11 ad *
55 1.11 ad * o Once a thread has a "reference" to the table via an earlier
56 1.11 ad * open() call, we know that the entry in the table must exist
57 1.11 ad * and so it is safe to access it.
58 1.11 ad *
59 1.11 ad * o Regardless of whether other threads see the old or new
60 1.11 ad * pointers, they will point to a correct device switch
61 1.11 ad * structure for the operation being performed.
62 1.11 ad *
63 1.11 ad * XXX Currently, the wrapper methods such as cdev_read() verify
64 1.11 ad * that a device driver does in fact exist before calling the
65 1.11 ad * associated driver method. This should be changed so that
 *	    once the device has been referenced by a vnode (opened),
67 1.11 ad * calling the other methods should be valid until that reference
68 1.11 ad * is dropped.
69 1.11 ad */
70 1.7 lukem
71 1.7 lukem #include <sys/cdefs.h>
72 1.40 riastrad __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.40 2022/03/28 12:33:32 riastradh Exp $");
73 1.34 riz
74 1.34 riz #ifdef _KERNEL_OPT
75 1.34 riz #include "opt_dtrace.h"
76 1.34 riz #endif
77 1.2 gehenna
78 1.2 gehenna #include <sys/param.h>
79 1.2 gehenna #include <sys/conf.h>
80 1.11 ad #include <sys/kmem.h>
81 1.2 gehenna #include <sys/systm.h>
82 1.11 ad #include <sys/poll.h>
83 1.11 ad #include <sys/tty.h>
84 1.15 matt #include <sys/cpu.h>
85 1.11 ad #include <sys/buf.h>
86 1.29 mrg #include <sys/reboot.h>
87 1.34 riz #include <sys/sdt.h>
88 1.40 riastrad #include <sys/atomic.h>
89 1.40 riastrad #include <sys/localcount.h>
90 1.40 riastrad #include <sys/pserialize.h>
91 1.40 riastrad #include <sys/xcall.h>
92 1.2 gehenna
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum of major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))

/*
 * Per-major reference record, kept in arrays parallel to the bdevsw
 * and cdevsw tables; the localcount lets devsw_detach wait for all
 * outstanding acquired references to a driver to drain.
 */
struct devswref {
	struct localcount *dr_lc;
};

/* XXX bdevsw, cdevsw, max_bdevsws, and max_cdevsws should be volatile */
extern const struct bdevsw **bdevsw, *bdevsw0[];
extern const struct cdevsw **cdevsw, *cdevsw0[];
extern struct devsw_conv *devsw_conv, devsw_conv0[];
extern const int sys_bdevsws, sys_cdevsws;
extern int max_bdevsws, max_cdevsws, max_devsw_convs;

static struct devswref *cdevswref;
static struct devswref *bdevswref;
static kcondvar_t devsw_cv;	/* signalled when a localcount drains */

static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);

/* Serializes all modifications to the devsw and conv tables. */
kmutex_t device_lock;

void (*biodone_vfs)(buf_t *) = (void *)nullop;
/*
 * Initialize the device switch framework: set up the lock protecting
 * the switch/conv tables and the condvar used to wait for device
 * references to drain.  Called once, early at boot, before any
 * dynamic devsw_attach()/devsw_detach() calls.
 */
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);

	cv_init(&devsw_cv, "devsw");
}
137 1.2 gehenna
138 1.2 gehenna int
139 1.24 drochner devsw_attach(const char *devname,
140 1.24 drochner const struct bdevsw *bdev, devmajor_t *bmajor,
141 1.24 drochner const struct cdevsw *cdev, devmajor_t *cmajor)
142 1.2 gehenna {
143 1.2 gehenna struct devsw_conv *conv;
144 1.2 gehenna char *name;
145 1.2 gehenna int error, i;
146 1.2 gehenna
147 1.2 gehenna if (devname == NULL || cdev == NULL)
148 1.2 gehenna return (EINVAL);
149 1.2 gehenna
150 1.23 pooka mutex_enter(&device_lock);
151 1.11 ad
152 1.2 gehenna for (i = 0 ; i < max_devsw_convs ; i++) {
153 1.2 gehenna conv = &devsw_conv[i];
154 1.2 gehenna if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
155 1.2 gehenna continue;
156 1.2 gehenna
157 1.2 gehenna if (*bmajor < 0)
158 1.2 gehenna *bmajor = conv->d_bmajor;
159 1.2 gehenna if (*cmajor < 0)
160 1.2 gehenna *cmajor = conv->d_cmajor;
161 1.2 gehenna
162 1.11 ad if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
163 1.11 ad error = EINVAL;
164 1.11 ad goto fail;
165 1.11 ad }
166 1.11 ad if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
167 1.11 ad error = EINVAL;
168 1.11 ad goto fail;
169 1.11 ad }
170 1.2 gehenna
171 1.2 gehenna if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
172 1.11 ad cdevsw[*cmajor] != NULL) {
173 1.11 ad error = EEXIST;
174 1.11 ad goto fail;
175 1.11 ad }
176 1.40 riastrad break;
177 1.2 gehenna }
178 1.2 gehenna
179 1.40 riastrad /*
180 1.40 riastrad * XXX This should allocate what it needs up front so we never
181 1.40 riastrad * need to flail around trying to unwind.
182 1.40 riastrad */
183 1.14 pooka error = bdevsw_attach(bdev, bmajor);
184 1.11 ad if (error != 0)
185 1.11 ad goto fail;
186 1.14 pooka error = cdevsw_attach(cdev, cmajor);
187 1.2 gehenna if (error != 0) {
188 1.11 ad devsw_detach_locked(bdev, NULL);
189 1.11 ad goto fail;
190 1.2 gehenna }
191 1.2 gehenna
192 1.40 riastrad /*
193 1.40 riastrad * If we already found a conv, we're done. Otherwise, find an
194 1.40 riastrad * empty slot or extend the table.
195 1.40 riastrad */
196 1.40 riastrad if (i == max_devsw_convs)
197 1.40 riastrad goto fail;
198 1.40 riastrad
199 1.2 gehenna for (i = 0 ; i < max_devsw_convs ; i++) {
200 1.2 gehenna if (devsw_conv[i].d_name == NULL)
201 1.2 gehenna break;
202 1.2 gehenna }
203 1.2 gehenna if (i == max_devsw_convs) {
204 1.2 gehenna struct devsw_conv *newptr;
205 1.33 matt int old_convs, new_convs;
206 1.2 gehenna
207 1.33 matt old_convs = max_devsw_convs;
208 1.33 matt new_convs = old_convs + 1;
209 1.2 gehenna
210 1.33 matt newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
211 1.2 gehenna if (newptr == NULL) {
212 1.11 ad devsw_detach_locked(bdev, cdev);
213 1.11 ad error = ENOMEM;
214 1.11 ad goto fail;
215 1.2 gehenna }
216 1.33 matt newptr[old_convs].d_name = NULL;
217 1.33 matt newptr[old_convs].d_bmajor = -1;
218 1.33 matt newptr[old_convs].d_cmajor = -1;
219 1.33 matt memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
220 1.2 gehenna if (devsw_conv != devsw_conv0)
221 1.33 matt kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
222 1.2 gehenna devsw_conv = newptr;
223 1.33 matt max_devsw_convs = new_convs;
224 1.2 gehenna }
225 1.2 gehenna
226 1.38 christos name = kmem_strdupsize(devname, NULL, KM_NOSLEEP);
227 1.2 gehenna if (name == NULL) {
228 1.11 ad devsw_detach_locked(bdev, cdev);
229 1.25 enami error = ENOMEM;
230 1.11 ad goto fail;
231 1.2 gehenna }
232 1.2 gehenna
233 1.2 gehenna devsw_conv[i].d_name = name;
234 1.2 gehenna devsw_conv[i].d_bmajor = *bmajor;
235 1.2 gehenna devsw_conv[i].d_cmajor = *cmajor;
236 1.2 gehenna
237 1.23 pooka mutex_exit(&device_lock);
238 1.2 gehenna return (0);
239 1.11 ad fail:
240 1.23 pooka mutex_exit(&device_lock);
241 1.11 ad return (error);
242 1.2 gehenna }
243 1.2 gehenna
/*
 * Register a block device switch at *devmajor (or pick a free major
 * if *devmajor is -1), growing the bdevsw table to MAXDEVSW entries
 * on first overflow of the static table.  The new tables and the new
 * entry are published with release ordering so that lock-free readers
 * (bdevsw_lookup*) observe fully initialized contents.
 *
 * Returns 0 on success, ENOMEM on exhaustion/allocation failure,
 * EEXIST if the requested major is occupied.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newbdevsw = NULL;
	struct devswref *newbdevswref = NULL;
	struct localcount *lc;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	/* No block device supplied: nothing to do. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Dynamic allocation: find a major that is neither in
		 * the bdevsw table nor reserved by a conv entry.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		/* May equal max_bdevsws; the table is extended below. */
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: block majors exhausted", __func__);
		return (ENOMEM);
	}

	/* Lazily create the parallel reference-count table. */
	if (bdevswref == NULL) {
		newbdevswref = kmem_zalloc(MAXDEVSW * sizeof(newbdevswref[0]),
		    KM_NOSLEEP);
		if (newbdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&bdevswref, newbdevswref);
	}

	if (*devmajor >= max_bdevsws) {
		/* The static table is only ever "forked" once. */
		KASSERT(bdevsw == bdevsw0);
		newbdevsw = kmem_zalloc(MAXDEVSW * sizeof(newbdevsw[0]),
		    KM_NOSLEEP);
		if (newbdevsw == NULL)
			return ENOMEM;
		memcpy(newbdevsw, bdevsw, max_bdevsws * sizeof(bdevsw[0]));
		/* Publish the table before publishing its new size. */
		atomic_store_release(&bdevsw, newbdevsw);
		atomic_store_release(&max_bdevsws, MAXDEVSW);
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	/* Set up the localcount before the entry becomes visible. */
	KASSERT(bdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	bdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&bdevsw[*devmajor], devsw);

	return (0);
}
309 1.2 gehenna
/*
 * Register a character device switch at *devmajor (or pick a free
 * major if *devmajor is -1); the character-device counterpart of
 * bdevsw_attach().  Tables and the new entry are published with
 * release ordering for the benefit of lock-free readers.
 *
 * Returns 0 on success, ENOMEM on exhaustion/allocation failure,
 * EEXIST if the requested major is occupied.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newcdevsw = NULL;
	struct devswref *newcdevswref = NULL;
	struct localcount *lc;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/*
		 * Dynamic allocation: find a major that is neither in
		 * the cdevsw table nor reserved by a conv entry.
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		/* May equal max_cdevsws; the table is extended below. */
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("%s: character majors exhausted", __func__);
		return (ENOMEM);
	}

	/* Lazily create the parallel reference-count table. */
	if (cdevswref == NULL) {
		newcdevswref = kmem_zalloc(MAXDEVSW * sizeof(newcdevswref[0]),
		    KM_NOSLEEP);
		if (newcdevswref == NULL)
			return ENOMEM;
		atomic_store_release(&cdevswref, newcdevswref);
	}

	if (*devmajor >= max_cdevsws) {
		/* The static table is only ever "forked" once. */
		KASSERT(cdevsw == cdevsw0);
		newcdevsw = kmem_zalloc(MAXDEVSW * sizeof(newcdevsw[0]),
		    KM_NOSLEEP);
		if (newcdevsw == NULL)
			return ENOMEM;
		memcpy(newcdevsw, cdevsw, max_cdevsws * sizeof(cdevsw[0]));
		/* Publish the table before publishing its new size. */
		atomic_store_release(&cdevsw, newcdevsw);
		atomic_store_release(&max_cdevsws, MAXDEVSW);
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	/* Set up the localcount before the entry becomes visible. */
	KASSERT(cdevswref[*devmajor].dr_lc == NULL);
	lc = kmem_zalloc(sizeof(*lc), KM_SLEEP);
	localcount_init(lc);
	cdevswref[*devmajor].dr_lc = lc;

	atomic_store_release(&cdevsw[*devmajor], devsw);

	return (0);
}
372 1.2 gehenna
/*
 * Remove a block and/or character device switch from the tables and
 * wait for all lock-free references to drain.  Caller holds
 * device_lock and must guarantee there are no extant opens and that
 * all new d_open calls will fail.
 */
static void
devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
{
	int bi, ci = -1/*XXXGCC*/;

	KASSERT(mutex_owned(&device_lock));

	/* Prevent new references. */
	if (bdev != NULL) {
		for (bi = 0; bi < max_bdevsws; bi++) {
			if (bdevsw[bi] != bdev)
				continue;
			atomic_store_relaxed(&bdevsw[bi], NULL);
			break;
		}
		/* The caller's bdev must actually be in the table. */
		KASSERT(bi < max_bdevsws);
	}
	if (cdev != NULL) {
		for (ci = 0; ci < max_cdevsws; ci++) {
			if (cdevsw[ci] != cdev)
				continue;
			atomic_store_relaxed(&cdevsw[ci], NULL);
			break;
		}
		/* The caller's cdev must actually be in the table. */
		KASSERT(ci < max_cdevsws);
	}

	if (bdev == NULL && cdev == NULL) /* XXX possible? */
		return;

	/*
	 * Wait for all bdevsw_lookup_acquire, cdevsw_lookup_acquire
	 * calls to notice that the devsw is gone.
	 *
	 * XXX Despite the use of the pserialize_read_enter/exit API
	 * elsewhere in this file, we use xc_barrier here instead of
	 * pserialize_perform -- because devsw_init is too early for
	 * pserialize_create.  Either pserialize_create should be made
	 * to work earlier, or it should be nixed altogether.  Until
	 * that is fixed, xc_barrier will serve the same purpose.
	 */
	xc_barrier(0);

	/*
	 * Wait for all references to drain.  It is the caller's
	 * responsibility to ensure that at this point, there are no
	 * extant open instances and all new d_open calls will fail.
	 *
	 * Note that localcount_drain may release and reacquire
	 * device_lock.
	 */
	if (bdev != NULL) {
		localcount_drain(bdevswref[bi].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(bdevswref[bi].dr_lc);
		kmem_free(bdevswref[bi].dr_lc, sizeof(*bdevswref[bi].dr_lc));
		bdevswref[bi].dr_lc = NULL;
	}
	if (cdev != NULL) {
		localcount_drain(cdevswref[ci].dr_lc,
		    &devsw_cv, &device_lock);
		localcount_fini(cdevswref[ci].dr_lc);
		kmem_free(cdevswref[ci].dr_lc, sizeof(*cdevswref[ci].dr_lc));
		cdevswref[ci].dr_lc = NULL;
	}
}
439 1.2 gehenna
/*
 * Public entry point for detaching a driver's device switches:
 * take device_lock and defer to devsw_detach_locked().  May sleep
 * (waits for all outstanding references to drain).
 */
void
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
}
448 1.11 ad
449 1.11 ad /*
450 1.11 ad * Look up a block device by number.
451 1.11 ad *
452 1.11 ad * => Caller must ensure that the device is attached.
453 1.11 ad */
454 1.2 gehenna const struct bdevsw *
455 1.2 gehenna bdevsw_lookup(dev_t dev)
456 1.2 gehenna {
457 1.24 drochner devmajor_t bmajor;
458 1.2 gehenna
459 1.2 gehenna if (dev == NODEV)
460 1.2 gehenna return (NULL);
461 1.2 gehenna bmajor = major(dev);
462 1.40 riastrad if (bmajor < 0 || bmajor >= atomic_load_relaxed(&max_bdevsws))
463 1.2 gehenna return (NULL);
464 1.2 gehenna
465 1.40 riastrad return atomic_load_consume(&bdevsw)[bmajor];
466 1.40 riastrad }
467 1.40 riastrad
/*
 * Look up a block device by number and acquire a reference on it, so
 * that devsw_detach will wait for the reference to be released via
 * bdevsw_release before the driver can go away.  Returns NULL if the
 * device is absent; on success *lcp holds the localcount to release
 * (or NULL if no reference table exists yet, e.g. for static-only
 * configurations).
 */
static const struct bdevsw *
bdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t bmajor;
	const struct bdevsw *bdev = NULL, *const *curbdevsw;
	struct devswref *curbdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	bmajor = major(dev);
	if (bmajor < 0)
		return NULL;

	/* Enter a pserialize read section spanning table accesses. */
	s = pserialize_read_enter();

	/*
	 * max_bdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_bdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (bmajor >= sys_bdevsws &&
	    bmajor >= atomic_load_acquire(&max_bdevsws))
		goto out;
	curbdevsw = atomic_load_consume(&bdevsw);
	if ((bdev = atomic_load_consume(&curbdevsw[bmajor])) == NULL)
		goto out;

	/* Acquire a per-CPU reference while still inside the section. */
	curbdevswref = atomic_load_consume(&bdevswref);
	if (curbdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curbdevswref[bmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:	pserialize_read_exit(s);
	return bdev;
}
507 1.40 riastrad
508 1.40 riastrad static void
509 1.40 riastrad bdevsw_release(const struct bdevsw *bdev, struct localcount *lc)
510 1.40 riastrad {
511 1.40 riastrad
512 1.40 riastrad if (lc == NULL)
513 1.40 riastrad return;
514 1.40 riastrad localcount_release(lc, &devsw_cv, &device_lock);
515 1.2 gehenna }
516 1.2 gehenna
517 1.11 ad /*
518 1.11 ad * Look up a character device by number.
519 1.11 ad *
520 1.11 ad * => Caller must ensure that the device is attached.
521 1.11 ad */
522 1.2 gehenna const struct cdevsw *
523 1.2 gehenna cdevsw_lookup(dev_t dev)
524 1.2 gehenna {
525 1.24 drochner devmajor_t cmajor;
526 1.2 gehenna
527 1.2 gehenna if (dev == NODEV)
528 1.2 gehenna return (NULL);
529 1.2 gehenna cmajor = major(dev);
530 1.40 riastrad if (cmajor < 0 || cmajor >= atomic_load_relaxed(&max_cdevsws))
531 1.2 gehenna return (NULL);
532 1.2 gehenna
533 1.40 riastrad return atomic_load_consume(&cdevsw)[cmajor];
534 1.40 riastrad }
535 1.40 riastrad
/*
 * Look up a character device by number and acquire a reference on it,
 * so that devsw_detach will wait for the reference to be released via
 * cdevsw_release before the driver can go away.  Returns NULL if the
 * device is absent; on success *lcp holds the localcount to release
 * (or NULL if no reference table exists yet).
 */
static const struct cdevsw *
cdevsw_lookup_acquire(dev_t dev, struct localcount **lcp)
{
	devmajor_t cmajor;
	const struct cdevsw *cdev = NULL, *const *curcdevsw;
	struct devswref *curcdevswref;
	int s;

	if (dev == NODEV)
		return NULL;
	cmajor = major(dev);
	if (cmajor < 0)
		return NULL;

	/* Enter a pserialize read section spanning table accesses. */
	s = pserialize_read_enter();

	/*
	 * max_cdevsws never goes down, so it is safe to rely on this
	 * condition without any locking for the array access below.
	 * Test sys_cdevsws first so we can avoid the memory barrier in
	 * that case.
	 */
	if (cmajor >= sys_cdevsws &&
	    cmajor >= atomic_load_acquire(&max_cdevsws))
		goto out;
	curcdevsw = atomic_load_consume(&cdevsw);
	if ((cdev = atomic_load_consume(&curcdevsw[cmajor])) == NULL)
		goto out;

	/* Acquire a per-CPU reference while still inside the section. */
	curcdevswref = atomic_load_consume(&cdevswref);
	if (curcdevswref == NULL) {
		*lcp = NULL;
	} else if ((*lcp = curcdevswref[cmajor].dr_lc) != NULL) {
		localcount_acquire(*lcp);
	}
out:	pserialize_read_exit(s);
	return cdev;
}
575 1.40 riastrad
576 1.40 riastrad static void
577 1.40 riastrad cdevsw_release(const struct cdevsw *cdev, struct localcount *lc)
578 1.40 riastrad {
579 1.40 riastrad
580 1.40 riastrad if (lc == NULL)
581 1.40 riastrad return;
582 1.40 riastrad localcount_release(lc, &devsw_cv, &device_lock);
583 1.2 gehenna }
584 1.2 gehenna
585 1.11 ad /*
586 1.11 ad * Look up a block device by reference to its operations set.
587 1.11 ad *
588 1.11 ad * => Caller must ensure that the device is not detached, and therefore
589 1.11 ad * that the returned major is still valid when dereferenced.
590 1.11 ad */
591 1.24 drochner devmajor_t
592 1.2 gehenna bdevsw_lookup_major(const struct bdevsw *bdev)
593 1.2 gehenna {
594 1.40 riastrad const struct bdevsw *const *curbdevsw;
595 1.40 riastrad devmajor_t bmajor, bmax;
596 1.2 gehenna
597 1.40 riastrad bmax = atomic_load_acquire(&max_bdevsws);
598 1.40 riastrad curbdevsw = atomic_load_consume(&bdevsw);
599 1.40 riastrad for (bmajor = 0; bmajor < bmax; bmajor++) {
600 1.40 riastrad if (atomic_load_relaxed(&curbdevsw[bmajor]) == bdev)
601 1.2 gehenna return (bmajor);
602 1.2 gehenna }
603 1.2 gehenna
604 1.24 drochner return (NODEVMAJOR);
605 1.2 gehenna }
606 1.2 gehenna
607 1.11 ad /*
608 1.11 ad * Look up a character device by reference to its operations set.
609 1.11 ad *
610 1.11 ad * => Caller must ensure that the device is not detached, and therefore
611 1.11 ad * that the returned major is still valid when dereferenced.
612 1.11 ad */
613 1.24 drochner devmajor_t
614 1.2 gehenna cdevsw_lookup_major(const struct cdevsw *cdev)
615 1.2 gehenna {
616 1.40 riastrad const struct cdevsw *const *curcdevsw;
617 1.40 riastrad devmajor_t cmajor, cmax;
618 1.2 gehenna
619 1.40 riastrad cmax = atomic_load_acquire(&max_cdevsws);
620 1.40 riastrad curcdevsw = atomic_load_consume(&cdevsw);
621 1.40 riastrad for (cmajor = 0; cmajor < cmax; cmajor++) {
622 1.40 riastrad if (atomic_load_relaxed(&curcdevsw[cmajor]) == cdev)
623 1.2 gehenna return (cmajor);
624 1.2 gehenna }
625 1.2 gehenna
626 1.24 drochner return (NODEVMAJOR);
627 1.2 gehenna }
628 1.2 gehenna
629 1.2 gehenna /*
630 1.2 gehenna * Convert from block major number to name.
631 1.11 ad *
632 1.11 ad * => Caller must ensure that the device is not detached, and therefore
633 1.11 ad * that the name pointer is still valid when dereferenced.
634 1.2 gehenna */
635 1.2 gehenna const char *
636 1.24 drochner devsw_blk2name(devmajor_t bmajor)
637 1.2 gehenna {
638 1.11 ad const char *name;
639 1.24 drochner devmajor_t cmajor;
640 1.24 drochner int i;
641 1.2 gehenna
642 1.11 ad name = NULL;
643 1.11 ad cmajor = -1;
644 1.11 ad
645 1.23 pooka mutex_enter(&device_lock);
646 1.11 ad if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
647 1.23 pooka mutex_exit(&device_lock);
648 1.2 gehenna return (NULL);
649 1.2 gehenna }
650 1.11 ad for (i = 0 ; i < max_devsw_convs; i++) {
651 1.11 ad if (devsw_conv[i].d_bmajor == bmajor) {
652 1.11 ad cmajor = devsw_conv[i].d_cmajor;
653 1.11 ad break;
654 1.11 ad }
655 1.11 ad }
656 1.11 ad if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
657 1.11 ad name = devsw_conv[i].d_name;
658 1.23 pooka mutex_exit(&device_lock);
659 1.2 gehenna
660 1.11 ad return (name);
661 1.2 gehenna }
662 1.2 gehenna
663 1.2 gehenna /*
664 1.26 haad * Convert char major number to device driver name.
665 1.26 haad */
666 1.27 yamt const char *
667 1.26 haad cdevsw_getname(devmajor_t major)
668 1.26 haad {
669 1.26 haad const char *name;
670 1.26 haad int i;
671 1.26 haad
672 1.26 haad name = NULL;
673 1.26 haad
674 1.26 haad if (major < 0)
675 1.26 haad return (NULL);
676 1.26 haad
677 1.26 haad mutex_enter(&device_lock);
678 1.26 haad for (i = 0 ; i < max_devsw_convs; i++) {
679 1.26 haad if (devsw_conv[i].d_cmajor == major) {
680 1.26 haad name = devsw_conv[i].d_name;
681 1.26 haad break;
682 1.26 haad }
683 1.26 haad }
684 1.26 haad mutex_exit(&device_lock);
685 1.26 haad return (name);
686 1.26 haad }
687 1.26 haad
688 1.26 haad /*
689 1.26 haad * Convert block major number to device driver name.
690 1.26 haad */
691 1.27 yamt const char *
692 1.26 haad bdevsw_getname(devmajor_t major)
693 1.26 haad {
694 1.26 haad const char *name;
695 1.26 haad int i;
696 1.26 haad
697 1.26 haad name = NULL;
698 1.26 haad
699 1.26 haad if (major < 0)
700 1.26 haad return (NULL);
701 1.26 haad
702 1.26 haad mutex_enter(&device_lock);
703 1.26 haad for (i = 0 ; i < max_devsw_convs; i++) {
704 1.26 haad if (devsw_conv[i].d_bmajor == major) {
705 1.26 haad name = devsw_conv[i].d_name;
706 1.26 haad break;
707 1.26 haad }
708 1.26 haad }
709 1.26 haad mutex_exit(&device_lock);
710 1.26 haad return (name);
711 1.26 haad }
712 1.26 haad
713 1.26 haad /*
714 1.2 gehenna * Convert from device name to block major number.
715 1.11 ad *
716 1.11 ad * => Caller must ensure that the device is not detached, and therefore
717 1.11 ad * that the major number is still valid when dereferenced.
718 1.2 gehenna */
719 1.24 drochner devmajor_t
720 1.2 gehenna devsw_name2blk(const char *name, char *devname, size_t devnamelen)
721 1.2 gehenna {
722 1.2 gehenna struct devsw_conv *conv;
723 1.24 drochner devmajor_t bmajor;
724 1.24 drochner int i;
725 1.2 gehenna
726 1.2 gehenna if (name == NULL)
727 1.24 drochner return (NODEVMAJOR);
728 1.2 gehenna
729 1.23 pooka mutex_enter(&device_lock);
730 1.2 gehenna for (i = 0 ; i < max_devsw_convs ; i++) {
731 1.5 mrg size_t len;
732 1.5 mrg
733 1.2 gehenna conv = &devsw_conv[i];
734 1.2 gehenna if (conv->d_name == NULL)
735 1.2 gehenna continue;
736 1.5 mrg len = strlen(conv->d_name);
737 1.5 mrg if (strncmp(conv->d_name, name, len) != 0)
738 1.5 mrg continue;
739 1.5 mrg if (*(name +len) && !isdigit(*(name + len)))
740 1.2 gehenna continue;
741 1.2 gehenna bmajor = conv->d_bmajor;
742 1.2 gehenna if (bmajor < 0 || bmajor >= max_bdevsws ||
743 1.2 gehenna bdevsw[bmajor] == NULL)
744 1.5 mrg break;
745 1.2 gehenna if (devname != NULL) {
746 1.2 gehenna #ifdef DEVSW_DEBUG
747 1.2 gehenna if (strlen(conv->d_name) >= devnamelen)
748 1.37 pgoyette printf("%s: too short buffer", __func__);
749 1.2 gehenna #endif /* DEVSW_DEBUG */
750 1.4 tsutsui strncpy(devname, conv->d_name, devnamelen);
751 1.2 gehenna devname[devnamelen - 1] = '\0';
752 1.2 gehenna }
753 1.23 pooka mutex_exit(&device_lock);
754 1.2 gehenna return (bmajor);
755 1.2 gehenna }
756 1.2 gehenna
757 1.23 pooka mutex_exit(&device_lock);
758 1.24 drochner return (NODEVMAJOR);
759 1.2 gehenna }
760 1.2 gehenna
761 1.2 gehenna /*
762 1.16 plunky * Convert from device name to char major number.
763 1.16 plunky *
764 1.16 plunky * => Caller must ensure that the device is not detached, and therefore
765 1.16 plunky * that the major number is still valid when dereferenced.
766 1.16 plunky */
767 1.24 drochner devmajor_t
768 1.16 plunky devsw_name2chr(const char *name, char *devname, size_t devnamelen)
769 1.16 plunky {
770 1.16 plunky struct devsw_conv *conv;
771 1.24 drochner devmajor_t cmajor;
772 1.24 drochner int i;
773 1.16 plunky
774 1.16 plunky if (name == NULL)
775 1.24 drochner return (NODEVMAJOR);
776 1.16 plunky
777 1.23 pooka mutex_enter(&device_lock);
778 1.16 plunky for (i = 0 ; i < max_devsw_convs ; i++) {
779 1.16 plunky size_t len;
780 1.16 plunky
781 1.16 plunky conv = &devsw_conv[i];
782 1.16 plunky if (conv->d_name == NULL)
783 1.16 plunky continue;
784 1.16 plunky len = strlen(conv->d_name);
785 1.16 plunky if (strncmp(conv->d_name, name, len) != 0)
786 1.16 plunky continue;
787 1.16 plunky if (*(name +len) && !isdigit(*(name + len)))
788 1.16 plunky continue;
789 1.16 plunky cmajor = conv->d_cmajor;
790 1.16 plunky if (cmajor < 0 || cmajor >= max_cdevsws ||
791 1.16 plunky cdevsw[cmajor] == NULL)
792 1.16 plunky break;
793 1.16 plunky if (devname != NULL) {
794 1.16 plunky #ifdef DEVSW_DEBUG
795 1.16 plunky if (strlen(conv->d_name) >= devnamelen)
796 1.37 pgoyette printf("%s: too short buffer", __func__);
797 1.16 plunky #endif /* DEVSW_DEBUG */
798 1.16 plunky strncpy(devname, conv->d_name, devnamelen);
799 1.16 plunky devname[devnamelen - 1] = '\0';
800 1.16 plunky }
801 1.23 pooka mutex_exit(&device_lock);
802 1.16 plunky return (cmajor);
803 1.16 plunky }
804 1.16 plunky
805 1.23 pooka mutex_exit(&device_lock);
806 1.24 drochner return (NODEVMAJOR);
807 1.16 plunky }
808 1.16 plunky
809 1.16 plunky /*
810 1.2 gehenna * Convert from character dev_t to block dev_t.
811 1.11 ad *
812 1.11 ad * => Caller must ensure that the device is not detached, and therefore
813 1.11 ad * that the major number is still valid when dereferenced.
814 1.2 gehenna */
815 1.2 gehenna dev_t
816 1.2 gehenna devsw_chr2blk(dev_t cdev)
817 1.2 gehenna {
818 1.24 drochner devmajor_t bmajor, cmajor;
819 1.24 drochner int i;
820 1.11 ad dev_t rv;
821 1.2 gehenna
822 1.2 gehenna cmajor = major(cdev);
823 1.24 drochner bmajor = NODEVMAJOR;
824 1.11 ad rv = NODEV;
825 1.2 gehenna
826 1.23 pooka mutex_enter(&device_lock);
827 1.11 ad if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
828 1.23 pooka mutex_exit(&device_lock);
829 1.11 ad return (NODEV);
830 1.11 ad }
831 1.2 gehenna for (i = 0 ; i < max_devsw_convs ; i++) {
832 1.11 ad if (devsw_conv[i].d_cmajor == cmajor) {
833 1.11 ad bmajor = devsw_conv[i].d_bmajor;
834 1.11 ad break;
835 1.11 ad }
836 1.2 gehenna }
837 1.11 ad if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
838 1.11 ad rv = makedev(bmajor, minor(cdev));
839 1.23 pooka mutex_exit(&device_lock);
840 1.2 gehenna
841 1.11 ad return (rv);
842 1.2 gehenna }
843 1.2 gehenna
844 1.2 gehenna /*
845 1.2 gehenna * Convert from block dev_t to character dev_t.
846 1.11 ad *
847 1.11 ad * => Caller must ensure that the device is not detached, and therefore
848 1.11 ad * that the major number is still valid when dereferenced.
849 1.2 gehenna */
850 1.2 gehenna dev_t
851 1.2 gehenna devsw_blk2chr(dev_t bdev)
852 1.2 gehenna {
853 1.24 drochner devmajor_t bmajor, cmajor;
854 1.24 drochner int i;
855 1.11 ad dev_t rv;
856 1.2 gehenna
857 1.11 ad bmajor = major(bdev);
858 1.24 drochner cmajor = NODEVMAJOR;
859 1.11 ad rv = NODEV;
860 1.11 ad
861 1.23 pooka mutex_enter(&device_lock);
862 1.11 ad if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
863 1.23 pooka mutex_exit(&device_lock);
864 1.2 gehenna return (NODEV);
865 1.11 ad }
866 1.11 ad for (i = 0 ; i < max_devsw_convs ; i++) {
867 1.11 ad if (devsw_conv[i].d_bmajor == bmajor) {
868 1.11 ad cmajor = devsw_conv[i].d_cmajor;
869 1.11 ad break;
870 1.11 ad }
871 1.11 ad }
872 1.11 ad if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
873 1.11 ad rv = makedev(cmajor, minor(bdev));
874 1.23 pooka mutex_exit(&device_lock);
875 1.2 gehenna
876 1.11 ad return (rv);
877 1.11 ad }
878 1.11 ad
879 1.11 ad /*
880 1.11 ad * Device access methods.
881 1.11 ad */
882 1.11 ad
/*
 * Serialize entry to non-MPSAFE drivers with the big kernel lock.
 *
 * DEV_LOCK() records whether the lock was taken in a local variable
 * named "mpflag", which DEV_UNLOCK() consults; both must be used in
 * the same scope.  The do { } while (0) wrapper makes each macro a
 * single statement, so unbraced use such as "if (cond) DEV_LOCK(d);"
 * (see bdev_size()) cannot mis-bind a following else.
 */
#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {	\
			KERNEL_LOCK(1, NULL);			\
		}						\
	} while (0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0) {				\
			KERNEL_UNLOCK_ONE(NULL);		\
		}						\
	} while (0)
892 1.2 gehenna
/*
 * bdev_open: call the driver's d_open method for a block device.
 *
 * Returns ENXIO if no driver occupies the major number.  Non-MPSAFE
 * drivers are called with the kernel lock held (DEV_LOCK/DEV_UNLOCK).
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	struct localcount *lc;
	int rv, mpflag;

	/*
	 * Acquire a localcount reference along with the lookup so the
	 * devsw cannot be detached while d_open is in progress.
	 */
	d = bdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	/* Drop the reference taken by bdevsw_lookup_acquire(). */
	bdevsw_release(d, lc);

	return rv;
}
912 1.11 ad
913 1.11 ad int
914 1.11 ad bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
915 1.11 ad {
916 1.11 ad const struct bdevsw *d;
917 1.17 ad int rv, mpflag;
918 1.11 ad
919 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
920 1.11 ad return ENXIO;
921 1.11 ad
922 1.11 ad DEV_LOCK(d);
923 1.11 ad rv = (*d->d_close)(dev, flag, devtype, l);
924 1.11 ad DEV_UNLOCK(d);
925 1.11 ad
926 1.11 ad return rv;
927 1.11 ad }
928 1.11 ad
/* DTrace "io" provider probe, fired at the start of bdev_strategy(). */
SDT_PROVIDER_DECLARE(io);
SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
931 1.34 riz
/*
 * bdev_strategy: hand the I/O request described by bp to the driver's
 * d_strategy method.  If no driver occupies the major number, the
 * buffer is completed immediately with ENXIO and nothing transferred.
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	SDT_PROBE1(io, kernel, , start, bp);

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
		bp->b_error = ENXIO;
		/* Nothing was transferred. */
		bp->b_resid = bp->b_bcount;
		biodone_vfs(bp);	/* biodone() iff vfs present */
		return;
	}

	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
951 1.11 ad
952 1.11 ad int
953 1.11 ad bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
954 1.11 ad {
955 1.11 ad const struct bdevsw *d;
956 1.17 ad int rv, mpflag;
957 1.11 ad
958 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
959 1.11 ad return ENXIO;
960 1.11 ad
961 1.11 ad DEV_LOCK(d);
962 1.11 ad rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
963 1.11 ad DEV_UNLOCK(d);
964 1.11 ad
965 1.11 ad return rv;
966 1.11 ad }
967 1.11 ad
/*
 * bdev_dump: write a crash dump via the driver's d_dump method.
 *
 * Returns ENXIO if no driver occupies the major number.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
988 1.11 ad
989 1.11 ad int
990 1.35 nat bdev_flags(dev_t dev)
991 1.35 nat {
992 1.35 nat const struct bdevsw *d;
993 1.35 nat
994 1.35 nat if ((d = bdevsw_lookup(dev)) == NULL)
995 1.35 nat return 0;
996 1.35 nat return d->d_flag & ~D_TYPEMASK;
997 1.35 nat }
998 1.35 nat
999 1.35 nat int
1000 1.11 ad bdev_type(dev_t dev)
1001 1.11 ad {
1002 1.11 ad const struct bdevsw *d;
1003 1.11 ad
1004 1.11 ad if ((d = bdevsw_lookup(dev)) == NULL)
1005 1.11 ad return D_OTHER;
1006 1.11 ad return d->d_flag & D_TYPEMASK;
1007 1.11 ad }
1008 1.11 ad
1009 1.11 ad int
1010 1.29 mrg bdev_size(dev_t dev)
1011 1.29 mrg {
1012 1.29 mrg const struct bdevsw *d;
1013 1.29 mrg int rv, mpflag = 0;
1014 1.29 mrg
1015 1.29 mrg if ((d = bdevsw_lookup(dev)) == NULL ||
1016 1.29 mrg d->d_psize == NULL)
1017 1.29 mrg return -1;
1018 1.29 mrg
1019 1.29 mrg /*
1020 1.29 mrg * Don't to try lock the device if we're dumping.
1021 1.30 mrg * XXX: is there a better way to test this?
1022 1.29 mrg */
1023 1.29 mrg if ((boothowto & RB_DUMP) == 0)
1024 1.29 mrg DEV_LOCK(d);
1025 1.29 mrg rv = (*d->d_psize)(dev);
1026 1.29 mrg if ((boothowto & RB_DUMP) == 0)
1027 1.29 mrg DEV_UNLOCK(d);
1028 1.29 mrg
1029 1.29 mrg return rv;
1030 1.29 mrg }
1031 1.29 mrg
1032 1.29 mrg int
1033 1.32 dholland bdev_discard(dev_t dev, off_t pos, off_t len)
1034 1.32 dholland {
1035 1.32 dholland const struct bdevsw *d;
1036 1.32 dholland int rv, mpflag;
1037 1.32 dholland
1038 1.32 dholland if ((d = bdevsw_lookup(dev)) == NULL)
1039 1.32 dholland return ENXIO;
1040 1.32 dholland
1041 1.32 dholland DEV_LOCK(d);
1042 1.32 dholland rv = (*d->d_discard)(dev, pos, len);
1043 1.32 dholland DEV_UNLOCK(d);
1044 1.32 dholland
1045 1.32 dholland return rv;
1046 1.32 dholland }
1047 1.32 dholland
/*
 * cdev_open: call the driver's d_open method for a character device.
 *
 * Returns ENXIO if no driver occupies the major number.  Non-MPSAFE
 * drivers are called with the kernel lock held (DEV_LOCK/DEV_UNLOCK).
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	struct localcount *lc;
	int rv, mpflag;

	/*
	 * Acquire a localcount reference along with the lookup so the
	 * devsw cannot be detached while d_open is in progress.
	 */
	d = cdevsw_lookup_acquire(dev, &lc);
	if (d == NULL)
		return ENXIO;

	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	/* Drop the reference taken by cdevsw_lookup_acquire(). */
	cdevsw_release(d, lc);

	return rv;
}
1067 1.11 ad
1068 1.11 ad int
1069 1.11 ad cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
1070 1.11 ad {
1071 1.11 ad const struct cdevsw *d;
1072 1.17 ad int rv, mpflag;
1073 1.11 ad
1074 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1075 1.11 ad return ENXIO;
1076 1.11 ad
1077 1.11 ad DEV_LOCK(d);
1078 1.11 ad rv = (*d->d_close)(dev, flag, devtype, l);
1079 1.11 ad DEV_UNLOCK(d);
1080 1.11 ad
1081 1.11 ad return rv;
1082 1.11 ad }
1083 1.11 ad
1084 1.11 ad int
1085 1.11 ad cdev_read(dev_t dev, struct uio *uio, int flag)
1086 1.11 ad {
1087 1.11 ad const struct cdevsw *d;
1088 1.17 ad int rv, mpflag;
1089 1.11 ad
1090 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1091 1.11 ad return ENXIO;
1092 1.11 ad
1093 1.11 ad DEV_LOCK(d);
1094 1.11 ad rv = (*d->d_read)(dev, uio, flag);
1095 1.11 ad DEV_UNLOCK(d);
1096 1.11 ad
1097 1.11 ad return rv;
1098 1.11 ad }
1099 1.11 ad
1100 1.11 ad int
1101 1.11 ad cdev_write(dev_t dev, struct uio *uio, int flag)
1102 1.11 ad {
1103 1.11 ad const struct cdevsw *d;
1104 1.17 ad int rv, mpflag;
1105 1.11 ad
1106 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1107 1.11 ad return ENXIO;
1108 1.11 ad
1109 1.11 ad DEV_LOCK(d);
1110 1.11 ad rv = (*d->d_write)(dev, uio, flag);
1111 1.11 ad DEV_UNLOCK(d);
1112 1.11 ad
1113 1.11 ad return rv;
1114 1.11 ad }
1115 1.11 ad
1116 1.11 ad int
1117 1.11 ad cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
1118 1.11 ad {
1119 1.11 ad const struct cdevsw *d;
1120 1.17 ad int rv, mpflag;
1121 1.11 ad
1122 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1123 1.11 ad return ENXIO;
1124 1.11 ad
1125 1.11 ad DEV_LOCK(d);
1126 1.11 ad rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
1127 1.11 ad DEV_UNLOCK(d);
1128 1.11 ad
1129 1.11 ad return rv;
1130 1.11 ad }
1131 1.11 ad
1132 1.11 ad void
1133 1.11 ad cdev_stop(struct tty *tp, int flag)
1134 1.11 ad {
1135 1.11 ad const struct cdevsw *d;
1136 1.17 ad int mpflag;
1137 1.11 ad
1138 1.11 ad if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
1139 1.11 ad return;
1140 1.11 ad
1141 1.11 ad DEV_LOCK(d);
1142 1.11 ad (*d->d_stop)(tp, flag);
1143 1.11 ad DEV_UNLOCK(d);
1144 1.11 ad }
1145 1.11 ad
1146 1.11 ad struct tty *
1147 1.11 ad cdev_tty(dev_t dev)
1148 1.11 ad {
1149 1.11 ad const struct cdevsw *d;
1150 1.11 ad
1151 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1152 1.11 ad return NULL;
1153 1.11 ad
1154 1.12 ad /* XXX Check if necessary. */
1155 1.12 ad if (d->d_tty == NULL)
1156 1.12 ad return NULL;
1157 1.12 ad
1158 1.21 ad return (*d->d_tty)(dev);
1159 1.11 ad }
1160 1.11 ad
1161 1.11 ad int
1162 1.11 ad cdev_poll(dev_t dev, int flag, lwp_t *l)
1163 1.11 ad {
1164 1.11 ad const struct cdevsw *d;
1165 1.17 ad int rv, mpflag;
1166 1.11 ad
1167 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1168 1.11 ad return POLLERR;
1169 1.11 ad
1170 1.11 ad DEV_LOCK(d);
1171 1.11 ad rv = (*d->d_poll)(dev, flag, l);
1172 1.11 ad DEV_UNLOCK(d);
1173 1.11 ad
1174 1.11 ad return rv;
1175 1.11 ad }
1176 1.11 ad
1177 1.11 ad paddr_t
1178 1.11 ad cdev_mmap(dev_t dev, off_t off, int flag)
1179 1.11 ad {
1180 1.11 ad const struct cdevsw *d;
1181 1.11 ad paddr_t rv;
1182 1.17 ad int mpflag;
1183 1.11 ad
1184 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1185 1.11 ad return (paddr_t)-1LL;
1186 1.11 ad
1187 1.11 ad DEV_LOCK(d);
1188 1.11 ad rv = (*d->d_mmap)(dev, off, flag);
1189 1.11 ad DEV_UNLOCK(d);
1190 1.11 ad
1191 1.11 ad return rv;
1192 1.11 ad }
1193 1.11 ad
1194 1.11 ad int
1195 1.11 ad cdev_kqfilter(dev_t dev, struct knote *kn)
1196 1.11 ad {
1197 1.11 ad const struct cdevsw *d;
1198 1.17 ad int rv, mpflag;
1199 1.11 ad
1200 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1201 1.11 ad return ENXIO;
1202 1.11 ad
1203 1.11 ad DEV_LOCK(d);
1204 1.11 ad rv = (*d->d_kqfilter)(dev, kn);
1205 1.11 ad DEV_UNLOCK(d);
1206 1.11 ad
1207 1.11 ad return rv;
1208 1.11 ad }
1209 1.11 ad
1210 1.11 ad int
1211 1.32 dholland cdev_discard(dev_t dev, off_t pos, off_t len)
1212 1.32 dholland {
1213 1.32 dholland const struct cdevsw *d;
1214 1.32 dholland int rv, mpflag;
1215 1.32 dholland
1216 1.32 dholland if ((d = cdevsw_lookup(dev)) == NULL)
1217 1.32 dholland return ENXIO;
1218 1.32 dholland
1219 1.32 dholland DEV_LOCK(d);
1220 1.32 dholland rv = (*d->d_discard)(dev, pos, len);
1221 1.32 dholland DEV_UNLOCK(d);
1222 1.32 dholland
1223 1.32 dholland return rv;
1224 1.32 dholland }
1225 1.32 dholland
1226 1.32 dholland int
1227 1.35 nat cdev_flags(dev_t dev)
1228 1.35 nat {
1229 1.35 nat const struct cdevsw *d;
1230 1.35 nat
1231 1.35 nat if ((d = cdevsw_lookup(dev)) == NULL)
1232 1.35 nat return 0;
1233 1.35 nat return d->d_flag & ~D_TYPEMASK;
1234 1.35 nat }
1235 1.35 nat
1236 1.35 nat int
1237 1.11 ad cdev_type(dev_t dev)
1238 1.11 ad {
1239 1.11 ad const struct cdevsw *d;
1240 1.11 ad
1241 1.11 ad if ((d = cdevsw_lookup(dev)) == NULL)
1242 1.11 ad return D_OTHER;
1243 1.11 ad return d->d_flag & D_TYPEMASK;
1244 1.2 gehenna }
1245 1.36 riastrad
/*
 * nommap(dev, off, prot)
 *
 * mmap routine that always fails, for non-mmappable devices.
 * Returns (paddr_t)-1, the conventional d_mmap failure value.
 */
paddr_t
nommap(dev_t dev, off_t off, int prot)
{

	return (paddr_t)-1;
}
1257