/*	$NetBSD: nvme_pci.c,v 1.14 2016/09/18 21:19:39 jdolecek Exp $	*/
/*	$OpenBSD: nvme_pci.c,v 1.3 2016/04/14 11:18:32 dlg Exp $	*/

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2016 NONAKA Kimihiro <nonaka@netbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.14 2016/09/18 21:19:39 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/bitops.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/pmf.h>
#include <sys/module.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

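/*
 * Tunables: nvme_pci_force_intx forces INTx even when MSI/MSI-X is
 * available, nvme_pci_mpsafe marks the interrupt handlers MP-safe and
 * allows binding the I/O queue vectors to CPUs, and nvme_pci_mq enables
 * one I/O queue per CPU when enough interrupt vectors can be allocated.
 */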
int nvme_pci_force_intx = 0;
int nvme_pci_mpsafe = 1;
int nvme_pci_mq = 1;		/* INTx: ioq=1, MSI/MSI-X: ioq=ncpu */

#define NVME_PCI_BAR		0x10

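/*
 * On platforms without MSI/MSI-X support only the classic INTx
 * interface exists, so provide minimal stand-ins for the routines
 * used below.
 */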
#ifndef __HAVE_PCI_MSI_MSIX
#define pci_intr_release(pc, intrs, nintrs)	\
	kmem_free(intrs, sizeof(*intrs) * nintrs)
#define pci_intr_establish_xname(pc, ih, level, intrhand, intrarg, xname) \
	pci_intr_establish(pc, ih, level, intrhand, intrarg)
#endif

struct nvme_pci_softc {
	struct nvme_softc	psc_nvme;

	pci_chipset_tag_t	psc_pc;
	pci_intr_handle_t	*psc_intrs;
	int			psc_nintrs;
};

static int	nvme_pci_match(device_t, cfdata_t, void *);
static void	nvme_pci_attach(device_t, device_t, void *);
static int	nvme_pci_detach(device_t, int);

CFATTACH_DECL3_NEW(nvme_pci, sizeof(struct nvme_pci_softc),
    nvme_pci_match, nvme_pci_attach, nvme_pci_detach, NULL, NULL,
    nvme_childdet, DVF_DETACH_SHUTDOWN);

static int	nvme_pci_intr_establish(struct nvme_softc *,
		    uint16_t, struct nvme_queue *);
static int	nvme_pci_intr_disestablish(struct nvme_softc *, uint16_t);
static int	nvme_pci_setup_intr(struct pci_attach_args *,
		    struct nvme_pci_softc *);

static int
nvme_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_NVM &&
	    PCI_INTERFACE(pa->pa_class) == PCI_INTERFACE_NVM_NVME)
		return 1;

	return 0;
}

static void
nvme_pci_attach(device_t parent, device_t self, void *aux)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype, reg;
	bus_addr_t memaddr;
	int flags, error;
#ifdef __HAVE_PCI_MSI_MSIX
	int msixoff;
#endif

	sc->sc_dev = self;
	psc->psc_pc = pa->pa_pc;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	pci_aprint_devinfo(pa, NULL);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	if ((reg & PCI_COMMAND_MASTER_ENABLE) == 0) {
		reg |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);
	}

	/* Map registers */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NVME_PCI_BAR);
	if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) {
		aprint_error_dev(self, "invalid type (type=0x%x)\n", memtype);
		return;
	}
	sc->sc_iot = pa->pa_memt;
	error = pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START,
	    memtype, &memaddr, &sc->sc_ios, &flags);
	if (error) {
		aprint_error_dev(self, "can't get map info\n");
		return;
	}

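	/*
	 * If the MSI-X table lives in the same BAR as the controller
	 * registers, shrink the register mapping so it ends where the
	 * table begins; the table region is left for the MSI-X setup
	 * code to map on its own.
	 */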
#ifdef __HAVE_PCI_MSI_MSIX
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff,
	    NULL)) {
		pcireg_t msixtbl;
		uint32_t table_offset;
		int bir;

		msixtbl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    msixoff + PCI_MSIX_TBLOFFSET);
		table_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
		bir = msixtbl & PCI_MSIX_TBLBIR_MASK;
		if (bir == 0) {
			sc->sc_ios = table_offset;
		}
	}
#endif	/* __HAVE_PCI_MSI_MSIX */

	error = bus_space_map(sc->sc_iot, memaddr, sc->sc_ios, flags,
	    &sc->sc_ioh);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space (error=%d)\n",
		    error);
		return;
	}

	/* Establish interrupts */
	if (nvme_pci_setup_intr(pa, psc) != 0) {
		aprint_error_dev(self, "unable to allocate interrupt\n");
		goto unmap;
	}
	sc->sc_intr_establish = nvme_pci_intr_establish;
	sc->sc_intr_disestablish = nvme_pci_intr_disestablish;

	sc->sc_ih = kmem_zalloc(sizeof(*sc->sc_ih) * psc->psc_nintrs, KM_SLEEP);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to allocate ih memory\n");
		goto intr_release;
	}

	if (sc->sc_use_mq) {
		sc->sc_softih = kmem_zalloc(
		    sizeof(*sc->sc_softih) * psc->psc_nintrs, KM_SLEEP);
		if (sc->sc_softih == NULL) {
			aprint_error_dev(self,
			    "unable to allocate softih memory\n");
			goto intr_free;
		}
	}

	if (nvme_attach(sc) != 0) {
		/* error printed by nvme_attach() */
		goto softintr_free;
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	SET(sc->sc_flags, NVME_F_ATTACHED);
	return;

softintr_free:
	if (sc->sc_softih) {
		kmem_free(sc->sc_softih,
		    sizeof(*sc->sc_softih) * psc->psc_nintrs);
	}
intr_free:
	kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
	sc->sc_nq = 0;
intr_release:
	pci_intr_release(pa->pa_pc, psc->psc_intrs, psc->psc_nintrs);
	psc->psc_nintrs = 0;
unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

static int
nvme_pci_detach(device_t self, int flags)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;

	if (!ISSET(sc->sc_flags, NVME_F_ATTACHED))
		return 0;

	error = nvme_detach(sc, flags);
	if (error)
		return error;

	if (sc->sc_softih) {
		kmem_free(sc->sc_softih,
		    sizeof(*sc->sc_softih) * psc->psc_nintrs);
		sc->sc_softih = NULL;
	}
	kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
	pci_intr_release(psc->psc_pc, psc->psc_intrs, psc->psc_nintrs);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	return 0;
}

static int
nvme_pci_intr_establish(struct nvme_softc *sc, uint16_t qid,
    struct nvme_queue *q)
{
	struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;
	char intr_xname[INTRDEVNAMEBUF];
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	int (*ih_func)(void *);
	void *ih_arg;
#ifdef __HAVE_PCI_MSI_MSIX
	int error;
#endif

	KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
	KASSERT(sc->sc_ih[qid] == NULL);

	if (nvme_pci_mpsafe) {
		pci_intr_setattr(psc->psc_pc, &psc->psc_intrs[qid],
		    PCI_INTR_MPSAFE, true);
	}

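	/*
	 * With a single vector (INTx, or when multiple queues are not in
	 * use) nvme_intr() is attached with the softc as its argument and
	 * services all queues; with per-queue MSI/MSI-X vectors each queue
	 * gets nvme_intr_msi() plus a matching soft interrupt.
	 */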
#ifdef __HAVE_PCI_MSI_MSIX
	if (!sc->sc_use_mq) {
#endif
		snprintf(intr_xname, sizeof(intr_xname), "%s",
		    device_xname(sc->sc_dev));
		ih_arg = sc;
		ih_func = nvme_intr;
#ifdef __HAVE_PCI_MSI_MSIX
	} else {
		if (qid == NVME_ADMIN_Q) {
			snprintf(intr_xname, sizeof(intr_xname), "%s adminq",
			    device_xname(sc->sc_dev));
		} else {
			snprintf(intr_xname, sizeof(intr_xname), "%s ioq%d",
			    device_xname(sc->sc_dev), qid);
		}
		ih_arg = q;
		ih_func = nvme_intr_msi;
	}
#endif	/* __HAVE_PCI_MSI_MSIX */

	/* establish hardware interrupt */
	sc->sc_ih[qid] = pci_intr_establish_xname(psc->psc_pc,
	    psc->psc_intrs[qid], IPL_BIO, ih_func, ih_arg, intr_xname);
	if (sc->sc_ih[qid] == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish %s interrupt\n", intr_xname);
		return 1;
	}

	/* if MSI, establish also the software interrupt */
	if (sc->sc_softih) {
		sc->sc_softih[qid] = softint_establish(
		    SOFTINT_BIO|(nvme_pci_mpsafe ? SOFTINT_MPSAFE : 0),
		    nvme_softintr_msi, q);
		if (sc->sc_softih[qid] == NULL) {
			pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
			sc->sc_ih[qid] = NULL;

			aprint_error_dev(sc->sc_dev,
			    "unable to establish %s soft interrupt\n",
			    intr_xname);
			return 1;
		}
	}

	intrstr = pci_intr_string(psc->psc_pc, psc->psc_intrs[qid], intrbuf,
	    sizeof(intrbuf));
	if (!sc->sc_use_mq) {
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	}
#ifdef __HAVE_PCI_MSI_MSIX
	else if (qid == NVME_ADMIN_Q) {
		aprint_normal_dev(sc->sc_dev,
		    "for admin queue interrupting at %s\n", intrstr);
	} else if (!nvme_pci_mpsafe) {
		aprint_normal_dev(sc->sc_dev,
		    "for io queue %d interrupting at %s\n", qid, intrstr);
	} else {
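		/*
		 * Spread the I/O queue vectors across the CPUs,
		 * round-robin, and report the resulting affinity.
		 */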
		kcpuset_t *affinity;
		cpuid_t affinity_to;

		kcpuset_create(&affinity, true);
		affinity_to = (qid - 1) % ncpu;
		kcpuset_set(affinity, affinity_to);
		error = interrupt_distribute(sc->sc_ih[qid], affinity, NULL);
		kcpuset_destroy(affinity);
		aprint_normal_dev(sc->sc_dev,
		    "for io queue %d interrupting at %s", qid, intrstr);
		if (error == 0)
			aprint_normal(" affinity to cpu%lu", affinity_to);
		aprint_normal("\n");
	}
#endif
	return 0;
}

static int
nvme_pci_intr_disestablish(struct nvme_softc *sc, uint16_t qid)
{
	struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;

	KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
	KASSERT(sc->sc_ih[qid] != NULL);

	if (sc->sc_softih) {
		softint_disestablish(sc->sc_softih[qid]);
		sc->sc_softih[qid] = NULL;
	}

	pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
	sc->sc_ih[qid] = NULL;

	return 0;
}

static int
nvme_pci_setup_intr(struct pci_attach_args *pa, struct nvme_pci_softc *psc)
{
	struct nvme_softc *sc = &psc->psc_nvme;
#ifdef __HAVE_PCI_MSI_MSIX
	int error;
	int counts[PCI_INTR_TYPE_SIZE], alloced_counts[PCI_INTR_TYPE_SIZE];
	pci_intr_handle_t *ihps;
	int max_type, intr_type;
#else
	pci_intr_handle_t ih;
#endif	/* __HAVE_PCI_MSI_MSIX */

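	/*
	 * Prefer MSI-X, then MSI, then INTx.  For MSI-X, ask for up to
	 * ncpu + 1 vectors (admin queue plus one per I/O queue).  The
	 * usable counts are probed with a trial allocation first, and the
	 * final allocation falls back to a weaker interrupt type if it
	 * cannot be satisfied.
	 */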
#ifdef __HAVE_PCI_MSI_MSIX
	if (nvme_pci_force_intx) {
		max_type = PCI_INTR_TYPE_INTX;
		goto force_intx;
	}

	/* MSI-X */
	max_type = PCI_INTR_TYPE_MSIX;
	counts[PCI_INTR_TYPE_MSIX] = min(pci_msix_count(pa->pa_pc, pa->pa_tag),
	    ncpu + 1);
	if (counts[PCI_INTR_TYPE_MSIX] > 0) {
		memset(alloced_counts, 0, sizeof(alloced_counts));
		alloced_counts[PCI_INTR_TYPE_MSIX] = counts[PCI_INTR_TYPE_MSIX];
		if (pci_intr_alloc(pa, &ihps, alloced_counts,
		    PCI_INTR_TYPE_MSIX)) {
			counts[PCI_INTR_TYPE_MSIX] = 0;
		} else {
			counts[PCI_INTR_TYPE_MSIX] =
			    alloced_counts[PCI_INTR_TYPE_MSIX];
			pci_intr_release(pa->pa_pc, ihps,
			    alloced_counts[PCI_INTR_TYPE_MSIX]);
		}
	}
	if (counts[PCI_INTR_TYPE_MSIX] < 2) {
		counts[PCI_INTR_TYPE_MSIX] = 0;
		max_type = PCI_INTR_TYPE_MSI;
	} else if (!nvme_pci_mq || !nvme_pci_mpsafe) {
		counts[PCI_INTR_TYPE_MSIX] = 2;	/* adminq + 1 ioq */
	}

retry_msi:
	/* MSI */
	counts[PCI_INTR_TYPE_MSI] = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (counts[PCI_INTR_TYPE_MSI] > 0) {
		while (counts[PCI_INTR_TYPE_MSI] > ncpu + 1) {
			if (counts[PCI_INTR_TYPE_MSI] / 2 <= ncpu + 1)
				break;
			counts[PCI_INTR_TYPE_MSI] /= 2;
		}
		memset(alloced_counts, 0, sizeof(alloced_counts));
		alloced_counts[PCI_INTR_TYPE_MSI] = counts[PCI_INTR_TYPE_MSI];
		if (pci_intr_alloc(pa, &ihps, alloced_counts,
		    PCI_INTR_TYPE_MSI)) {
			counts[PCI_INTR_TYPE_MSI] = 0;
		} else {
			counts[PCI_INTR_TYPE_MSI] =
			    alloced_counts[PCI_INTR_TYPE_MSI];
			pci_intr_release(pa->pa_pc, ihps,
			    alloced_counts[PCI_INTR_TYPE_MSI]);
		}
	}
	if (counts[PCI_INTR_TYPE_MSI] < 1) {
		counts[PCI_INTR_TYPE_MSI] = 0;
		if (max_type == PCI_INTR_TYPE_MSI)
			max_type = PCI_INTR_TYPE_INTX;
	} else if (!nvme_pci_mq || !nvme_pci_mpsafe) {
		if (counts[PCI_INTR_TYPE_MSI] > 2)
			counts[PCI_INTR_TYPE_MSI] = 2;	/* adminq + 1 ioq */
	}

force_intx:
	/* INTx */
	counts[PCI_INTR_TYPE_INTX] = 1;

	memcpy(alloced_counts, counts, sizeof(counts));
	error = pci_intr_alloc(pa, &ihps, alloced_counts, max_type);
	if (error) {
		if (max_type != PCI_INTR_TYPE_INTX) {
retry:
			memset(counts, 0, sizeof(counts));
			if (max_type == PCI_INTR_TYPE_MSIX) {
				max_type = PCI_INTR_TYPE_MSI;
				goto retry_msi;
			} else {
				max_type = PCI_INTR_TYPE_INTX;
				goto force_intx;
			}
		}
		return error;
	}

	intr_type = pci_intr_type(pa->pa_pc, ihps[0]);
	if (alloced_counts[intr_type] < counts[intr_type]) {
		if (intr_type != PCI_INTR_TYPE_INTX) {
			pci_intr_release(pa->pa_pc, ihps,
			    alloced_counts[intr_type]);
			max_type = intr_type;
			goto retry;
		}
		return EBUSY;
	}

	psc->psc_intrs = ihps;
	psc->psc_nintrs = alloced_counts[intr_type];
	if (intr_type == PCI_INTR_TYPE_MSI) {
		if (alloced_counts[intr_type] > ncpu + 1)
			alloced_counts[intr_type] = ncpu + 1;
	}
	sc->sc_use_mq = alloced_counts[intr_type] > 1;
	sc->sc_nq = sc->sc_use_mq ? alloced_counts[intr_type] - 1 : 1;

#else	/* !__HAVE_PCI_MSI_MSIX */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
		return EBUSY;
	}

	psc->psc_intrs = kmem_zalloc(sizeof(ih), KM_SLEEP);
	psc->psc_intrs[0] = ih;
	psc->psc_nintrs = 1;
	sc->sc_use_mq = 0;
	sc->sc_nq = 1;
#endif	/* __HAVE_PCI_MSI_MSIX */

	return 0;
}

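/*
 * Module glue: the nvme driver module depends on the pci bus and on
 * dk_subr, which the ld(4) namespace children use.
 */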
MODULE(MODULE_CLASS_DRIVER, nvme, "pci,dk_subr");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
nvme_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t cmajor, bmajor;
	extern const struct bdevsw ld_bdevsw;
	extern const struct cdevsw ld_cdevsw;
	extern const struct cdevsw nvme_cdevsw;
#endif
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		/* devsw must be done before configuring the actual device,
		 * otherwise ldattach() fails
		 */
		bmajor = cmajor = NODEVMAJOR;
		error = devsw_attach(ld_cd.cd_name, &ld_bdevsw, &bmajor,
		    &ld_cdevsw, &cmajor);
		if (error) {
			aprint_error("%s: unable to register devsw\n",
			    ld_cd.cd_name);
			return error;
		}

		error = config_init_component(cfdriver_ioconf_nvme_pci,
		    cfattach_ioconf_nvme_pci, cfdata_ioconf_nvme_pci);
		if (error)
			return error;

		bmajor = cmajor = NODEVMAJOR;
		error = devsw_attach(nvme_cd.cd_name, NULL, &bmajor,
		    &nvme_cdevsw, &cmajor);
		if (error) {
			aprint_error("%s: unable to register devsw\n",
			    nvme_cd.cd_name);
			/* do not abort, just /dev/nvme* will not work */
		}
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		devsw_detach(NULL, &nvme_cdevsw);

		error = config_fini_component(cfdriver_ioconf_nvme_pci,
		    cfattach_ioconf_nvme_pci, cfdata_ioconf_nvme_pci);
		if (error)
			return error;

		devsw_detach(&ld_bdevsw, &ld_cdevsw);
#endif
		return error;
	default:
		return ENOTTY;
	}
}