/*	$NetBSD: nvme_pci.c,v 1.26.4.1 2021/06/21 17:25:48 martin Exp $	*/
/*	$OpenBSD: nvme_pci.c,v 1.3 2016/04/14 11:18:32 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2016 NONAKA Kimihiro <nonaka@netbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.26.4.1 2021/06/21 17:25:48 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/bitops.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/pmf.h>
#include <sys/module.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

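/*
 * Patchable tunables: nvme_pci_force_intx forces legacy INTx even when
 * MSI/MSI-X is available, nvme_pci_mpsafe establishes the interrupt
 * handlers as MPSAFE, and nvme_pci_mq (together with nvme_pci_mpsafe)
 * allows one I/O queue per CPU instead of a single I/O queue.
 */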
int nvme_pci_force_intx = 0;
int nvme_pci_mpsafe = 1;
int nvme_pci_mq = 1;		/* INTx: ioq=1, MSI/MSI-X: ioq=ncpu */

#define NVME_PCI_BAR		0x10

struct nvme_pci_softc {
	struct nvme_softc	psc_nvme;

	pci_chipset_tag_t	psc_pc;
	pci_intr_handle_t	*psc_intrs;
	int			psc_nintrs;
};

static int	nvme_pci_match(device_t, cfdata_t, void *);
static void	nvme_pci_attach(device_t, device_t, void *);
static int	nvme_pci_detach(device_t, int);
static int	nvme_pci_rescan(device_t, const char *, const int *);
static bool	nvme_pci_suspend(device_t, const pmf_qual_t *);
static bool	nvme_pci_resume(device_t, const pmf_qual_t *);

CFATTACH_DECL3_NEW(nvme_pci, sizeof(struct nvme_pci_softc),
    nvme_pci_match, nvme_pci_attach, nvme_pci_detach, NULL, nvme_pci_rescan,
    nvme_childdet, DVF_DETACH_SHUTDOWN);

static int	nvme_pci_intr_establish(struct nvme_softc *,
		    uint16_t, struct nvme_queue *);
static int	nvme_pci_intr_disestablish(struct nvme_softc *, uint16_t);
static int	nvme_pci_setup_intr(struct pci_attach_args *,
		    struct nvme_pci_softc *);

static const struct nvme_pci_quirk {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
	uint32_t		quirks;
} nvme_pci_quirks[] = {
	{ PCI_VENDOR_HGST, PCI_PRODUCT_HGST_SN100,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_HGST, PCI_PRODUCT_HGST_SN200,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_BEIJING_MEMBLAZE, PCI_PRODUCT_BEIJING_MEMBLAZE_PBLAZE4,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_SAMSUNGELEC3, PCI_PRODUCT_SAMSUNGELEC3_172X,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_SAMSUNGELEC3, PCI_PRODUCT_SAMSUNGELEC3_172XAB,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
};

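/*
 * Return the quirk table entry matching the device's PCI vendor/product
 * ID, or NULL if the device needs no special handling.
 */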
static const struct nvme_pci_quirk *
nvme_pci_lookup_quirk(struct pci_attach_args *pa)
{
	const struct nvme_pci_quirk *q;
	int i;

	for (i = 0; i < __arraycount(nvme_pci_quirks); i++) {
		q = &nvme_pci_quirks[i];

		if (PCI_VENDOR(pa->pa_id) == q->vendor &&
		    PCI_PRODUCT(pa->pa_id) == q->product)
			return q;
	}
	return NULL;
}

static int
nvme_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_NVM &&
	    PCI_INTERFACE(pa->pa_class) == PCI_INTERFACE_NVM_NVME)
		return 1;

	return 0;
}

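/*
 * Map the controller registers from BAR0, allocate interrupt vectors,
 * apply any per-device quirks and hand the controller to the MI
 * nvme(4) code via nvme_attach().
 */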
static void
nvme_pci_attach(device_t parent, device_t self, void *aux)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	struct pci_attach_args *pa = aux;
	const struct nvme_pci_quirk *quirk;
	pcireg_t memtype, reg;
	bus_addr_t memaddr;
	int flags, error;
	int msixoff;

	sc->sc_dev = self;
	psc->psc_pc = pa->pa_pc;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	pci_aprint_devinfo(pa, NULL);

	/* Map registers */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NVME_PCI_BAR);
	if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) {
		aprint_error_dev(self, "invalid type (type=0x%x)\n", memtype);
		return;
	}
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	if (((reg & PCI_COMMAND_MASTER_ENABLE) == 0) ||
	    ((reg & PCI_COMMAND_MEM_ENABLE) == 0)) {
		/*
		 * Enable bus mastering and memory-space decoding in case
		 * the BIOS or UEFI firmware didn't set them.
		 */
		reg |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    reg);
	}

	sc->sc_iot = pa->pa_memt;
	error = pci_mapreg_info(pa->pa_pc, pa->pa_tag, NVME_PCI_BAR,
	    memtype, &memaddr, &sc->sc_ios, &flags);
	if (error) {
		aprint_error_dev(self, "can't get map info\n");
		return;
	}

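	/*
	 * If the MSI-X table lives in this same BAR, clamp the mapping
	 * so it stops at the start of the table and does not cover it.
	 */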
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff,
	    NULL)) {
		pcireg_t msixtbl;
		uint32_t table_offset;
		int bir;

		msixtbl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    msixoff + PCI_MSIX_TBLOFFSET);
		table_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
		bir = msixtbl & PCI_MSIX_TBLBIR_MASK;
		if (bir == PCI_MAPREG_NUM(NVME_PCI_BAR)) {
			sc->sc_ios = table_offset;
		}
	}

	error = bus_space_map(sc->sc_iot, memaddr, sc->sc_ios, flags,
	    &sc->sc_ioh);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space (error=%d)\n",
		    error);
		return;
	}

	/* Establish interrupts */
	if (nvme_pci_setup_intr(pa, psc) != 0) {
		aprint_error_dev(self, "unable to allocate interrupt\n");
		goto unmap;
	}
	sc->sc_intr_establish = nvme_pci_intr_establish;
	sc->sc_intr_disestablish = nvme_pci_intr_disestablish;

	sc->sc_ih = kmem_zalloc(sizeof(*sc->sc_ih) * psc->psc_nintrs, KM_SLEEP);
	sc->sc_softih = kmem_zalloc(
	    sizeof(*sc->sc_softih) * psc->psc_nintrs, KM_SLEEP);

	quirk = nvme_pci_lookup_quirk(pa);
	if (quirk != NULL)
		sc->sc_quirks = quirk->quirks;

	if (nvme_attach(sc) != 0) {
		/* error printed by nvme_attach() */
		goto softintr_free;
	}

	if (!pmf_device_register(self, nvme_pci_suspend, nvme_pci_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");

	SET(sc->sc_flags, NVME_F_ATTACHED);
	return;

softintr_free:
	kmem_free(sc->sc_softih, sizeof(*sc->sc_softih) * psc->psc_nintrs);
	kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
	sc->sc_nq = 0;
	pci_intr_release(pa->pa_pc, psc->psc_intrs, psc->psc_nintrs);
	psc->psc_nintrs = 0;
unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

static int
nvme_pci_rescan(device_t self, const char *attr, const int *flags)
{

	return nvme_rescan(self, attr, flags);
}

static bool
nvme_pci_suspend(device_t self, const pmf_qual_t *qual)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;

	error = nvme_suspend(sc);
	if (error)
		return false;

	return true;
}

static bool
nvme_pci_resume(device_t self, const pmf_qual_t *qual)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;

	error = nvme_resume(sc);
	if (error)
		return false;

	return true;
}

static int
nvme_pci_detach(device_t self, int flags)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;

	if (!ISSET(sc->sc_flags, NVME_F_ATTACHED))
		return 0;

	error = nvme_detach(sc, flags);
	if (error)
		return error;

	kmem_free(sc->sc_softih, sizeof(*sc->sc_softih) * psc->psc_nintrs);
	sc->sc_softih = NULL;

	kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
	pci_intr_release(psc->psc_pc, psc->psc_intrs, psc->psc_nintrs);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	return 0;
}

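/*
 * Establish the hardware interrupt and the matching soft interrupt for
 * queue qid.  With a single (INTx) vector one handler serves all queues;
 * with MSI/MSI-X each queue gets its own vector, and I/O queue vectors
 * are distributed across CPUs when running MPSAFE.
 */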
static int
nvme_pci_intr_establish(struct nvme_softc *sc, uint16_t qid,
    struct nvme_queue *q)
{
	struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;
	char intr_xname[INTRDEVNAMEBUF];
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	int (*ih_func)(void *);
	void (*ih_func_soft)(void *);
	void *ih_arg;
	int error;

	KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
	KASSERT(sc->sc_ih[qid] == NULL);

	if (nvme_pci_mpsafe) {
		pci_intr_setattr(psc->psc_pc, &psc->psc_intrs[qid],
		    PCI_INTR_MPSAFE, true);
	}

	if (!sc->sc_use_mq) {
		snprintf(intr_xname, sizeof(intr_xname), "%s",
		    device_xname(sc->sc_dev));
		ih_arg = sc;
		ih_func = nvme_intr;
		ih_func_soft = nvme_softintr_intx;
	} else {
		if (qid == NVME_ADMIN_Q) {
			snprintf(intr_xname, sizeof(intr_xname), "%s adminq",
			    device_xname(sc->sc_dev));
		} else {
			snprintf(intr_xname, sizeof(intr_xname), "%s ioq%d",
			    device_xname(sc->sc_dev), qid);
		}
		ih_arg = q;
		ih_func = nvme_intr_msi;
		ih_func_soft = nvme_softintr_msi;
	}

	/* establish hardware interrupt */
	sc->sc_ih[qid] = pci_intr_establish_xname(psc->psc_pc,
	    psc->psc_intrs[qid], IPL_BIO, ih_func, ih_arg, intr_xname);
	if (sc->sc_ih[qid] == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish %s interrupt\n", intr_xname);
		return 1;
	}

	/* also establish the software interrupt */
	sc->sc_softih[qid] = softint_establish(
	    SOFTINT_BIO|(nvme_pci_mpsafe ? SOFTINT_MPSAFE : 0),
	    ih_func_soft, q);
	if (sc->sc_softih[qid] == NULL) {
		pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
		sc->sc_ih[qid] = NULL;

		aprint_error_dev(sc->sc_dev,
		    "unable to establish %s soft interrupt\n",
		    intr_xname);
		return 1;
	}

	intrstr = pci_intr_string(psc->psc_pc, psc->psc_intrs[qid], intrbuf,
	    sizeof(intrbuf));
	if (!sc->sc_use_mq) {
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	} else if (qid == NVME_ADMIN_Q) {
		aprint_normal_dev(sc->sc_dev,
		    "for admin queue interrupting at %s\n", intrstr);
	} else if (!nvme_pci_mpsafe) {
		aprint_normal_dev(sc->sc_dev,
		    "for io queue %d interrupting at %s\n", qid, intrstr);
	} else {
		kcpuset_t *affinity;
		cpuid_t affinity_to;

		kcpuset_create(&affinity, true);
		affinity_to = (qid - 1) % ncpu;
		kcpuset_set(affinity, affinity_to);
		error = interrupt_distribute(sc->sc_ih[qid], affinity, NULL);
		kcpuset_destroy(affinity);
		aprint_normal_dev(sc->sc_dev,
		    "for io queue %d interrupting at %s", qid, intrstr);
		if (error == 0)
			aprint_normal(" affinity to cpu%lu", affinity_to);
		aprint_normal("\n");
	}
	return 0;
}

static int
nvme_pci_intr_disestablish(struct nvme_softc *sc, uint16_t qid)
{
	struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;

	KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
	KASSERT(sc->sc_ih[qid] != NULL);

	if (sc->sc_softih) {
		softint_disestablish(sc->sc_softih[qid]);
		sc->sc_softih[qid] = NULL;
	}

	pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
	sc->sc_ih[qid] = NULL;

	return 0;
}

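/*
 * Allocate interrupt vectors, preferring MSI-X, then MSI, then INTx.
 * Up to ncpu + 1 vectors are requested (one for the admin queue plus
 * one per CPU for I/O queues); without nvme_pci_mq and nvme_pci_mpsafe
 * the request is limited to two vectors (admin queue + one I/O queue).
 * The result determines sc_use_mq and the I/O queue count sc_nq.
 */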
static int
nvme_pci_setup_intr(struct pci_attach_args *pa, struct nvme_pci_softc *psc)
{
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_handle_t *ihps;
	int intr_type;

	memset(counts, 0, sizeof(counts));

	if (nvme_pci_force_intx)
		goto force_intx;

	/* MSI-X */
	counts[PCI_INTR_TYPE_MSIX] = uimin(pci_msix_count(pa->pa_pc, pa->pa_tag),
	    ncpu + 1);
	if (counts[PCI_INTR_TYPE_MSIX] < 1) {
		counts[PCI_INTR_TYPE_MSIX] = 0;
	} else if (!nvme_pci_mq || !nvme_pci_mpsafe) {
		if (counts[PCI_INTR_TYPE_MSIX] > 2)
			counts[PCI_INTR_TYPE_MSIX] = 2;	/* adminq + 1 ioq */
	}

	/* MSI */
	counts[PCI_INTR_TYPE_MSI] = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (counts[PCI_INTR_TYPE_MSI] > 0) {
		while (counts[PCI_INTR_TYPE_MSI] > ncpu + 1) {
			if (counts[PCI_INTR_TYPE_MSI] / 2 <= ncpu + 1)
				break;
			counts[PCI_INTR_TYPE_MSI] /= 2;
		}
	}
	if (counts[PCI_INTR_TYPE_MSI] < 1) {
		counts[PCI_INTR_TYPE_MSI] = 0;
	} else if (!nvme_pci_mq || !nvme_pci_mpsafe) {
		if (counts[PCI_INTR_TYPE_MSI] > 2)
			counts[PCI_INTR_TYPE_MSI] = 2;	/* adminq + 1 ioq */
	}

force_intx:
	/* INTx */
	counts[PCI_INTR_TYPE_INTX] = 1;

	error = pci_intr_alloc(pa, &ihps, counts, PCI_INTR_TYPE_MSIX);
	if (error)
		return error;

	intr_type = pci_intr_type(pa->pa_pc, ihps[0]);

	psc->psc_intrs = ihps;
	psc->psc_nintrs = counts[intr_type];
	if (intr_type == PCI_INTR_TYPE_MSI) {
		if (counts[intr_type] > ncpu + 1)
			counts[intr_type] = ncpu + 1;
	}
	sc->sc_use_mq = counts[intr_type] > 1;
	sc->sc_nq = sc->sc_use_mq ? counts[intr_type] - 1 : 1;

	return 0;
}

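/*
 * Module glue: when built as a module, attach the autoconf data and
 * register the nvme character device switch on load, and undo both on
 * unload.
 */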
MODULE(MODULE_CLASS_DRIVER, nvme, "pci,dk_subr");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
nvme_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t cmajor, bmajor;
	extern const struct cdevsw nvme_cdevsw;
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_nvme_pci,
		    cfattach_ioconf_nvme_pci, cfdata_ioconf_nvme_pci);
		if (error)
			break;

		bmajor = cmajor = NODEVMAJOR;
		error = devsw_attach(nvme_cd.cd_name, NULL, &bmajor,
		    &nvme_cdevsw, &cmajor);
		if (error) {
			aprint_error("%s: unable to register devsw\n",
			    nvme_cd.cd_name);
			/* do not abort; /dev/nvme* just will not work */
		}
		break;
	case MODULE_CMD_FINI:
		devsw_detach(NULL, &nvme_cdevsw);

		error = config_fini_component(cfdriver_ioconf_nvme_pci,
		    cfattach_ioconf_nvme_pci, cfdata_ioconf_nvme_pci);
		break;
	default:
		break;
	}
#endif
	return error;
}