1 /*	$NetBSD: nvme.c,v 1.2.2.2 2016/05/29 08:44:21 skrll Exp $	*/
2 /* $OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
3
4 /*
5  * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.2.2.2 2016/05/29 08:44:21 skrll Exp $");
22
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/kernel.h>
26 #include <sys/atomic.h>
27 #include <sys/bus.h>
28 #include <sys/buf.h>
29 #include <sys/device.h>
30 #include <sys/kmem.h>
31 #include <sys/once.h>
32 #include <sys/queue.h>
33 #include <sys/mutex.h>
34
35 #include <dev/ic/nvmereg.h>
36 #include <dev/ic/nvmevar.h>
37
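/* Default number of entries used for the admin queue and each I/O queue. */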
38 int nvme_adminq_size = 128;
39 int nvme_ioq_size = 128;
40
41 static int nvme_print(void *, const char *);
42
43 static int nvme_ready(struct nvme_softc *, uint32_t);
44 static int nvme_enable(struct nvme_softc *, u_int);
45 static int nvme_disable(struct nvme_softc *);
46 static int nvme_shutdown(struct nvme_softc *);
47
48 static void nvme_version(struct nvme_softc *, uint32_t);
49 #ifdef NVME_DEBUG
50 static void nvme_dumpregs(struct nvme_softc *);
51 #endif
52 static int nvme_identify(struct nvme_softc *, u_int);
53 static void nvme_fill_identify(struct nvme_queue *, struct nvme_ccb *,
54 void *);
55
56 static int nvme_ccbs_alloc(struct nvme_queue *, u_int);
57 static void nvme_ccbs_free(struct nvme_queue *);
58
59 static struct nvme_ccb *
60 nvme_ccb_get(struct nvme_queue *);
61 static void nvme_ccb_put(struct nvme_queue *, struct nvme_ccb *);
62
63 static int nvme_poll(struct nvme_softc *, struct nvme_queue *,
64 struct nvme_ccb *, void (*)(struct nvme_queue *,
65 struct nvme_ccb *, void *));
66 static void nvme_poll_fill(struct nvme_queue *, struct nvme_ccb *, void *);
67 static void nvme_poll_done(struct nvme_queue *, struct nvme_ccb *,
68 struct nvme_cqe *);
69 static void nvme_sqe_fill(struct nvme_queue *, struct nvme_ccb *, void *);
70 static void nvme_empty_done(struct nvme_queue *, struct nvme_ccb *,
71 struct nvme_cqe *);
72
73 static struct nvme_queue *
74 nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
75 static int nvme_q_create(struct nvme_softc *, struct nvme_queue *);
76 static int nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
77 static void nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
78 struct nvme_ccb *, void (*)(struct nvme_queue *,
79 struct nvme_ccb *, void *));
80 static int nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
81 static void nvme_q_free(struct nvme_softc *, struct nvme_queue *);
82
83 static struct nvme_dmamem *
84 nvme_dmamem_alloc(struct nvme_softc *, size_t);
85 static void nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
86 static void nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *,
87 int);
88
89 static void nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
90 void *);
91 static void nvme_ns_io_done(struct nvme_queue *, struct nvme_ccb *,
92 struct nvme_cqe *);
93 static void nvme_ns_sync_fill(struct nvme_queue *, struct nvme_ccb *,
94 void *);
95 static void nvme_ns_sync_done(struct nvme_queue *, struct nvme_ccb *,
96 struct nvme_cqe *);
97
98 #define nvme_read4(_s, _r) \
99 bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
100 #define nvme_write4(_s, _r, _v) \
101 bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
102 #ifdef __LP64__
103 #define nvme_read8(_s, _r) \
104 bus_space_read_8((_s)->sc_iot, (_s)->sc_ioh, (_r))
105 #define nvme_write8(_s, _r, _v) \
106 bus_space_write_8((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
107 #else /* __LP64__ */
108 static inline uint64_t
109 nvme_read8(struct nvme_softc *sc, bus_size_t r)
110 {
111 uint64_t v;
112 uint32_t *a = (uint32_t *)&v;
113
114 #if _BYTE_ORDER == _LITTLE_ENDIAN
115 a[0] = nvme_read4(sc, r);
116 a[1] = nvme_read4(sc, r + 4);
117 #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
118 a[1] = nvme_read4(sc, r);
119 a[0] = nvme_read4(sc, r + 4);
120 #endif
121
122 return v;
123 }
124
125 static inline void
126 nvme_write8(struct nvme_softc *sc, bus_size_t r, uint64_t v)
127 {
128 uint32_t *a = (uint32_t *)&v;
129
130 #if _BYTE_ORDER == _LITTLE_ENDIAN
131 nvme_write4(sc, r, a[0]);
132 nvme_write4(sc, r + 4, a[1]);
133 #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
134 nvme_write4(sc, r, a[1]);
135 nvme_write4(sc, r + 4, a[0]);
136 #endif
137 }
138 #endif /* __LP64__ */
139 #define nvme_barrier(_s, _r, _l, _f) \
140 bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
141
142 pool_cache_t nvme_ns_ctx_cache;
143 ONCE_DECL(nvme_init_once);
144
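/*
 * One-time module initialisation, run via RUN_ONCE() from nvme_attach():
 * create the pool cache used for per-request namespace contexts
 * (struct nvme_ns_context) at IPL_BIO.
 */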
145 static int
146 nvme_init(void)
147 {
148 nvme_ns_ctx_cache = pool_cache_init(sizeof(struct nvme_ns_context),
149 0, 0, 0, "nvme_ns_ctx", NULL, IPL_BIO, NULL, NULL, NULL);
150 KASSERT(nvme_ns_ctx_cache != NULL);
151 return 0;
152 }
153
154 static void
155 nvme_version(struct nvme_softc *sc, uint32_t ver)
156 {
157 const char *v = NULL;
158
159 switch (ver) {
160 case NVME_VS_1_0:
161 v = "1.0";
162 break;
163 case NVME_VS_1_1:
164 v = "1.1";
165 break;
166 case NVME_VS_1_2:
167 v = "1.2";
168 break;
169 default:
170 aprint_error_dev(sc->sc_dev, "unknown version 0x%08x\n", ver);
171 return;
172 }
173
174 aprint_normal_dev(sc->sc_dev, "NVMe %s\n", v);
175 }
176
177 #ifdef NVME_DEBUG
178 static void
179 nvme_dumpregs(struct nvme_softc *sc)
180 {
181 uint64_t r8;
182 uint32_t r4;
183
184 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
185 r8 = nvme_read8(sc, NVME_CAP);
186 printf("%s: cap 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_CAP));
187 printf("%s: mpsmax %u (%u)\n", DEVNAME(sc),
188 (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
189 printf("%s: mpsmin %u (%u)\n", DEVNAME(sc),
190 (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
191 printf("%s: css %llu\n", DEVNAME(sc), NVME_CAP_CSS(r8));
192 printf("%s: nssrs %llu\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
193 printf("%s: dstrd %u\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
194 printf("%s: to %llu msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
195 printf("%s: ams %llu\n", DEVNAME(sc), NVME_CAP_AMS(r8));
196 printf("%s: cqr %llu\n", DEVNAME(sc), NVME_CAP_CQR(r8));
197 printf("%s: mqes %llu\n", DEVNAME(sc), NVME_CAP_MQES(r8));
198
199 printf("%s: vs 0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));
200
201 r4 = nvme_read4(sc, NVME_CC);
202 printf("%s: cc 0x%04x\n", DEVNAME(sc), r4);
203 printf("%s: iocqes %u\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4));
204 printf("%s: iosqes %u\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4));
205 printf("%s: shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
206 printf("%s: ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
207 printf("%s: mps %u\n", DEVNAME(sc), NVME_CC_MPS_R(r4));
208 printf("%s: css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
209 printf("%s: en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN));
210
211 printf("%s: csts 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_CSTS));
212 printf("%s: aqa 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_AQA));
213 printf("%s: asq 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
214 printf("%s: acq 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
215 #undef DEVNAME
216 }
217 #endif /* NVME_DEBUG */
218
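/*
 * Wait for CSTS.RDY to match the requested state, re-reading the
 * register roughly once per millisecond for at most sc_rdy_to
 * iterations.  Returns 0 on success, 1 on timeout.
 */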
219 static int
220 nvme_ready(struct nvme_softc *sc, uint32_t rdy)
221 {
222 u_int i = 0;
223
224 while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
225 if (i++ > sc->sc_rdy_to)
226 return 1;
227
228 delay(1000);
229 nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
230 }
231
232 return 0;
233 }
234
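/*
 * Enable the controller.  If CC.EN is already set, just wait for RDY.
 * Otherwise program the admin queue size (AQA) and the admin
 * submission/completion queue base addresses (ASQ/ACQ), then build a
 * fresh CC value: 64-byte SQ entries, 16-byte CQ entries, no shutdown
 * notification, NVM command set, round-robin arbitration and the host
 * page size; finally set CC.EN and wait for CSTS.RDY.
 */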
235 static int
236 nvme_enable(struct nvme_softc *sc, u_int mps)
237 {
238 uint32_t cc;
239
240 cc = nvme_read4(sc, NVME_CC);
241 if (ISSET(cc, NVME_CC_EN))
242 return nvme_ready(sc, NVME_CSTS_RDY);
243
244 nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
245 NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
246 nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
247
248 nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
249 nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
250 nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
251 nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
252
253 CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
254 NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
255 SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
256 SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
257 SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
258 SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
259 SET(cc, NVME_CC_MPS(mps));
260 SET(cc, NVME_CC_EN);
261
262 nvme_write4(sc, NVME_CC, cc);
263 nvme_barrier(sc, 0, sc->sc_ios,
264 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
265
266 return nvme_ready(sc, NVME_CSTS_RDY);
267 }
268
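/*
 * Disable the controller.  If it is enabled and not in a fatal state,
 * wait for it to become ready first so the EN transition is well
 * defined, then clear CC.EN and wait for CSTS.RDY to clear.
 */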
269 static int
270 nvme_disable(struct nvme_softc *sc)
271 {
272 uint32_t cc, csts;
273
274 cc = nvme_read4(sc, NVME_CC);
275 if (ISSET(cc, NVME_CC_EN)) {
276 csts = nvme_read4(sc, NVME_CSTS);
277 if (!ISSET(csts, NVME_CSTS_CFS) &&
278 nvme_ready(sc, NVME_CSTS_RDY) != 0)
279 return 1;
280 }
281
282 CLR(cc, NVME_CC_EN);
283
284 nvme_write4(sc, NVME_CC, cc);
285 nvme_barrier(sc, 0, sc->sc_ios,
286 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
287
288 return nvme_ready(sc, 0);
289 }
290
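/*
 * Bus-independent attachment.  Reads VS and CAP, checks that the
 * controller's supported page sizes cover the CPU page size, resets
 * the controller, sets up the admin queue and its interrupt, runs
 * IDENTIFY CONTROLLER, creates the I/O queues and finally attaches
 * one child device per namespace.
 */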
291 int
292 nvme_attach(struct nvme_softc *sc)
293 {
294 struct nvme_attach_args naa;
295 uint64_t cap;
296 uint32_t reg;
297 u_int dstrd;
298 u_int mps = PAGE_SHIFT;
299 int adminq_entries = nvme_adminq_size;
300 int ioq_entries = nvme_ioq_size;
301 int i;
302
303 RUN_ONCE(&nvme_init_once, nvme_init);
304
305 reg = nvme_read4(sc, NVME_VS);
306 if (reg == 0xffffffff) {
307 aprint_error_dev(sc->sc_dev, "invalid mapping\n");
308 return 1;
309 }
310
311 nvme_version(sc, reg);
312
313 cap = nvme_read8(sc, NVME_CAP);
314 dstrd = NVME_CAP_DSTRD(cap);
315 if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
316 aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
317 "is greater than CPU page size %u\n",
318 1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
319 return 1;
320 }
321 if (NVME_CAP_MPSMAX(cap) < mps)
322 mps = NVME_CAP_MPSMAX(cap);
323
324 sc->sc_rdy_to = NVME_CAP_TO(cap);
325 sc->sc_mps = 1 << mps;
326 sc->sc_mdts = MAXPHYS;
327 sc->sc_max_sgl = 2;
328
329 if (nvme_disable(sc) != 0) {
330 aprint_error_dev(sc->sc_dev, "unable to disable controller\n");
331 return 1;
332 }
333
334 sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries, dstrd);
335 if (sc->sc_admin_q == NULL) {
336 aprint_error_dev(sc->sc_dev,
337 "unable to allocate admin queue\n");
338 return 1;
339 }
340 if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q))
341 goto free_admin_q;
342
343 if (nvme_enable(sc, mps) != 0) {
344 aprint_error_dev(sc->sc_dev, "unable to enable controller\n");
345 goto disestablish_admin_q;
346 }
347
348 if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
349 aprint_error_dev(sc->sc_dev, "unable to identify controller\n");
350 goto disable;
351 }
352
353 /* we know how big things are now */
354 sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;
355
356 /* reallocate ccbs of admin queue with new max sgl. */
357 nvme_ccbs_free(sc->sc_admin_q);
358 nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);
359
360 sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
361 if (sc->sc_q == NULL) {
362 aprint_error_dev(sc->sc_dev, "unable to allocate io queue\n");
363 goto disable;
364 }
365 for (i = 0; i < sc->sc_nq; i++) {
366 sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries, dstrd);
367 if (sc->sc_q[i] == NULL) {
368 aprint_error_dev(sc->sc_dev,
369 "unable to allocate io queue\n");
370 goto free_q;
371 }
372 if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
373 aprint_error_dev(sc->sc_dev,
374 "unable to create io queue\n");
375 nvme_q_free(sc, sc->sc_q[i]);
376 goto free_q;
377 }
378 }
379
380 if (!sc->sc_use_mq)
381 nvme_write4(sc, NVME_INTMC, 1);
382
383 sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
384 KM_SLEEP);
385 for (i = 0; i < sc->sc_nn; i++) {
386 memset(&naa, 0, sizeof(naa));
387 naa.naa_nsid = i + 1;
388 naa.naa_qentries = ioq_entries;
389 sc->sc_namespaces[i].dev = config_found(sc->sc_dev, &naa,
390 nvme_print);
391 }
392
393 return 0;
394
395 free_q:
396 while (--i >= 0) {
397 nvme_q_delete(sc, sc->sc_q[i]);
398 nvme_q_free(sc, sc->sc_q[i]);
399 }
400 disable:
401 nvme_disable(sc);
402 disestablish_admin_q:
403 sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
404 free_admin_q:
405 nvme_q_free(sc, sc->sc_admin_q);
406
407 return 1;
408 }
409
410 static int
411 nvme_print(void *aux, const char *pnp)
412 {
413 struct nvme_attach_args *naa = aux;
414
415 if (pnp)
416 aprint_normal("at %s", pnp);
417
418 if (naa->naa_nsid > 0)
419 aprint_normal(" nsid %d", naa->naa_nsid);
420
421 return UNCONF;
422 }
423
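/*
 * Detach: take down the namespace children first, shut the controller
 * down, then free the I/O queues and the admin queue.
 */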
424 int
425 nvme_detach(struct nvme_softc *sc, int flags)
426 {
427 int i, error;
428
429 error = config_detach_children(sc->sc_dev, flags);
430 if (error)
431 return error;
432
433 error = nvme_shutdown(sc);
434 if (error)
435 return error;
436
437 for (i = 0; i < sc->sc_nq; i++)
438 nvme_q_free(sc, sc->sc_q[i]);
439 kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
440 nvme_q_free(sc, sc->sc_admin_q);
441
442 return 0;
443 }
444
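/*
 * Orderly shutdown used by detach: delete the I/O queues, request a
 * normal shutdown via CC.SHN and poll CSTS.SHST for up to ~4 seconds.
 * If any step fails, fall back to simply disabling the controller.
 */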
445 static int
446 nvme_shutdown(struct nvme_softc *sc)
447 {
448 uint32_t cc, csts;
449 bool disabled = false;
450 int i;
451
452 if (!sc->sc_use_mq)
453 nvme_write4(sc, NVME_INTMS, 1);
454
455 for (i = 0; i < sc->sc_nq; i++) {
456 if (nvme_q_delete(sc, sc->sc_q[i]) != 0) {
457 aprint_error_dev(sc->sc_dev,
458 "unable to delete io queue %d, disabling\n", i + 1);
459 disabled = true;
460 }
461 }
462 sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
463 if (disabled)
464 goto disable;
465
466 cc = nvme_read4(sc, NVME_CC);
467 CLR(cc, NVME_CC_SHN_MASK);
468 SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
469 nvme_write4(sc, NVME_CC, cc);
470
471 for (i = 0; i < 4000; i++) {
472 nvme_barrier(sc, 0, sc->sc_ios,
473 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
474 csts = nvme_read4(sc, NVME_CSTS);
475 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
476 return 0;
477
478 delay(1000);
479 }
480
481 	aprint_error_dev(sc->sc_dev, "unable to shut down, disabling\n");
482
483 disable:
484 nvme_disable(sc);
485 return 0;
486 }
487
488 void
489 nvme_childdet(device_t self, device_t child)
490 {
491 struct nvme_softc *sc = device_private(self);
492 int i;
493
494 for (i = 0; i < sc->sc_nn; i++) {
495 if (sc->sc_namespaces[i].dev == child) {
496 /* Already freed ns->ident. */
497 sc->sc_namespaces[i].dev = NULL;
498 break;
499 }
500 }
501 }
502
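/*
 * Issue IDENTIFY (CNS 0) for a single namespace on the admin queue and
 * cache a copy of the returned identify structure in the per-namespace
 * state.
 */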
503 int
504 nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
505 {
506 struct nvme_sqe sqe;
507 struct nvm_identify_namespace *identify;
508 struct nvme_dmamem *mem;
509 struct nvme_ccb *ccb;
510 struct nvme_namespace *ns;
511 int rv;
512
513 KASSERT(nsid > 0);
514
515 ccb = nvme_ccb_get(sc->sc_admin_q);
516 KASSERT(ccb != NULL);
517
518 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
519 	if (mem == NULL) {
		nvme_ccb_put(sc->sc_admin_q, ccb);
520 		return ENOMEM;
	}
521
522 memset(&sqe, 0, sizeof(sqe));
523 sqe.opcode = NVM_ADMIN_IDENTIFY;
524 htolem32(&sqe.nsid, nsid);
525 htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
526 htolem32(&sqe.cdw10, 0);
527
528 ccb->ccb_done = nvme_empty_done;
529 ccb->ccb_cookie = &sqe;
530
531 nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
532 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
533 nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
534
535 nvme_ccb_put(sc->sc_admin_q, ccb);
536
537 if (rv != 0) {
538 rv = EIO;
539 goto done;
540 }
541
542 	/* commit: keep a permanent copy of the identify data for this namespace */
543
544 identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
545 memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));
546
547 ns = nvme_ns_get(sc, nsid);
548 KASSERT(ns);
549 ns->ident = identify;
550
551 done:
552 nvme_dmamem_free(sc, mem);
553
554 return rv;
555 }
556
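/*
 * Start a namespace read or write described by the context.  The data
 * buffer is loaded into the ccb's DMA map; if it needs more than the
 * two PRP entries that fit in the submission queue entry, the
 * remaining segment addresses are written into this ccb's slice of the
 * shared PRP-list memory before the command is submitted (or polled
 * for completion when NVME_NS_CTX_F_POLL is set).
 */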
557 int
558 nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
559 {
560 struct nvme_queue *q = nvme_get_q(sc);
561 struct nvme_ccb *ccb;
562 bus_dmamap_t dmap;
563 int i, error;
564
565 ccb = nvme_ccb_get(q);
566 if (ccb == NULL)
567 return EAGAIN;
568
569 ccb->ccb_done = nvme_ns_io_done;
570 ccb->ccb_cookie = ctx;
571
572 dmap = ccb->ccb_dmamap;
573 error = bus_dmamap_load(sc->sc_dmat, dmap, ctx->nnc_data,
574 ctx->nnc_datasize, NULL,
575 (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL) ?
576 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
577 (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
578 BUS_DMA_READ : BUS_DMA_WRITE));
579 if (error) {
580 nvme_ccb_put(q, ccb);
581 return error;
582 }
583
584 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
585 ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
586 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
587
588 if (dmap->dm_nsegs > 2) {
589 for (i = 1; i < dmap->dm_nsegs; i++) {
590 htolem64(&ccb->ccb_prpl[i - 1],
591 dmap->dm_segs[i].ds_addr);
592 }
593 bus_dmamap_sync(sc->sc_dmat,
594 NVME_DMA_MAP(q->q_ccb_prpls),
595 ccb->ccb_prpl_off,
596 		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
597 BUS_DMASYNC_PREWRITE);
598 }
599
600 if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
601 if (nvme_poll(sc, q, ccb, nvme_ns_io_fill) != 0)
602 return EIO;
603 return 0;
604 }
605
606 nvme_q_submit(sc, q, ccb, nvme_ns_io_fill);
607 return 0;
608 }
609
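/*
 * Fill the I/O submission queue entry: opcode, namespace, starting LBA
 * and block count, plus the PRP fields.  PRP1 is always the first data
 * segment; PRP2 holds either the second segment or, when there are
 * more than two segments, the bus address of the PRP list built in
 * nvme_ns_dobio().
 */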
610 static void
611 nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
612 {
613 struct nvme_sqe_io *sqe = slot;
614 struct nvme_ns_context *ctx = ccb->ccb_cookie;
615 bus_dmamap_t dmap = ccb->ccb_dmamap;
616
617 sqe->opcode = ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
618 NVM_CMD_READ : NVM_CMD_WRITE;
619 htolem32(&sqe->nsid, ctx->nnc_nsid);
620
621 htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
622 switch (dmap->dm_nsegs) {
623 case 1:
624 break;
625 case 2:
626 htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
627 break;
628 default:
629 /* the prp list is already set up and synced */
630 htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
631 break;
632 }
633
634 htolem64(&sqe->slba, ctx->nnc_blkno);
635 htolem16(&sqe->nlb, (ctx->nnc_datasize / ctx->nnc_secsize) - 1);
636 }
637
638 static void
639 nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
640 struct nvme_cqe *cqe)
641 {
642 struct nvme_softc *sc = q->q_sc;
643 struct nvme_ns_context *ctx = ccb->ccb_cookie;
644 bus_dmamap_t dmap = ccb->ccb_dmamap;
645 uint16_t flags;
646
647 if (dmap->dm_nsegs > 2) {
648 bus_dmamap_sync(sc->sc_dmat,
649 NVME_DMA_MAP(q->q_ccb_prpls),
650 ccb->ccb_prpl_off,
651 		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
652 BUS_DMASYNC_POSTWRITE);
653 }
654
655 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
656 ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
657 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
658
659 bus_dmamap_unload(sc->sc_dmat, dmap);
660 nvme_ccb_put(q, ccb);
661
662 flags = lemtoh16(&cqe->flags);
663
664 ctx->nnc_status = flags;
665 (*ctx->nnc_done)(ctx);
666 }
667
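/*
 * Issue a FLUSH for the namespace, polled or asynchronous, mirroring
 * the I/O path above but without any data transfer.
 */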
668 int
669 nvme_ns_sync(struct nvme_softc *sc, struct nvme_ns_context *ctx)
670 {
671 struct nvme_queue *q = nvme_get_q(sc);
672 struct nvme_ccb *ccb;
673
674 ccb = nvme_ccb_get(q);
675 if (ccb == NULL)
676 return EAGAIN;
677
678 ccb->ccb_done = nvme_ns_sync_done;
679 ccb->ccb_cookie = ctx;
680
681 if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
682 if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill) != 0)
683 return EIO;
684 return 0;
685 }
686
687 nvme_q_submit(sc, q, ccb, nvme_ns_sync_fill);
688 return 0;
689 }
690
691 static void
692 nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
693 {
694 struct nvme_sqe *sqe = slot;
695 struct nvme_ns_context *ctx = ccb->ccb_cookie;
696
697 sqe->opcode = NVM_CMD_FLUSH;
698 htolem32(&sqe->nsid, ctx->nnc_nsid);
699 }
700
701 static void
702 nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
703 struct nvme_cqe *cqe)
704 {
705 struct nvme_ns_context *ctx = ccb->ccb_cookie;
706 uint16_t flags;
707
708 nvme_ccb_put(q, ccb);
709
710 flags = lemtoh16(&cqe->flags);
711
712 ctx->nnc_status = flags;
713 (*ctx->nnc_done)(ctx);
714 }
715
716 void
717 nvme_ns_free(struct nvme_softc *sc, uint16_t nsid)
718 {
719 struct nvme_namespace *ns;
720 struct nvm_identify_namespace *identify;
721
722 ns = nvme_ns_get(sc, nsid);
723 KASSERT(ns);
724
725 identify = ns->ident;
726 ns->ident = NULL;
727 if (identify != NULL)
728 kmem_free(identify, sizeof(*identify));
729 }
730
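/*
 * Copy a command into the next free submission queue slot and ring the
 * queue's tail doorbell.  The caller-supplied fill callback writes the
 * command into the slot; the command identifier is always taken from
 * the ccb so the completion can be matched back to it.
 */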
731 static void
732 nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
733 void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
734 {
735 struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
736 uint32_t tail;
737
738 mutex_enter(&q->q_sq_mtx);
739 tail = q->q_sq_tail;
740 if (++q->q_sq_tail >= q->q_entries)
741 q->q_sq_tail = 0;
742
743 sqe += tail;
744
745 bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
746 sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
747 memset(sqe, 0, sizeof(*sqe));
748 (*fill)(q, ccb, sqe);
749 sqe->cid = ccb->ccb_id;
750 bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
751 sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
752
753 nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
754 mutex_exit(&q->q_sq_mtx);
755 }
756
757 struct nvme_poll_state {
758 struct nvme_sqe s;
759 struct nvme_cqe c;
760 };
761
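/*
 * Execute a command synchronously.  The ccb's done handler and cookie
 * are temporarily replaced so the completion entry is copied into a
 * local nvme_poll_state, then the queue is drained until that entry's
 * phase bit flips.  The original done handler is invoked afterwards,
 * and the status field (with the phase bit masked off) is returned,
 * so zero means success.
 */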
762 static int
763 nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
764 void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
765 {
766 struct nvme_poll_state state;
767 void (*done)(struct nvme_queue *, struct nvme_ccb *, struct nvme_cqe *);
768 void *cookie;
769 uint16_t flags;
770
771 memset(&state, 0, sizeof(state));
772 (*fill)(q, ccb, &state.s);
773
774 done = ccb->ccb_done;
775 cookie = ccb->ccb_cookie;
776
777 ccb->ccb_done = nvme_poll_done;
778 ccb->ccb_cookie = &state;
779
780 nvme_q_submit(sc, q, ccb, nvme_poll_fill);
781 while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
782 if (nvme_q_complete(sc, q) == 0)
783 delay(10);
784
785 /* XXX no timeout? */
786 }
787
788 ccb->ccb_cookie = cookie;
789 done(q, ccb, &state.c);
790
791 flags = lemtoh16(&state.c.flags);
792
793 return flags & ~NVME_CQE_PHASE;
794 }
795
796 static void
797 nvme_poll_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
798 {
799 struct nvme_sqe *sqe = slot;
800 struct nvme_poll_state *state = ccb->ccb_cookie;
801
802 *sqe = state->s;
803 }
804
805 static void
806 nvme_poll_done(struct nvme_queue *q, struct nvme_ccb *ccb,
807 struct nvme_cqe *cqe)
808 {
809 struct nvme_poll_state *state = ccb->ccb_cookie;
810
811 SET(cqe->flags, htole16(NVME_CQE_PHASE));
812 state->c = *cqe;
813 }
814
815 static void
816 nvme_sqe_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
817 {
818 struct nvme_sqe *src = ccb->ccb_cookie;
819 struct nvme_sqe *dst = slot;
820
821 *dst = *src;
822 }
823
824 static void
825 nvme_empty_done(struct nvme_queue *q, struct nvme_ccb *ccb,
826 struct nvme_cqe *cqe)
827 {
828 }
829
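/*
 * Reap completions.  Entries are consumed while their phase bit
 * matches the queue's current phase; each wrap of the head index
 * toggles the expected phase.  Returns 1 if any entry was processed
 * (and the head doorbell updated), 0 if the queue was empty, or -1 if
 * the completion mutex was already held.
 */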
830 static int
831 nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
832 {
833 struct nvme_ccb *ccb;
834 struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
835 uint32_t head;
836 uint16_t flags;
837 int rv = 0;
838
839 if (!mutex_tryenter(&q->q_cq_mtx))
840 return -1;
841
842 nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
843 head = q->q_cq_head;
844 for (;;) {
845 cqe = &ring[head];
846 flags = lemtoh16(&cqe->flags);
847 if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
848 break;
849
850 ccb = &q->q_ccbs[cqe->cid];
851 ccb->ccb_done(q, ccb, cqe);
852
853 if (++head >= q->q_entries) {
854 head = 0;
855 q->q_cq_phase ^= NVME_CQE_PHASE;
856 }
857
858 rv = 1;
859 }
860 nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
861
862 if (rv)
863 nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
864 mutex_exit(&q->q_cq_mtx);
865
866 return rv;
867 }
868
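/*
 * IDENTIFY CONTROLLER (CNS 1): print the model, firmware and serial
 * strings, clamp sc_mdts to the controller's maximum data transfer
 * size (a power of two in units of the minimum page size), and record
 * the namespace count and a copy of the identify data.
 */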
869 static int
870 nvme_identify(struct nvme_softc *sc, u_int mps)
871 {
872 char sn[41], mn[81], fr[17];
873 struct nvm_identify_controller *identify;
874 struct nvme_dmamem *mem;
875 struct nvme_ccb *ccb;
876 u_int mdts;
877 int rv = 1;
878
879 ccb = nvme_ccb_get(sc->sc_admin_q);
880 if (ccb == NULL)
881 panic("%s: nvme_ccb_get returned NULL", __func__);
882
883 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
884 	if (mem == NULL) {
		nvme_ccb_put(sc->sc_admin_q, ccb);
885 		return 1;
	}
886
887 ccb->ccb_done = nvme_empty_done;
888 ccb->ccb_cookie = mem;
889
890 nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
891 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
892 nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
893
894 nvme_ccb_put(sc->sc_admin_q, ccb);
895
896 if (rv != 0)
897 goto done;
898
899 identify = NVME_DMA_KVA(mem);
900
901 strnvisx(sn, sizeof(sn), (const char *)identify->sn,
902 sizeof(identify->sn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
903 strnvisx(mn, sizeof(mn), (const char *)identify->mn,
904 sizeof(identify->mn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
905 strnvisx(fr, sizeof(fr), (const char *)identify->fr,
906 sizeof(identify->fr), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
907 aprint_normal_dev(sc->sc_dev, "%s, firmware %s, serial %s\n", mn, fr,
908 sn);
909
910 if (identify->mdts > 0) {
911 mdts = (1 << identify->mdts) * (1 << mps);
912 if (mdts < sc->sc_mdts)
913 sc->sc_mdts = mdts;
914 }
915
916 sc->sc_nn = lemtoh32(&identify->nn);
917
918 memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));
919
920 done:
921 nvme_dmamem_free(sc, mem);
922
923 return rv;
924 }
925
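/*
 * Create a hardware I/O queue pair: the completion queue first (with
 * interrupts enabled and, in multi-queue mode, bound to the vector
 * matching the queue id), then the submission queue pointing at it.
 */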
926 static int
927 nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
928 {
929 struct nvme_sqe_q sqe;
930 struct nvme_ccb *ccb;
931 int rv;
932
933 if (sc->sc_use_mq && sc->sc_intr_establish(sc, q->q_id, q))
934 return 1;
935
936 ccb = nvme_ccb_get(sc->sc_admin_q);
937 KASSERT(ccb != NULL);
938
939 ccb->ccb_done = nvme_empty_done;
940 ccb->ccb_cookie = &sqe;
941
942 memset(&sqe, 0, sizeof(sqe));
943 sqe.opcode = NVM_ADMIN_ADD_IOCQ;
944 htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
945 htolem16(&sqe.qsize, q->q_entries - 1);
946 htolem16(&sqe.qid, q->q_id);
947 sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
948 if (sc->sc_use_mq)
949 htolem16(&sqe.cqid, q->q_id); /* qid == vector */
950
951 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
952 if (rv != 0)
953 goto fail;
954
955 ccb->ccb_done = nvme_empty_done;
956 ccb->ccb_cookie = &sqe;
957
958 memset(&sqe, 0, sizeof(sqe));
959 sqe.opcode = NVM_ADMIN_ADD_IOSQ;
960 htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
961 htolem16(&sqe.qsize, q->q_entries - 1);
962 htolem16(&sqe.qid, q->q_id);
963 htolem16(&sqe.cqid, q->q_id);
964 sqe.qflags = NVM_SQE_Q_PC;
965
966 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
967 if (rv != 0)
968 goto fail;
969
970 fail:
971 nvme_ccb_put(sc->sc_admin_q, ccb);
972 return rv;
973 }
974
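/*
 * Tear down a hardware I/O queue pair in the reverse order of
 * creation: submission queue first, then its completion queue, and
 * finally release the per-queue interrupt in multi-queue mode.
 */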
975 static int
976 nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
977 {
978 struct nvme_sqe_q sqe;
979 struct nvme_ccb *ccb;
980 int rv;
981
982 ccb = nvme_ccb_get(sc->sc_admin_q);
983 KASSERT(ccb != NULL);
984
985 ccb->ccb_done = nvme_empty_done;
986 ccb->ccb_cookie = &sqe;
987
988 memset(&sqe, 0, sizeof(sqe));
989 sqe.opcode = NVM_ADMIN_DEL_IOSQ;
990 htolem16(&sqe.qid, q->q_id);
991
992 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
993 if (rv != 0)
994 goto fail;
995
996 ccb->ccb_done = nvme_empty_done;
997 ccb->ccb_cookie = &sqe;
998
999 memset(&sqe, 0, sizeof(sqe));
1000 sqe.opcode = NVM_ADMIN_DEL_IOCQ;
1002 htolem16(&sqe.qid, q->q_id);
1003
1004 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
1005 if (rv != 0)
1006 goto fail;
1007
1008 fail:
1009 nvme_ccb_put(sc->sc_admin_q, ccb);
1010
1011 if (rv == 0 && sc->sc_use_mq) {
1012 if (sc->sc_intr_disestablish(sc, q->q_id))
1013 rv = 1;
1014 }
1015
1016 return rv;
1017 }
1018
1019 static void
1020 nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1021 {
1022 struct nvme_sqe *sqe = slot;
1023 struct nvme_dmamem *mem = ccb->ccb_cookie;
1024
1025 sqe->opcode = NVM_ADMIN_IDENTIFY;
1026 htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
1027 htolem32(&sqe->cdw10, 1);
1028 }
1029
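/*
 * Allocate the per-queue command control blocks.  Every ccb gets its
 * own DMA map (sized for sc_mdts and sc_max_sgl + 1 segments) and a
 * fixed slice of one shared DMA buffer that holds its PRP list.
 */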
1030 static int
1031 nvme_ccbs_alloc(struct nvme_queue *q, u_int nccbs)
1032 {
1033 struct nvme_softc *sc = q->q_sc;
1034 struct nvme_ccb *ccb;
1035 bus_addr_t off;
1036 uint64_t *prpl;
1037 u_int i;
1038
1039 mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
1040 SIMPLEQ_INIT(&q->q_ccb_list);
1041
1042 q->q_ccbs = kmem_alloc(sizeof(*ccb) * nccbs, KM_SLEEP);
1043 if (q->q_ccbs == NULL)
1044 return 1;
1045
1046 q->q_nccbs = nccbs;
1047 q->q_ccb_prpls = nvme_dmamem_alloc(sc,
1048 sizeof(*prpl) * sc->sc_max_sgl * nccbs);
1049
1050 prpl = NVME_DMA_KVA(q->q_ccb_prpls);
1051 off = 0;
1052
1053 for (i = 0; i < nccbs; i++) {
1054 ccb = &q->q_ccbs[i];
1055
1056 if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
1057 sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
1058 sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1059 &ccb->ccb_dmamap) != 0)
1060 goto free_maps;
1061
1062 ccb->ccb_id = i;
1063 ccb->ccb_prpl = prpl;
1064 ccb->ccb_prpl_off = off;
1065 ccb->ccb_prpl_dva = NVME_DMA_DVA(q->q_ccb_prpls) + off;
1066
1067 SIMPLEQ_INSERT_TAIL(&q->q_ccb_list, ccb, ccb_entry);
1068
1069 prpl += sc->sc_max_sgl;
1070 off += sizeof(*prpl) * sc->sc_max_sgl;
1071 }
1072
1073 return 0;
1074
1075 free_maps:
1076 nvme_ccbs_free(q);
1077 return 1;
1078 }
1079
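/*
 * ccbs sit on a simple free list protected by q_ccb_mtx.  nvme_ccb_get()
 * returns NULL instead of sleeping when the list is empty, and callers
 * such as nvme_ns_dobio() report EAGAIN so the request can be retried.
 */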
1080 static struct nvme_ccb *
1081 nvme_ccb_get(struct nvme_queue *q)
1082 {
1083 struct nvme_ccb *ccb;
1084
1085 mutex_enter(&q->q_ccb_mtx);
1086 ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
1087 if (ccb != NULL)
1088 SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1089 mutex_exit(&q->q_ccb_mtx);
1090
1091 return ccb;
1092 }
1093
1094 static void
1095 nvme_ccb_put(struct nvme_queue *q, struct nvme_ccb *ccb)
1096 {
1097
1098 mutex_enter(&q->q_ccb_mtx);
1099 SIMPLEQ_INSERT_HEAD(&q->q_ccb_list, ccb, ccb_entry);
1100 mutex_exit(&q->q_ccb_mtx);
1101 }
1102
1103 static void
1104 nvme_ccbs_free(struct nvme_queue *q)
1105 {
1106 struct nvme_softc *sc = q->q_sc;
1107 struct nvme_ccb *ccb;
1108
1109 mutex_enter(&q->q_ccb_mtx);
1110 while ((ccb = SIMPLEQ_FIRST(&q->q_ccb_list)) != NULL) {
1111 SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1112 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1113 }
1114 mutex_exit(&q->q_ccb_mtx);
1115
1116 nvme_dmamem_free(sc, q->q_ccb_prpls);
1117 kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
1118 q->q_ccbs = NULL;
1119 mutex_destroy(&q->q_ccb_mtx);
1120 }
1121
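/*
 * Allocate the host-side state for one queue pair: DMA memory for the
 * submission and completion rings, the doorbell offsets derived from
 * the queue id and doorbell stride, the initial ring indices and
 * completion phase, and the queue's ccbs.
 */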
1122 static struct nvme_queue *
1123 nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
1124 {
1125 struct nvme_queue *q;
1126
1127 q = kmem_alloc(sizeof(*q), KM_SLEEP);
1128 if (q == NULL)
1129 return NULL;
1130
1131 q->q_sc = sc;
1132 q->q_sq_dmamem = nvme_dmamem_alloc(sc,
1133 sizeof(struct nvme_sqe) * entries);
1134 if (q->q_sq_dmamem == NULL)
1135 goto free;
1136
1137 q->q_cq_dmamem = nvme_dmamem_alloc(sc,
1138 sizeof(struct nvme_cqe) * entries);
1139 if (q->q_cq_dmamem == NULL)
1140 goto free_sq;
1141
1142 memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1143 memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1144
1145 mutex_init(&q->q_sq_mtx, MUTEX_DEFAULT, IPL_BIO);
1146 mutex_init(&q->q_cq_mtx, MUTEX_DEFAULT, IPL_BIO);
1147 q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
1148 q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
1149 q->q_id = id;
1150 q->q_entries = entries;
1151 q->q_sq_tail = 0;
1152 q->q_cq_head = 0;
1153 q->q_cq_phase = NVME_CQE_PHASE;
1154
1155 nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1156 nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1157
1158 if (nvme_ccbs_alloc(q, entries) != 0) {
1159 aprint_error_dev(sc->sc_dev, "unable to allocate ccbs\n");
1160 goto free_cq;
1161 }
1162
1163 return q;
1164
1165 free_cq:
1166 nvme_dmamem_free(sc, q->q_cq_dmamem);
1167 free_sq:
1168 nvme_dmamem_free(sc, q->q_sq_dmamem);
1169 free:
1170 kmem_free(q, sizeof(*q));
1171
1172 return NULL;
1173 }
1174
1175 static void
1176 nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
1177 {
1178 nvme_ccbs_free(q);
1179 nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
1180 nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
1181 nvme_dmamem_free(sc, q->q_cq_dmamem);
1182 nvme_dmamem_free(sc, q->q_sq_dmamem);
1183 kmem_free(q, sizeof(*q));
1184 }
1185
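/*
 * Interrupt handlers.  nvme_intr() is used when a single vector serves
 * the whole controller: it masks the interrupt via INTMS while
 * draining the admin queue and the first I/O queue, then unmasks it.
 * The MSI/MSI-X variants below drain only their own queue.
 */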
1186 int
1187 nvme_intr(void *xsc)
1188 {
1189 struct nvme_softc *sc = xsc;
1190 int rv = 0;
1191
1192 nvme_write4(sc, NVME_INTMS, 1);
1193
1194 if (nvme_q_complete(sc, sc->sc_admin_q))
1195 rv = 1;
1196 if (sc->sc_q != NULL)
1197 if (nvme_q_complete(sc, sc->sc_q[0]))
1198 rv = 1;
1199
1200 nvme_write4(sc, NVME_INTMC, 1);
1201
1202 return rv;
1203 }
1204
1205 int
1206 nvme_mq_msi_intr(void *xq)
1207 {
1208 struct nvme_queue *q = xq;
1209 struct nvme_softc *sc = q->q_sc;
1210 int rv = 0;
1211
1212 nvme_write4(sc, NVME_INTMS, 1U << q->q_id);
1213
1214 if (nvme_q_complete(sc, q))
1215 rv = 1;
1216
1217 nvme_write4(sc, NVME_INTMC, 1U << q->q_id);
1218
1219 return rv;
1220 }
1221
1222 int
1223 nvme_mq_msix_intr(void *xq)
1224 {
1225 struct nvme_queue *q = xq;
1226 int rv = 0;
1227
1228 if (nvme_q_complete(q->q_sc, q))
1229 rv = 1;
1230
1231 return rv;
1232 }
1233
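/*
 * DMA helpers: allocate one physically contiguous segment, map it into
 * kernel virtual address space, zero it and load it into its own DMA
 * map so that both the kernel virtual and the bus address are
 * available through the NVME_DMA_* macros.
 */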
1234 static struct nvme_dmamem *
1235 nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
1236 {
1237 struct nvme_dmamem *ndm;
1238 int nsegs;
1239
1240 ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
1241 if (ndm == NULL)
1242 return NULL;
1243
1244 ndm->ndm_size = size;
1245
1246 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1247 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
1248 goto ndmfree;
1249
1250 if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
1251 1, &nsegs, BUS_DMA_WAITOK) != 0)
1252 goto destroy;
1253
1254 if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
1255 &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
1256 goto free;
1257 memset(ndm->ndm_kva, 0, size);
1258
1259 if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
1260 NULL, BUS_DMA_WAITOK) != 0)
1261 goto unmap;
1262
1263 return ndm;
1264
1265 unmap:
1266 bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
1267 free:
1268 bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
1269 destroy:
1270 bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
1271 ndmfree:
1272 kmem_free(ndm, sizeof(*ndm));
1273 return NULL;
1274 }
1275
1276 static void
1277 nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
1278 {
1279 bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
1280 0, NVME_DMA_LEN(mem), ops);
1281 }
1282
1283 static void
1284 nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
1285 {
1286 bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
1287 bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
1288 bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
1289 bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
1290 kmem_free(ndm, sizeof(*ndm));
1291 }
1292