/*	$NetBSD: nvme.c,v 1.1 2016/05/01 10:21:02 nonaka Exp $	*/
/*	$OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.1 2016/05/01 10:21:02 nonaka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

int nvme_adminq_size = 128;
int nvme_ioq_size = 128;

static int	nvme_print(void *, const char *);

static int	nvme_ready(struct nvme_softc *, uint32_t);
static int	nvme_enable(struct nvme_softc *, u_int);
static int	nvme_disable(struct nvme_softc *);
static int	nvme_shutdown(struct nvme_softc *);

static void	nvme_version(struct nvme_softc *, uint32_t);
#ifdef NVME_DEBUG
static void	nvme_dumpregs(struct nvme_softc *);
#endif
static int	nvme_identify(struct nvme_softc *, u_int);
static void	nvme_fill_identify(struct nvme_queue *, struct nvme_ccb *,
		    void *);

static int	nvme_ccbs_alloc(struct nvme_queue *, u_int);
static void	nvme_ccbs_free(struct nvme_queue *);

static struct nvme_ccb *
		nvme_ccb_get(struct nvme_queue *);
static void	nvme_ccb_put(struct nvme_queue *, struct nvme_ccb *);

static int	nvme_poll(struct nvme_softc *, struct nvme_queue *,
		    struct nvme_ccb *, void (*)(struct nvme_queue *,
		    struct nvme_ccb *, void *));
static void	nvme_poll_fill(struct nvme_queue *, struct nvme_ccb *, void *);
static void	nvme_poll_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_sqe_fill(struct nvme_queue *, struct nvme_ccb *, void *);
static void	nvme_empty_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);

static struct nvme_queue *
		nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
static int	nvme_q_create(struct nvme_softc *, struct nvme_queue *);
static int	nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
static void	nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
		    struct nvme_ccb *, void (*)(struct nvme_queue *,
		    struct nvme_ccb *, void *));
static int	nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
static void	nvme_q_free(struct nvme_softc *, struct nvme_queue *);

static struct nvme_dmamem *
		nvme_dmamem_alloc(struct nvme_softc *, size_t);
static void	nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
static void	nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *,
		    int);

static void	nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_ns_io_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_ns_sync_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_ns_sync_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);

static void	nvme_strvis(u_char *, int, const u_char *, int);

#define nvme_read4(_s, _r) \
	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write4(_s, _r, _v) \
	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
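
/*
 * 64-bit registers (CAP, ASQ, ACQ) are accessed directly on LP64
 * platforms.  Elsewhere they are split into two 4-byte accesses, with
 * the low dword at the given offset and the high dword at offset + 4,
 * independent of host byte order.
 */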
#ifdef __LP64__
#define nvme_read8(_s, _r) \
	bus_space_read_8((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write8(_s, _r, _v) \
	bus_space_write_8((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
#else /* __LP64__ */
static inline uint64_t
nvme_read8(struct nvme_softc *sc, bus_size_t r)
{
	uint64_t v;
	uint32_t *a = (uint32_t *)&v;

#if _BYTE_ORDER == _LITTLE_ENDIAN
	a[0] = nvme_read4(sc, r);
	a[1] = nvme_read4(sc, r + 4);
#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
	a[1] = nvme_read4(sc, r);
	a[0] = nvme_read4(sc, r + 4);
#endif

	return v;
}

static inline void
nvme_write8(struct nvme_softc *sc, bus_size_t r, uint64_t v)
{
	uint32_t *a = (uint32_t *)&v;

#if _BYTE_ORDER == _LITTLE_ENDIAN
	nvme_write4(sc, r, a[0]);
	nvme_write4(sc, r + 4, a[1]);
#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
	nvme_write4(sc, r, a[1]);
	nvme_write4(sc, r + 4, a[0]);
#endif
}
#endif /* __LP64__ */
#define nvme_barrier(_s, _r, _l, _f) \
	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))

pool_cache_t	nvme_ns_ctx_cache;
ONCE_DECL(nvme_init_once);

static int
nvme_init(void)
{
	nvme_ns_ctx_cache = pool_cache_init(sizeof(struct nvme_ns_context),
	    0, 0, 0, "nvme_ns_ctx", NULL, IPL_BIO, NULL, NULL, NULL);
	KASSERT(nvme_ns_ctx_cache != NULL);
	return 0;
}

static void
nvme_version(struct nvme_softc *sc, uint32_t ver)
{
	const char *v = NULL;

	switch (ver) {
	case NVME_VS_1_0:
		v = "1.0";
		break;
	case NVME_VS_1_1:
		v = "1.1";
		break;
	case NVME_VS_1_2:
		v = "1.2";
		break;
	default:
		aprint_error_dev(sc->sc_dev, "unknown version 0x%08x\n", ver);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "NVMe %s\n", v);
}

#ifdef NVME_DEBUG
static void
nvme_dumpregs(struct nvme_softc *sc)
{
	uint64_t r8;
	uint32_t r4;

#define	DEVNAME(_sc) device_xname((_sc)->sc_dev)
	r8 = nvme_read8(sc, NVME_CAP);
	printf("%s: cap 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_CAP));
	printf("%s: mpsmax %u (%u)\n", DEVNAME(sc),
	    (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
	printf("%s: mpsmin %u (%u)\n", DEVNAME(sc),
	    (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
	printf("%s: css %llu\n", DEVNAME(sc), NVME_CAP_CSS(r8));
	printf("%s: nssrs %llu\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
	printf("%s: dstrd %u\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
	printf("%s: to %llu msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
	printf("%s: ams %llu\n", DEVNAME(sc), NVME_CAP_AMS(r8));
	printf("%s: cqr %llu\n", DEVNAME(sc), NVME_CAP_CQR(r8));
	printf("%s: mqes %llu\n", DEVNAME(sc), NVME_CAP_MQES(r8));

	printf("%s: vs 0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));

	r4 = nvme_read4(sc, NVME_CC);
	printf("%s: cc 0x%04x\n", DEVNAME(sc), r4);
	printf("%s: iocqes %u\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4));
	printf("%s: iosqes %u\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4));
	printf("%s: shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
	printf("%s: ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
	printf("%s: mps %u\n", DEVNAME(sc), NVME_CC_MPS_R(r4));
	printf("%s: css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
	printf("%s: en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN));

	printf("%s: csts 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_CSTS));
	printf("%s: aqa 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_AQA));
	printf("%s: asq 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
	printf("%s: acq 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
#undef	DEVNAME
}
#endif	/* NVME_DEBUG */

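/*
 * Poll CSTS.RDY until it matches the requested state, checking once a
 * millisecond for up to sc_rdy_to iterations (sc_rdy_to is derived from
 * CAP.TO at attach time).  Returns non-zero on timeout.
 */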
static int
nvme_ready(struct nvme_softc *sc, uint32_t rdy)
{
	u_int i = 0;

	while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
		if (i++ > sc->sc_rdy_to)
			return 1;

		delay(1000);
		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
	}

	return 0;
}

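/*
 * Enable the controller: program the admin queue attributes and base
 * addresses (AQA/ASQ/ACQ), select 64-byte SQ and 16-byte CQ entries,
 * the NVM command set, round-robin arbitration and the memory page
 * size, then set CC.EN and wait for CSTS.RDY.
 */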
static int
nvme_enable(struct nvme_softc *sc, u_int mps)
{
	uint32_t cc;

	cc = nvme_read4(sc, NVME_CC);
	if (ISSET(cc, NVME_CC_EN))
		return nvme_ready(sc, NVME_CSTS_RDY);

	nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
	    NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);

	nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
	nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);

	CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
	SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
	SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
	SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
	SET(cc, NVME_CC_MPS(mps));
	SET(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return nvme_ready(sc, NVME_CSTS_RDY);
}

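/*
 * Disable the controller: if it is currently enabled (and not in a
 * fatal state), wait for it to become ready first, then clear CC.EN
 * and wait for CSTS.RDY to drop to 0.
 */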
static int
nvme_disable(struct nvme_softc *sc)
{
	uint32_t cc, csts;

	cc = nvme_read4(sc, NVME_CC);
	if (ISSET(cc, NVME_CC_EN)) {
		csts = nvme_read4(sc, NVME_CSTS);
		if (!ISSET(csts, NVME_CSTS_CFS) &&
		    nvme_ready(sc, NVME_CSTS_RDY) != 0)
			return 1;
	}

	CLR(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return nvme_ready(sc, 0);
}

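/*
 * Bus-independent part of device attach: sanity-check the register
 * mapping, read CAP to size pages and timeouts, reset the controller,
 * set up the admin queue, identify the controller, create the I/O
 * queues, and finally attach a child device for each of the sc_nn
 * namespaces the controller reports.
 */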
int
nvme_attach(struct nvme_softc *sc)
{
	struct nvme_attach_args naa;
	uint64_t cap;
	uint32_t reg;
	u_int dstrd;
	u_int mps = PAGE_SHIFT;
	int adminq_entries = nvme_adminq_size;
	int ioq_entries = nvme_ioq_size;
	int i;

	RUN_ONCE(&nvme_init_once, nvme_init);

	reg = nvme_read4(sc, NVME_VS);
	if (reg == 0xffffffff) {
		aprint_error_dev(sc->sc_dev, "invalid mapping\n");
		return 1;
	}

	nvme_version(sc, reg);

	cap = nvme_read8(sc, NVME_CAP);
	dstrd = NVME_CAP_DSTRD(cap);
	if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
		aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
		    "is greater than CPU page size %u\n",
		    1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
		return 1;
	}
	if (NVME_CAP_MPSMAX(cap) < mps)
		mps = NVME_CAP_MPSMAX(cap);

	sc->sc_rdy_to = NVME_CAP_TO(cap);
	sc->sc_mps = 1 << mps;
	sc->sc_mdts = MAXPHYS;
	sc->sc_max_sgl = 2;

	if (nvme_disable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to disable controller\n");
		return 1;
	}

	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries, dstrd);
	if (sc->sc_admin_q == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate admin queue\n");
		return 1;
	}
	if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q))
		goto free_admin_q;

	if (nvme_enable(sc, mps) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable controller\n");
		goto disestablish_admin_q;
	}

	if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to identify controller\n");
		goto disable;
	}

	/* we know how big things are now */
	sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;

	/* reallocate ccbs of admin queue with new max sgl. */
	nvme_ccbs_free(sc->sc_admin_q);
	nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);

	sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
	if (sc->sc_q == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate io queue\n");
		goto disable;
	}
	for (i = 0; i < sc->sc_nq; i++) {
		sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries, dstrd);
		if (sc->sc_q[i] == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate io queue\n");
			goto free_q;
		}
		if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create io queue\n");
			nvme_q_free(sc, sc->sc_q[i]);
			goto free_q;
		}
	}

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMC, 1);

	sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
	    KM_SLEEP);
	for (i = 0; i < sc->sc_nn; i++) {
		memset(&naa, 0, sizeof(naa));
		naa.naa_nsid = i + 1;
		naa.naa_qentries = ioq_entries;
		sc->sc_namespaces[i].dev = config_found(sc->sc_dev, &naa,
		    nvme_print);
	}

	return 0;

free_q:
	while (--i >= 0) {
		nvme_q_delete(sc, sc->sc_q[i]);
		nvme_q_free(sc, sc->sc_q[i]);
	}
disable:
	nvme_disable(sc);
disestablish_admin_q:
	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
free_admin_q:
	nvme_q_free(sc, sc->sc_admin_q);

	return 1;
}

static int
nvme_print(void *aux, const char *pnp)
{
	struct nvme_attach_args *naa = aux;

	if (pnp)
		aprint_normal("at %s", pnp);

	if (naa->naa_nsid > 0)
		aprint_normal(" nsid %d", naa->naa_nsid);

	return UNCONF;
}

int
nvme_detach(struct nvme_softc *sc, int flags)
{
	int i, error;

	error = config_detach_children(sc->sc_dev, flags);
	if (error)
		return error;

	error = nvme_shutdown(sc);
	if (error)
		return error;

	for (i = 0; i < sc->sc_nq; i++)
		nvme_q_free(sc, sc->sc_q[i]);
	kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
	nvme_q_free(sc, sc->sc_admin_q);

	return 0;
}

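/*
 * Orderly shutdown: delete the I/O queues, then request a normal
 * shutdown via CC.SHN and poll CSTS.SHST for up to 4 seconds.  If any
 * step fails, fall back to simply disabling the controller.
 */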
static int
nvme_shutdown(struct nvme_softc *sc)
{
	uint32_t cc, csts;
	bool disabled = false;
	int i;

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMS, 1);

	for (i = 0; i < sc->sc_nq; i++) {
		if (nvme_q_delete(sc, sc->sc_q[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to delete io queue %d, disabling\n", i + 1);
			disabled = true;
		}
	}
	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
	if (disabled)
		goto disable;

	cc = nvme_read4(sc, NVME_CC);
	CLR(cc, NVME_CC_SHN_MASK);
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
	nvme_write4(sc, NVME_CC, cc);

	for (i = 0; i < 4000; i++) {
		nvme_barrier(sc, 0, sc->sc_ios,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		csts = nvme_read4(sc, NVME_CSTS);
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
			return 0;

		delay(1000);
	}

	aprint_error_dev(sc->sc_dev, "unable to shutdown, disabling\n");

disable:
	nvme_disable(sc);
	return 0;
}

void
nvme_childdet(device_t self, device_t child)
{
	struct nvme_softc *sc = device_private(self);
	int i;

	for (i = 0; i < sc->sc_nn; i++) {
		if (sc->sc_namespaces[i].dev == child) {
			/* Already freed ns->ident. */
			sc->sc_namespaces[i].dev = NULL;
			break;
		}
	}
}

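/*
 * Issue IDENTIFY with CNS=0 for the given namespace on the admin queue
 * and cache a copy of the identify data in the namespace's ident field.
 */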
int
nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
{
	struct nvme_sqe sqe;
	struct nvm_identify_namespace *identify;
	struct nvme_dmamem *mem;
	struct nvme_ccb *ccb;
	struct nvme_namespace *ns;
	int rv;

	KASSERT(nsid > 0);

	ccb = nvme_ccb_get(sc->sc_admin_q);
	KASSERT(ccb != NULL);

	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
	if (mem == NULL)
		return ENOMEM;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_IDENTIFY;
	htolem32(&sqe.nsid, nsid);
	htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
	htolem32(&sqe.cdw10, 0);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);

	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv != 0) {
		rv = EIO;
		goto done;
	}

	/* commit */

	identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
	memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));

	ns = nvme_ns_get(sc, nsid);
	KASSERT(ns);
	ns->ident = identify;

done:
	nvme_dmamem_free(sc, mem);

	return rv;
}

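/*
 * Namespace read/write entry point.  The data buffer is loaded into the
 * ccb's DMA map; the first two segments go directly into the command's
 * PRP entries, and longer transfers use the per-ccb PRP list set up
 * here and pointed to from PRP2 by nvme_ns_io_fill().
 */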
int
nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
{
	struct nvme_queue *q = nvme_get_q(sc);
	struct nvme_ccb *ccb;
	bus_dmamap_t dmap;
	int i, error;

	ccb = nvme_ccb_get(q);
	if (ccb == NULL)
		return EAGAIN;

	ccb->ccb_done = nvme_ns_io_done;
	ccb->ccb_cookie = ctx;

	dmap = ccb->ccb_dmamap;
	error = bus_dmamap_load(sc->sc_dmat, dmap, ctx->nnc_data,
	    ctx->nnc_datasize, NULL,
	    (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL) ?
	      BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
	    (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	      BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		nvme_ccb_put(q, ccb);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (dmap->dm_nsegs > 2) {
		for (i = 1; i < dmap->dm_nsegs; i++) {
			htolem64(&ccb->ccb_prpl[i - 1],
			    dmap->dm_segs[i].ds_addr);
		}
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(q->q_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_PREWRITE);
	}

	if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
		if (nvme_poll(sc, q, ccb, nvme_ns_io_fill) != 0)
			return EIO;
		return 0;
	}

	nvme_q_submit(sc, q, ccb, nvme_ns_io_fill);
	return 0;
}

static void
nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe_io *sqe = slot;
	struct nvme_ns_context *ctx = ccb->ccb_cookie;
	bus_dmamap_t dmap = ccb->ccb_dmamap;

	sqe->opcode = ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	    NVM_CMD_READ : NVM_CMD_WRITE;
	htolem32(&sqe->nsid, ctx->nnc_nsid);

	htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
	switch (dmap->dm_nsegs) {
	case 1:
		break;
	case 2:
		htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
		break;
	default:
		/* the prp list is already set up and synced */
		htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
		break;
	}

	htolem64(&sqe->slba, ctx->nnc_blkno);
	htolem16(&sqe->nlb, (ctx->nnc_datasize / ctx->nnc_secsize) - 1);
}

static void
nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_ns_context *ctx = ccb->ccb_cookie;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	uint16_t flags;

	if (dmap->dm_nsegs > 2) {
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(q->q_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_POSTWRITE);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmap);
	nvme_ccb_put(q, ccb);

	flags = lemtoh16(&cqe->flags);

	ctx->nnc_status = flags;
	(*ctx->nnc_done)(ctx);
}

int
nvme_ns_sync(struct nvme_softc *sc, struct nvme_ns_context *ctx)
{
	struct nvme_queue *q = nvme_get_q(sc);
	struct nvme_ccb *ccb;

	ccb = nvme_ccb_get(q);
	if (ccb == NULL)
		return EAGAIN;

	ccb->ccb_done = nvme_ns_sync_done;
	ccb->ccb_cookie = ctx;

	if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
		if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill) != 0)
			return EIO;
		return 0;
	}

	nvme_q_submit(sc, q, ccb, nvme_ns_sync_fill);
	return 0;
}

static void
nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_ns_context *ctx = ccb->ccb_cookie;

	sqe->opcode = NVM_CMD_FLUSH;
	htolem32(&sqe->nsid, ctx->nnc_nsid);
}

static void
nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_ns_context *ctx = ccb->ccb_cookie;
	uint16_t flags;

	nvme_ccb_put(q, ccb);

	flags = lemtoh16(&cqe->flags);

	ctx->nnc_status = flags;
	(*ctx->nnc_done)(ctx);
}

void
nvme_ns_free(struct nvme_softc *sc, uint16_t nsid)
{
	struct nvme_namespace *ns;
	struct nvm_identify_namespace *identify;

	ns = nvme_ns_get(sc, nsid);
	KASSERT(ns);

	identify = ns->ident;
	ns->ident = NULL;
	if (identify != NULL)
		kmem_free(identify, sizeof(*identify));
}

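/*
 * Claim the next submission queue slot, let the caller's fill routine
 * build the command in place, tag it with the ccb id, and ring the SQ
 * tail doorbell.
 */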
static void
nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
{
	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	uint32_t tail;

	mutex_enter(&q->q_sq_mtx);
	tail = q->q_sq_tail;
	if (++q->q_sq_tail >= q->q_entries)
		q->q_sq_tail = 0;

	sqe += tail;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
	memset(sqe, 0, sizeof(*sqe));
	(*fill)(q, ccb, sqe);
	sqe->cid = ccb->ccb_id;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);

	nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
	mutex_exit(&q->q_sq_mtx);
}

struct nvme_poll_state {
	struct nvme_sqe s;
	struct nvme_cqe c;
};

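/*
 * Run a command synchronously: the ccb's completion hook and cookie are
 * temporarily redirected so the completion entry is copied into the
 * on-stack state, and the queue is polled until the entry's phase bit
 * shows up.  Returns the completion status with the phase bit masked
 * off (0 on success).
 */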
static int
nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
{
	struct nvme_poll_state state;
	void (*done)(struct nvme_queue *, struct nvme_ccb *, struct nvme_cqe *);
	void *cookie;
	uint16_t flags;

	memset(&state, 0, sizeof(state));
	(*fill)(q, ccb, &state.s);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = nvme_poll_done;
	ccb->ccb_cookie = &state;

	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
	while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
		if (nvme_q_complete(sc, q) == 0)
			delay(10);

		/* XXX no timeout? */
	}

	ccb->ccb_cookie = cookie;
	done(q, ccb, &state.c);

	flags = lemtoh16(&state.c.flags);

	return flags & ~NVME_CQE_PHASE;
}

static void
nvme_poll_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_poll_state *state = ccb->ccb_cookie;

	*sqe = state->s;
}

static void
nvme_poll_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_poll_state *state = ccb->ccb_cookie;

	SET(cqe->flags, htole16(NVME_CQE_PHASE));
	state->c = *cqe;
}

static void
nvme_sqe_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *src = ccb->ccb_cookie;
	struct nvme_sqe *dst = slot;

	*dst = *src;
}

static void
nvme_empty_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
}

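/*
 * Drain the completion queue: consume entries while their phase tag
 * matches the queue's current phase, invoking each ccb's done hook,
 * then advance the CQ head doorbell.  Returns non-zero if any entry was
 * processed, -1 if the queue was busy.
 */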
static int
nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_ccb *ccb;
	struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
	uint32_t head;
	uint16_t flags;
	int rv = 0;

	if (!mutex_tryenter(&q->q_cq_mtx))
		return -1;

	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	head = q->q_cq_head;
	for (;;) {
		cqe = &ring[head];
		flags = lemtoh16(&cqe->flags);
		if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
			break;

		ccb = &q->q_ccbs[cqe->cid];
		ccb->ccb_done(q, ccb, cqe);

		if (++head >= q->q_entries) {
			head = 0;
			q->q_cq_phase ^= NVME_CQE_PHASE;
		}

		rv = 1;
	}
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	if (rv)
		nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
	mutex_exit(&q->q_cq_mtx);

	return rv;
}

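/*
 * Fetch the IDENTIFY CONTROLLER data (CNS=1), print the model, firmware
 * and serial strings, and derive the maximum transfer size (MDTS) and
 * namespace count (NN) used for the rest of attach.
 */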
static int
nvme_identify(struct nvme_softc *sc, u_int mps)
{
	char sn[41], mn[81], fr[17];
	struct nvm_identify_controller *identify;
	struct nvme_dmamem *mem;
	struct nvme_ccb *ccb;
	u_int mdts;
	int rv = 1;

	ccb = nvme_ccb_get(sc->sc_admin_q);
	if (ccb == NULL)
		panic("%s: nvme_ccb_get returned NULL", __func__);

	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
	if (mem == NULL)
		return 1;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = mem;

	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);

	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv != 0)
		goto done;

	identify = NVME_DMA_KVA(mem);

	nvme_strvis(sn, sizeof(sn), identify->sn, sizeof(identify->sn));
	nvme_strvis(mn, sizeof(mn), identify->mn, sizeof(identify->mn));
	nvme_strvis(fr, sizeof(fr), identify->fr, sizeof(identify->fr));
	aprint_normal_dev(sc->sc_dev, "%s, firmware %s, serial %s\n", mn, fr,
	    sn);

	if (identify->mdts > 0) {
		mdts = (1 << identify->mdts) * (1 << mps);
		if (mdts < sc->sc_mdts)
			sc->sc_mdts = mdts;
	}

	sc->sc_nn = lemtoh32(&identify->nn);

	memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));

done:
	nvme_dmamem_free(sc, mem);

	return rv;
}

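/*
 * Create a hardware I/O queue pair: the completion queue first (so it
 * can be named as the target of the submission queue), then the
 * submission queue, both as physically contiguous queues.
 */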
static int
nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	if (sc->sc_use_mq && sc->sc_intr_establish(sc, q->q_id, q))
		return 1;

	ccb = nvme_ccb_get(sc->sc_admin_q);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOCQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
	if (sc->sc_use_mq)
		htolem16(&sqe.cqid, q->q_id);	/* qid == vector */

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOSQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	htolem16(&sqe.cqid, q->q_id);
	sqe.qflags = NVM_SQE_Q_PC;

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

fail:
	nvme_ccb_put(sc->sc_admin_q, ccb);
	return rv;
}

static int
nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	ccb = nvme_ccb_get(sc->sc_admin_q);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOSQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOCQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

fail:
	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv == 0 && sc->sc_use_mq) {
		if (sc->sc_intr_disestablish(sc, q->q_id))
			rv = 1;
	}

	return rv;
}

static void
nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_dmamem *mem = ccb->ccb_cookie;

	sqe->opcode = NVM_ADMIN_IDENTIFY;
	htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
	htolem32(&sqe->cdw10, 1);
}

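/*
 * Allocate the per-queue command control blocks.  Each ccb gets a DMA
 * map sized for sc_mdts and a slice of the shared PRP list memory, so
 * transfers with more than two segments can be described without any
 * further allocation in the I/O path.
 */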
static int
nvme_ccbs_alloc(struct nvme_queue *q, u_int nccbs)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_ccb *ccb;
	bus_addr_t off;
	uint64_t *prpl;
	u_int i;

	mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
	SIMPLEQ_INIT(&q->q_ccb_list);

	q->q_ccbs = kmem_alloc(sizeof(*ccb) * nccbs, KM_SLEEP);
	if (q->q_ccbs == NULL)
		return 1;

	q->q_nccbs = nccbs;
	q->q_ccb_prpls = nvme_dmamem_alloc(sc,
	    sizeof(*prpl) * sc->sc_max_sgl * nccbs);

	prpl = NVME_DMA_KVA(q->q_ccb_prpls);
	off = 0;

	for (i = 0; i < nccbs; i++) {
		ccb = &q->q_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
		    sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
		    sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0)
			goto free_maps;

		ccb->ccb_id = i;
		ccb->ccb_prpl = prpl;
		ccb->ccb_prpl_off = off;
		ccb->ccb_prpl_dva = NVME_DMA_DVA(q->q_ccb_prpls) + off;

		SIMPLEQ_INSERT_TAIL(&q->q_ccb_list, ccb, ccb_entry);

		prpl += sc->sc_max_sgl;
		off += sizeof(*prpl) * sc->sc_max_sgl;
	}

	return 0;

free_maps:
	nvme_ccbs_free(q);
	return 1;
}

static struct nvme_ccb *
nvme_ccb_get(struct nvme_queue *q)
{
	struct nvme_ccb *ccb;

	mutex_enter(&q->q_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
	mutex_exit(&q->q_ccb_mtx);

	return ccb;
}

static void
nvme_ccb_put(struct nvme_queue *q, struct nvme_ccb *ccb)
{

	mutex_enter(&q->q_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&q->q_ccb_list, ccb, ccb_entry);
	mutex_exit(&q->q_ccb_mtx);
}

static void
nvme_ccbs_free(struct nvme_queue *q)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_ccb *ccb;

	mutex_enter(&q->q_ccb_mtx);
	while ((ccb = SIMPLEQ_FIRST(&q->q_ccb_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}
	mutex_exit(&q->q_ccb_mtx);

	nvme_dmamem_free(sc, q->q_ccb_prpls);
	kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
	q->q_ccbs = NULL;
	mutex_destroy(&q->q_ccb_mtx);
}

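/*
 * Allocate the host-side state for a queue pair: DMA-safe memory for
 * the submission and completion rings, the doorbell offsets for the
 * given id and stride, and the ccbs used to track outstanding commands.
 */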
static struct nvme_queue *
nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
{
	struct nvme_queue *q;

	q = kmem_alloc(sizeof(*q), KM_SLEEP);
	if (q == NULL)
		return NULL;

	q->q_sc = sc;
	q->q_sq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_sqe) * entries);
	if (q->q_sq_dmamem == NULL)
		goto free;

	q->q_cq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_cqe) * entries);
	if (q->q_cq_dmamem == NULL)
		goto free_sq;

	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));

	mutex_init(&q->q_sq_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&q->q_cq_mtx, MUTEX_DEFAULT, IPL_BIO);
	q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
	q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
	q->q_id = id;
	q->q_entries = entries;
	q->q_sq_tail = 0;
	q->q_cq_head = 0;
	q->q_cq_phase = NVME_CQE_PHASE;

	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	if (nvme_ccbs_alloc(q, entries) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate ccbs\n");
		goto free_cq;
	}

	return q;

free_cq:
	nvme_dmamem_free(sc, q->q_cq_dmamem);
free_sq:
	nvme_dmamem_free(sc, q->q_sq_dmamem);
free:
	kmem_free(q, sizeof(*q));

	return NULL;
}

static void
nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
{
	nvme_ccbs_free(q);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
	nvme_dmamem_free(sc, q->q_cq_dmamem);
	nvme_dmamem_free(sc, q->q_sq_dmamem);
	kmem_free(q, sizeof(*q));
}

int
nvme_intr(void *xsc)
{
	struct nvme_softc *sc = xsc;
	int rv = 0;

	nvme_write4(sc, NVME_INTMS, 1);

	if (nvme_q_complete(sc, sc->sc_admin_q))
		rv = 1;
	if (sc->sc_q != NULL)
		if (nvme_q_complete(sc, sc->sc_q[0]))
			rv = 1;

	nvme_write4(sc, NVME_INTMC, 1);

	return rv;
}

int
nvme_mq_msi_intr(void *xq)
{
	struct nvme_queue *q = xq;
	struct nvme_softc *sc = q->q_sc;
	int rv = 0;

	nvme_write4(sc, NVME_INTMS, 1U << q->q_id);

	if (nvme_q_complete(sc, q))
		rv = 1;

	nvme_write4(sc, NVME_INTMC, 1U << q->q_id);

	return rv;
}

int
nvme_mq_msix_intr(void *xq)
{
	struct nvme_queue *q = xq;
	int rv = 0;

	if (nvme_q_complete(q->q_sc, q))
		rv = 1;

	return rv;
}

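/*
 * Allocate a physically contiguous, zeroed DMA buffer (single segment)
 * together with its map and kernel mapping, for queues, PRP lists and
 * identify buffers.
 */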
static struct nvme_dmamem *
nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
{
	struct nvme_dmamem *ndm;
	int nsegs;

	ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
	if (ndm == NULL)
		return NULL;

	ndm->ndm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
		goto ndmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
	    1, &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	memset(ndm->ndm_kva, 0, size);

	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return ndm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
ndmfree:
	kmem_free(ndm, sizeof(*ndm));
	return NULL;
}

static void
nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
	    0, NVME_DMA_LEN(mem), ops);
}

void
nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
{
	bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
	kmem_free(ndm, sizeof(*ndm));
}

/*
 * Copy of sys/dev/scsipi/scsipiconf.c:scsipi_strvis()
 */
static void
nvme_strvis(u_char *dst, int dlen, const u_char *src, int slen)
{

#define STRVIS_ISWHITE(x) ((x) == ' ' || (x) == '\0' || (x) == (u_char)'\377')
	/* Trim leading and trailing blanks and NULs. */
	while (slen > 0 && STRVIS_ISWHITE(src[0]))
		++src, --slen;
	while (slen > 0 && STRVIS_ISWHITE(src[slen - 1]))
		--slen;

	while (slen > 0) {
		if (*src < 0x20 || *src >= 0x80) {
			/* non-printable characters */
			dlen -= 4;
			if (dlen < 1)
				break;
			*dst++ = '\\';
			*dst++ = ((*src & 0300) >> 6) + '0';
			*dst++ = ((*src & 0070) >> 3) + '0';
			*dst++ = ((*src & 0007) >> 0) + '0';
		} else if (*src == '\\') {
			/* quote characters */
			dlen -= 2;
			if (dlen < 1)
				break;
			*dst++ = '\\';
			*dst++ = '\\';
		} else {
			/* normal characters */
			if (--dlen < 1)
				break;
			*dst++ = *src;
		}
		++src, --slen;
	}

	*dst++ = 0;
}