/*	$NetBSD: nvme.c,v 1.1 2016/05/01 10:21:02 nonaka Exp $	*/
/*	$OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $	*/

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.1 2016/05/01 10:21:02 nonaka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

int nvme_adminq_size = 128;
int nvme_ioq_size = 128;

static int	nvme_print(void *, const char *);

static int	nvme_ready(struct nvme_softc *, uint32_t);
static int	nvme_enable(struct nvme_softc *, u_int);
static int	nvme_disable(struct nvme_softc *);
static int	nvme_shutdown(struct nvme_softc *);

static void	nvme_version(struct nvme_softc *, uint32_t);
#ifdef NVME_DEBUG
static void	nvme_dumpregs(struct nvme_softc *);
#endif
static int	nvme_identify(struct nvme_softc *, u_int);
static void	nvme_fill_identify(struct nvme_queue *, struct nvme_ccb *,
		    void *);

static int	nvme_ccbs_alloc(struct nvme_queue *, u_int);
static void	nvme_ccbs_free(struct nvme_queue *);

static struct nvme_ccb *
		nvme_ccb_get(struct nvme_queue *);
static void	nvme_ccb_put(struct nvme_queue *, struct nvme_ccb *);

static int	nvme_poll(struct nvme_softc *, struct nvme_queue *,
		    struct nvme_ccb *, void (*)(struct nvme_queue *,
		    struct nvme_ccb *, void *));
static void	nvme_poll_fill(struct nvme_queue *, struct nvme_ccb *, void *);
static void	nvme_poll_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_sqe_fill(struct nvme_queue *, struct nvme_ccb *, void *);
static void	nvme_empty_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);

static struct nvme_queue *
		nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
static int	nvme_q_create(struct nvme_softc *, struct nvme_queue *);
static int	nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
static void	nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
		    struct nvme_ccb *, void (*)(struct nvme_queue *,
		    struct nvme_ccb *, void *));
static int	nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
static void	nvme_q_free(struct nvme_softc *, struct nvme_queue *);

static struct nvme_dmamem *
		nvme_dmamem_alloc(struct nvme_softc *, size_t);
static void	nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
static void	nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *,
		    int);

static void	nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_ns_io_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_ns_sync_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_ns_sync_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);

static void	nvme_strvis(u_char *, int, const u_char *, int);

#define nvme_read4(_s, _r) \
	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write4(_s, _r, _v) \
	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
#ifdef __LP64__
#define nvme_read8(_s, _r) \
	bus_space_read_8((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write8(_s, _r, _v) \
	bus_space_write_8((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
#else /* __LP64__ */
static inline uint64_t
nvme_read8(struct nvme_softc *sc, bus_size_t r)
{
	uint64_t v;
	uint32_t *a = (uint32_t *)&v;

#if _BYTE_ORDER == _LITTLE_ENDIAN
	a[0] = nvme_read4(sc, r);
	a[1] = nvme_read4(sc, r + 4);
#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
	a[1] = nvme_read4(sc, r);
	a[0] = nvme_read4(sc, r + 4);
#endif

	return v;
}

static inline void
nvme_write8(struct nvme_softc *sc, bus_size_t r, uint64_t v)
{
	uint32_t *a = (uint32_t *)&v;

#if _BYTE_ORDER == _LITTLE_ENDIAN
	nvme_write4(sc, r, a[0]);
	nvme_write4(sc, r + 4, a[1]);
#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
	nvme_write4(sc, r, a[1]);
	nvme_write4(sc, r + 4, a[0]);
#endif
}
#endif /* __LP64__ */
#define nvme_barrier(_s, _r, _l, _f) \
	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))

pool_cache_t nvme_ns_ctx_cache;
ONCE_DECL(nvme_init_once);

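/*
 * One-time module initialization, run via RUN_ONCE() from nvme_attach():
 * create the pool cache that backs per-request namespace contexts
 * (struct nvme_ns_context).
 */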
static int
nvme_init(void)
{
	nvme_ns_ctx_cache = pool_cache_init(sizeof(struct nvme_ns_context),
	    0, 0, 0, "nvme_ns_ctx", NULL, IPL_BIO, NULL, NULL, NULL);
	KASSERT(nvme_ns_ctx_cache != NULL);
	return 0;
}

static void
nvme_version(struct nvme_softc *sc, uint32_t ver)
{
	const char *v = NULL;

	switch (ver) {
	case NVME_VS_1_0:
		v = "1.0";
		break;
	case NVME_VS_1_1:
		v = "1.1";
		break;
	case NVME_VS_1_2:
		v = "1.2";
		break;
	default:
		aprint_error_dev(sc->sc_dev, "unknown version 0x%08x\n", ver);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "NVMe %s\n", v);
}

#ifdef NVME_DEBUG
static void
nvme_dumpregs(struct nvme_softc *sc)
{
	uint64_t r8;
	uint32_t r4;

#define	DEVNAME(_sc) device_xname((_sc)->sc_dev)
	r8 = nvme_read8(sc, NVME_CAP);
	printf("%s: cap 0x%016llx\n", DEVNAME(sc), r8);
	printf("%s: mpsmax %u (%u)\n", DEVNAME(sc),
	    (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
	printf("%s: mpsmin %u (%u)\n", DEVNAME(sc),
	    (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
	printf("%s: css %llu\n", DEVNAME(sc), NVME_CAP_CSS(r8));
	printf("%s: nssrs %llu\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
	printf("%s: dstrd %u\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
	printf("%s: to %llu msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
	printf("%s: ams %llu\n", DEVNAME(sc), NVME_CAP_AMS(r8));
	printf("%s: cqr %llu\n", DEVNAME(sc), NVME_CAP_CQR(r8));
	printf("%s: mqes %llu\n", DEVNAME(sc), NVME_CAP_MQES(r8));

	printf("%s: vs 0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));

	r4 = nvme_read4(sc, NVME_CC);
	printf("%s: cc 0x%04x\n", DEVNAME(sc), r4);
	printf("%s: iocqes %u\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4));
	printf("%s: iosqes %u\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4));
	printf("%s: shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
	printf("%s: ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
	printf("%s: mps %u\n", DEVNAME(sc), NVME_CC_MPS_R(r4));
	printf("%s: css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
	printf("%s: en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN));

	printf("%s: csts 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_CSTS));
	printf("%s: aqa 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_AQA));
	printf("%s: asq 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
	printf("%s: acq 0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
#undef DEVNAME
}
#endif /* NVME_DEBUG */

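/*
 * Wait for CSTS.RDY to reach the requested state, polling once per
 * millisecond until the controller's advertised ready timeout
 * (sc_rdy_to, taken from CAP.TO) expires.  Returns non-zero on timeout.
 */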
static int
nvme_ready(struct nvme_softc *sc, uint32_t rdy)
{
	u_int i = 0;

	while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
		if (i++ > sc->sc_rdy_to)
			return 1;

		delay(1000);
		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
	}

	return 0;
}

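/*
 * Bring the controller up: program the admin queue size and base
 * addresses (AQA/ASQ/ACQ), select 64-byte SQ and 16-byte CQ entries,
 * the NVM command set, round-robin arbitration and the negotiated
 * memory page size in CC, set CC.EN, and wait for CSTS.RDY.
 */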
static int
nvme_enable(struct nvme_softc *sc, u_int mps)
{
	uint32_t cc;

	cc = nvme_read4(sc, NVME_CC);
	if (ISSET(cc, NVME_CC_EN))
		return nvme_ready(sc, NVME_CSTS_RDY);

	nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
	    NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);

	nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
	nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);

	CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
	SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
	SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
	SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
	SET(cc, NVME_CC_MPS(mps));
	SET(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return nvme_ready(sc, NVME_CSTS_RDY);
}

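/*
 * Take the controller down: clear CC.EN and wait for CSTS.RDY to clear.
 */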
static int
nvme_disable(struct nvme_softc *sc)
{
	uint32_t cc, csts;

	cc = nvme_read4(sc, NVME_CC);
	if (ISSET(cc, NVME_CC_EN)) {
		csts = nvme_read4(sc, NVME_CSTS);
		if (!ISSET(csts, NVME_CSTS_CFS) &&
		    nvme_ready(sc, NVME_CSTS_RDY) != 0)
			return 1;
	}

	CLR(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return nvme_ready(sc, 0);
}

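/*
 * Bus-independent attachment, called by the frontend once the registers
 * are mapped: validate VS and CAP, reset the controller, set up and
 * enable the admin queue, identify the controller, create the I/O
 * queues, and attach one child device per namespace.
 */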
int
nvme_attach(struct nvme_softc *sc)
{
	struct nvme_attach_args naa;
	uint64_t cap;
	uint32_t reg;
	u_int dstrd;
	u_int mps = PAGE_SHIFT;
	int adminq_entries = nvme_adminq_size;
	int ioq_entries = nvme_ioq_size;
	int i;

	RUN_ONCE(&nvme_init_once, nvme_init);

	reg = nvme_read4(sc, NVME_VS);
	if (reg == 0xffffffff) {
		aprint_error_dev(sc->sc_dev, "invalid mapping\n");
		return 1;
	}

	nvme_version(sc, reg);

	cap = nvme_read8(sc, NVME_CAP);
	dstrd = NVME_CAP_DSTRD(cap);
	if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
		aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
		    "is greater than CPU page size %u\n",
		    1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
		return 1;
	}
	if (NVME_CAP_MPSMAX(cap) < mps)
		mps = NVME_CAP_MPSMAX(cap);

	sc->sc_rdy_to = NVME_CAP_TO(cap);
	sc->sc_mps = 1 << mps;
	sc->sc_mdts = MAXPHYS;
	sc->sc_max_sgl = 2;

	if (nvme_disable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to disable controller\n");
		return 1;
	}

	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries, dstrd);
	if (sc->sc_admin_q == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate admin queue\n");
		return 1;
	}
	if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q))
		goto free_admin_q;

	if (nvme_enable(sc, mps) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable controller\n");
		goto disestablish_admin_q;
	}

	if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to identify controller\n");
		goto disable;
	}

	/* we know how big things are now */
	sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;

	/* reallocate ccbs of admin queue with new max sgl. */
	nvme_ccbs_free(sc->sc_admin_q);
	nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);

	sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
	if (sc->sc_q == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate io queue\n");
		goto disable;
	}
	for (i = 0; i < sc->sc_nq; i++) {
		sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries, dstrd);
		if (sc->sc_q[i] == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate io queue\n");
			goto free_q;
		}
		if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create io queue\n");
			nvme_q_free(sc, sc->sc_q[i]);
			goto free_q;
		}
	}

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMC, 1);

	sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
	    KM_SLEEP);
	for (i = 0; i < sc->sc_nn; i++) {
		memset(&naa, 0, sizeof(naa));
		naa.naa_nsid = i + 1;
		naa.naa_qentries = ioq_entries;
		sc->sc_namespaces[i].dev = config_found(sc->sc_dev, &naa,
		    nvme_print);
	}

	return 0;

free_q:
	while (--i >= 0) {
		nvme_q_delete(sc, sc->sc_q[i]);
		nvme_q_free(sc, sc->sc_q[i]);
	}
disable:
	nvme_disable(sc);
disestablish_admin_q:
	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
free_admin_q:
	nvme_q_free(sc, sc->sc_admin_q);

	return 1;
}

static int
nvme_print(void *aux, const char *pnp)
{
	struct nvme_attach_args *naa = aux;

	if (pnp)
		aprint_normal("at %s", pnp);

	if (naa->naa_nsid > 0)
		aprint_normal(" nsid %d", naa->naa_nsid);

	return UNCONF;
}

int
nvme_detach(struct nvme_softc *sc, int flags)
{
	int i, error;

	error = config_detach_children(sc->sc_dev, flags);
	if (error)
		return error;

	error = nvme_shutdown(sc);
	if (error)
		return error;

	for (i = 0; i < sc->sc_nq; i++)
		nvme_q_free(sc, sc->sc_q[i]);
	kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
	nvme_q_free(sc, sc->sc_admin_q);

	return 0;
}

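/*
 * Orderly shutdown: delete the I/O queues, disestablish the admin
 * interrupt, then request a normal shutdown through CC.SHN and poll
 * CSTS.SHST for up to four seconds.  Falls back to a plain disable
 * if anything goes wrong along the way.
 */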
static int
nvme_shutdown(struct nvme_softc *sc)
{
	uint32_t cc, csts;
	bool disabled = false;
	int i;

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMS, 1);

	for (i = 0; i < sc->sc_nq; i++) {
		if (nvme_q_delete(sc, sc->sc_q[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to delete io queue %d, disabling\n", i + 1);
			disabled = true;
		}
	}
	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
	if (disabled)
		goto disable;

	cc = nvme_read4(sc, NVME_CC);
	CLR(cc, NVME_CC_SHN_MASK);
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
	nvme_write4(sc, NVME_CC, cc);

	for (i = 0; i < 4000; i++) {
		nvme_barrier(sc, 0, sc->sc_ios,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		csts = nvme_read4(sc, NVME_CSTS);
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
			return 0;

		delay(1000);
	}

	aprint_error_dev(sc->sc_dev, "unable to shutdown, disabling\n");

disable:
	nvme_disable(sc);
	return 0;
}

void
nvme_childdet(device_t self, device_t child)
{
	struct nvme_softc *sc = device_private(self);
	int i;

	for (i = 0; i < sc->sc_nn; i++) {
		if (sc->sc_namespaces[i].dev == child) {
			/* Already freed ns->ident. */
			sc->sc_namespaces[i].dev = NULL;
			break;
		}
	}
}

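/*
 * Issue IDENTIFY (CNS 0) for one namespace on the admin queue, polled,
 * and hang a private copy of the identify data off the namespace.
 */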
int
nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
{
	struct nvme_sqe sqe;
	struct nvm_identify_namespace *identify;
	struct nvme_dmamem *mem;
	struct nvme_ccb *ccb;
	struct nvme_namespace *ns;
	int rv;

	KASSERT(nsid > 0);

	ccb = nvme_ccb_get(sc->sc_admin_q);
	KASSERT(ccb != NULL);

	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
	if (mem == NULL) {
		nvme_ccb_put(sc->sc_admin_q, ccb);
		return ENOMEM;
	}

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_IDENTIFY;
	htolem32(&sqe.nsid, nsid);
	htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
	htolem32(&sqe.cdw10, 0);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);

	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv != 0) {
		rv = EIO;
		goto done;
	}

	/* commit */

	identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
	memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));

	ns = nvme_ns_get(sc, nsid);
	KASSERT(ns);
	ns->ident = identify;

done:
	nvme_dmamem_free(sc, mem);

	return rv;
}

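/*
 * Start one read or write.  The data buffer is loaded into the ccb's
 * DMA map; up to two segments fit directly in the submission queue
 * entry's PRP fields, anything longer spills into the ccb's
 * pre-allocated PRP list (see nvme_ns_io_fill() below).  Polled
 * transfers complete before this returns; otherwise nnc_done fires
 * from completion context.
 */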
int
nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
{
	struct nvme_queue *q = nvme_get_q(sc);
	struct nvme_ccb *ccb;
	bus_dmamap_t dmap;
	int i, error;

	ccb = nvme_ccb_get(q);
	if (ccb == NULL)
		return EAGAIN;

	ccb->ccb_done = nvme_ns_io_done;
	ccb->ccb_cookie = ctx;

	dmap = ccb->ccb_dmamap;
	error = bus_dmamap_load(sc->sc_dmat, dmap, ctx->nnc_data,
	    ctx->nnc_datasize, NULL,
	    (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL) ?
	      BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
	    (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	      BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		nvme_ccb_put(q, ccb);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (dmap->dm_nsegs > 2) {
		for (i = 1; i < dmap->dm_nsegs; i++) {
			htolem64(&ccb->ccb_prpl[i - 1],
			    dmap->dm_segs[i].ds_addr);
		}
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(q->q_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_PREWRITE);
	}

	if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
		if (nvme_poll(sc, q, ccb, nvme_ns_io_fill) != 0)
			return EIO;
		return 0;
	}

	nvme_q_submit(sc, q, ccb, nvme_ns_io_fill);
	return 0;
}

static void
nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe_io *sqe = slot;
	struct nvme_ns_context *ctx = ccb->ccb_cookie;
	bus_dmamap_t dmap = ccb->ccb_dmamap;

	sqe->opcode = ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	    NVM_CMD_READ : NVM_CMD_WRITE;
	htolem32(&sqe->nsid, ctx->nnc_nsid);

	htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
	switch (dmap->dm_nsegs) {
	case 1:
		break;
	case 2:
		htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
		break;
	default:
		/* the prp list is already set up and synced */
		htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
		break;
	}

	htolem64(&sqe->slba, ctx->nnc_blkno);
	htolem16(&sqe->nlb, (ctx->nnc_datasize / ctx->nnc_secsize) - 1);
}

static void
nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_ns_context *ctx = ccb->ccb_cookie;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	uint16_t flags;

	if (dmap->dm_nsegs > 2) {
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(q->q_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_POSTWRITE);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmap);
	nvme_ccb_put(q, ccb);

	flags = lemtoh16(&cqe->flags);

	ctx->nnc_status = flags;
	(*ctx->nnc_done)(ctx);
}

int
nvme_ns_sync(struct nvme_softc *sc, struct nvme_ns_context *ctx)
{
	struct nvme_queue *q = nvme_get_q(sc);
	struct nvme_ccb *ccb;

	ccb = nvme_ccb_get(q);
	if (ccb == NULL)
		return EAGAIN;

	ccb->ccb_done = nvme_ns_sync_done;
	ccb->ccb_cookie = ctx;

	if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
		if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill) != 0)
			return EIO;
		return 0;
	}

	nvme_q_submit(sc, q, ccb, nvme_ns_sync_fill);
	return 0;
}

static void
nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_ns_context *ctx = ccb->ccb_cookie;

	sqe->opcode = NVM_CMD_FLUSH;
	htolem32(&sqe->nsid, ctx->nnc_nsid);
}

static void
nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_ns_context *ctx = ccb->ccb_cookie;
	uint16_t flags;

	nvme_ccb_put(q, ccb);

	flags = lemtoh16(&cqe->flags);

	ctx->nnc_status = flags;
	(*ctx->nnc_done)(ctx);
}

void
nvme_ns_free(struct nvme_softc *sc, uint16_t nsid)
{
	struct nvme_namespace *ns;
	struct nvm_identify_namespace *identify;

	ns = nvme_ns_get(sc, nsid);
	KASSERT(ns);

	identify = ns->ident;
	ns->ident = NULL;
	if (identify != NULL)
		kmem_free(identify, sizeof(*identify));
}

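/*
 * Fill the next free submission queue slot under q_sq_mtx and ring the
 * SQ tail doorbell.  The fill callback writes the command; the ccb id
 * becomes the command identifier so the completion path can find the
 * ccb again.
 */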
static void
nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
{
	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	uint32_t tail;

	mutex_enter(&q->q_sq_mtx);
	tail = q->q_sq_tail;
	if (++q->q_sq_tail >= q->q_entries)
		q->q_sq_tail = 0;

	sqe += tail;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
	memset(sqe, 0, sizeof(*sqe));
	(*fill)(q, ccb, sqe);
	sqe->cid = ccb->ccb_id;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);

	nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
	mutex_exit(&q->q_sq_mtx);
}

struct nvme_poll_state {
	struct nvme_sqe s;
	struct nvme_cqe c;
};

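/*
 * Synchronous command execution: swap in a private done handler that
 * copies the CQE into local state, submit, and spin on
 * nvme_q_complete() until the phase bit shows up in the copied CQE.
 * Returns the CQE status field with the phase bit masked off, so 0
 * means success.
 */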
static int
nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
{
	struct nvme_poll_state state;
	void (*done)(struct nvme_queue *, struct nvme_ccb *, struct nvme_cqe *);
	void *cookie;
	uint16_t flags;

	memset(&state, 0, sizeof(state));
	(*fill)(q, ccb, &state.s);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = nvme_poll_done;
	ccb->ccb_cookie = &state;

	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
	while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
		if (nvme_q_complete(sc, q) == 0)
			delay(10);

		/* XXX no timeout? */
	}

	ccb->ccb_cookie = cookie;
	done(q, ccb, &state.c);

	flags = lemtoh16(&state.c.flags);

	return flags & ~NVME_CQE_PHASE;
}

static void
nvme_poll_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_poll_state *state = ccb->ccb_cookie;

	*sqe = state->s;
}

static void
nvme_poll_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_poll_state *state = ccb->ccb_cookie;

	SET(cqe->flags, htole16(NVME_CQE_PHASE));
	state->c = *cqe;
}

static void
nvme_sqe_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *src = ccb->ccb_cookie;
	struct nvme_sqe *dst = slot;

	*dst = *src;
}

static void
nvme_empty_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
}

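/*
 * Drain the completion queue: consume CQEs while their phase bit
 * matches q_cq_phase, dispatch each to its ccb's done handler, flip
 * the phase on wrap, and finally update the CQ head doorbell.
 * Returns 1 if any entries were processed, 0 if none, or -1 if
 * another thread already holds the CQ lock.
 */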
static int
nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_ccb *ccb;
	struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
	uint32_t head;
	uint16_t flags;
	int rv = 0;

	if (!mutex_tryenter(&q->q_cq_mtx))
		return -1;

	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	head = q->q_cq_head;
	for (;;) {
		cqe = &ring[head];
		flags = lemtoh16(&cqe->flags);
		if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
			break;

		ccb = &q->q_ccbs[cqe->cid];
		ccb->ccb_done(q, ccb, cqe);

		if (++head >= q->q_entries) {
			head = 0;
			q->q_cq_phase ^= NVME_CQE_PHASE;
		}

		rv = 1;
	}
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	if (rv)
		nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
	mutex_exit(&q->q_cq_mtx);

	return rv;
}

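/*
 * IDENTIFY CONTROLLER (CNS 1): print the model, firmware and serial
 * strings, clamp sc_mdts to the controller's maximum data transfer
 * size, and record the namespace count and a copy of the identify
 * data in the softc.
 */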
static int
nvme_identify(struct nvme_softc *sc, u_int mps)
{
	char sn[41], mn[81], fr[17];
	struct nvm_identify_controller *identify;
	struct nvme_dmamem *mem;
	struct nvme_ccb *ccb;
	u_int mdts;
	int rv = 1;

	ccb = nvme_ccb_get(sc->sc_admin_q);
	if (ccb == NULL)
		panic("%s: nvme_ccb_get returned NULL", __func__);

	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
	if (mem == NULL) {
		nvme_ccb_put(sc->sc_admin_q, ccb);
		return 1;
	}

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = mem;

	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);

	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv != 0)
		goto done;

	identify = NVME_DMA_KVA(mem);

	nvme_strvis(sn, sizeof(sn), identify->sn, sizeof(identify->sn));
	nvme_strvis(mn, sizeof(mn), identify->mn, sizeof(identify->mn));
	nvme_strvis(fr, sizeof(fr), identify->fr, sizeof(identify->fr));
	aprint_normal_dev(sc->sc_dev, "%s, firmware %s, serial %s\n", mn, fr,
	    sn);

	if (identify->mdts > 0) {
		mdts = (1 << identify->mdts) * (1 << mps);
		if (mdts < sc->sc_mdts)
			sc->sc_mdts = mdts;
	}

	sc->sc_nn = lemtoh32(&identify->nn);

	memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));

done:
	nvme_dmamem_free(sc, mem);

	return rv;
}

static int
nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	if (sc->sc_use_mq && sc->sc_intr_establish(sc, q->q_id, q))
		return 1;

	ccb = nvme_ccb_get(sc->sc_admin_q);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOCQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
	if (sc->sc_use_mq)
		htolem16(&sqe.cqid, q->q_id);	/* qid == vector */

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOSQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	htolem16(&sqe.cqid, q->q_id);
	sqe.qflags = NVM_SQE_Q_PC;

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

fail:
	nvme_ccb_put(sc->sc_admin_q, ccb);
	return rv;
}

static int
nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	ccb = nvme_ccb_get(sc->sc_admin_q);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOSQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOCQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

fail:
	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv == 0 && sc->sc_use_mq) {
		if (sc->sc_intr_disestablish(sc, q->q_id))
			rv = 1;
	}

	return rv;
}

static void
nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_dmamem *mem = ccb->ccb_cookie;

	sqe->opcode = NVM_ADMIN_IDENTIFY;
	htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
	htolem32(&sqe->cdw10, 1);
}

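/*
 * Allocate the ccb array for a queue plus one contiguous DMA-able
 * region holding a PRP list per ccb; each ccb gets a DMA map sized
 * for sc_mdts and remembers the kva, offset and bus address of its
 * slice of the PRP list region.
 */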
static int
nvme_ccbs_alloc(struct nvme_queue *q, u_int nccbs)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_ccb *ccb;
	bus_addr_t off;
	uint64_t *prpl;
	u_int i;

	mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
	SIMPLEQ_INIT(&q->q_ccb_list);

	q->q_ccbs = kmem_alloc(sizeof(*ccb) * nccbs, KM_SLEEP);
	if (q->q_ccbs == NULL)
		return 1;

	q->q_nccbs = nccbs;
	q->q_ccb_prpls = nvme_dmamem_alloc(sc,
	    sizeof(*prpl) * sc->sc_max_sgl * nccbs);

	prpl = NVME_DMA_KVA(q->q_ccb_prpls);
	off = 0;

	for (i = 0; i < nccbs; i++) {
		ccb = &q->q_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
		    sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
		    sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0)
			goto free_maps;

		ccb->ccb_id = i;
		ccb->ccb_prpl = prpl;
		ccb->ccb_prpl_off = off;
		ccb->ccb_prpl_dva = NVME_DMA_DVA(q->q_ccb_prpls) + off;

		SIMPLEQ_INSERT_TAIL(&q->q_ccb_list, ccb, ccb_entry);

		prpl += sc->sc_max_sgl;
		off += sizeof(*prpl) * sc->sc_max_sgl;
	}

	return 0;

free_maps:
	nvme_ccbs_free(q);
	return 1;
}

static struct nvme_ccb *
nvme_ccb_get(struct nvme_queue *q)
{
	struct nvme_ccb *ccb;

	mutex_enter(&q->q_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
	mutex_exit(&q->q_ccb_mtx);

	return ccb;
}

static void
nvme_ccb_put(struct nvme_queue *q, struct nvme_ccb *ccb)
{

	mutex_enter(&q->q_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&q->q_ccb_list, ccb, ccb_entry);
	mutex_exit(&q->q_ccb_mtx);
}

static void
nvme_ccbs_free(struct nvme_queue *q)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_ccb *ccb;

	mutex_enter(&q->q_ccb_mtx);
	while ((ccb = SIMPLEQ_FIRST(&q->q_ccb_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}
	mutex_exit(&q->q_ccb_mtx);

	nvme_dmamem_free(sc, q->q_ccb_prpls);
	kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
	q->q_ccbs = NULL;
	mutex_destroy(&q->q_ccb_mtx);
}

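/*
 * Allocate a queue pair: DMA memory for the SQ and CQ rings, the
 * doorbell offsets derived from the queue id and doorbell stride,
 * and one ccb per queue entry.
 */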
static struct nvme_queue *
nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
{
	struct nvme_queue *q;

	q = kmem_alloc(sizeof(*q), KM_SLEEP);
	if (q == NULL)
		return NULL;

	q->q_sc = sc;
	q->q_sq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_sqe) * entries);
	if (q->q_sq_dmamem == NULL)
		goto free;

	q->q_cq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_cqe) * entries);
	if (q->q_cq_dmamem == NULL)
		goto free_sq;

	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));

	mutex_init(&q->q_sq_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&q->q_cq_mtx, MUTEX_DEFAULT, IPL_BIO);
	q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
	q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
	q->q_id = id;
	q->q_entries = entries;
	q->q_sq_tail = 0;
	q->q_cq_head = 0;
	q->q_cq_phase = NVME_CQE_PHASE;

	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	if (nvme_ccbs_alloc(q, entries) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate ccbs\n");
		goto free_cq;
	}

	return q;

free_cq:
	nvme_dmamem_free(sc, q->q_cq_dmamem);
free_sq:
	nvme_dmamem_free(sc, q->q_sq_dmamem);
free:
	kmem_free(q, sizeof(*q));

	return NULL;
}

static void
nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
{
	nvme_ccbs_free(q);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
	nvme_dmamem_free(sc, q->q_cq_dmamem);
	nvme_dmamem_free(sc, q->q_sq_dmamem);
	kmem_free(q, sizeof(*q));
}

int
nvme_intr(void *xsc)
{
	struct nvme_softc *sc = xsc;
	int rv = 0;

	nvme_write4(sc, NVME_INTMS, 1);

	if (nvme_q_complete(sc, sc->sc_admin_q))
		rv = 1;
	if (sc->sc_q != NULL)
		if (nvme_q_complete(sc, sc->sc_q[0]))
			rv = 1;

	nvme_write4(sc, NVME_INTMC, 1);

	return rv;
}

int
nvme_mq_msi_intr(void *xq)
{
	struct nvme_queue *q = xq;
	struct nvme_softc *sc = q->q_sc;
	int rv = 0;

	nvme_write4(sc, NVME_INTMS, 1U << q->q_id);

	if (nvme_q_complete(sc, q))
		rv = 1;

	nvme_write4(sc, NVME_INTMC, 1U << q->q_id);

	return rv;
}

int
nvme_mq_msix_intr(void *xq)
{
	struct nvme_queue *q = xq;
	int rv = 0;

	if (nvme_q_complete(q->q_sc, q))
		rv = 1;

	return rv;
}

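/*
 * Allocate wired, single-segment DMA memory aligned to the device page
 * size, together with a map and kernel mapping, zeroed and preloaded.
 * Used for queue rings, PRP lists and identify buffers.
 */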
static struct nvme_dmamem *
nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
{
	struct nvme_dmamem *ndm;
	int nsegs;

	ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
	if (ndm == NULL)
		return NULL;

	ndm->ndm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
		goto ndmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
	    1, &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	memset(ndm->ndm_kva, 0, size);

	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return ndm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
ndmfree:
	kmem_free(ndm, sizeof(*ndm));
	return NULL;
}

static void
nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
	    0, NVME_DMA_LEN(mem), ops);
}

void
nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
{
	bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
	kmem_free(ndm, sizeof(*ndm));
}

/*
 * Copy of sys/dev/scsipi/scsipiconf.c:scsipi_strvis()
 */
static void
nvme_strvis(u_char *dst, int dlen, const u_char *src, int slen)
{

#define STRVIS_ISWHITE(x) ((x) == ' ' || (x) == '\0' || (x) == (u_char)'\377')
	/* Trim leading and trailing blanks and NULs. */
	while (slen > 0 && STRVIS_ISWHITE(src[0]))
		++src, --slen;
	while (slen > 0 && STRVIS_ISWHITE(src[slen - 1]))
		--slen;

	while (slen > 0) {
		if (*src < 0x20 || *src >= 0x80) {
			/* non-printable characters */
			dlen -= 4;
			if (dlen < 1)
				break;
			*dst++ = '\\';
			*dst++ = ((*src & 0300) >> 6) + '0';
			*dst++ = ((*src & 0070) >> 3) + '0';
			*dst++ = ((*src & 0007) >> 0) + '0';
		} else if (*src == '\\') {
			/* quote characters */
			dlen -= 2;
			if (dlen < 1)
				break;
			*dst++ = '\\';
			*dst++ = '\\';
		} else {
			/* normal characters */
			if (--dlen < 1)
				break;
			*dst++ = *src;
		}
		++src, --slen;
	}

	*dst++ = 0;
}