      1 1.44.2.6  martin /*	$NetBSD: nvme.c,v 1.44.2.6 2021/06/21 17:25:48 martin Exp $	*/
2 1.1 nonaka /* $OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
3 1.1 nonaka
4 1.1 nonaka /*
5 1.1 nonaka * Copyright (c) 2014 David Gwynne <dlg (at) openbsd.org>
6 1.1 nonaka *
7 1.1 nonaka * Permission to use, copy, modify, and distribute this software for any
8 1.1 nonaka * purpose with or without fee is hereby granted, provided that the above
9 1.1 nonaka * copyright notice and this permission notice appear in all copies.
10 1.1 nonaka *
11 1.1 nonaka * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 1.1 nonaka * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 1.1 nonaka * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 1.1 nonaka * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 1.1 nonaka * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 1.1 nonaka * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 1.1 nonaka * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 1.1 nonaka */
19 1.1 nonaka
20 1.1 nonaka #include <sys/cdefs.h>
21 1.44.2.6 martin __KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.44.2.6 2021/06/21 17:25:48 martin Exp $");
22 1.1 nonaka
23 1.1 nonaka #include <sys/param.h>
24 1.1 nonaka #include <sys/systm.h>
25 1.1 nonaka #include <sys/kernel.h>
26 1.1 nonaka #include <sys/atomic.h>
27 1.1 nonaka #include <sys/bus.h>
28 1.1 nonaka #include <sys/buf.h>
29 1.3 nonaka #include <sys/conf.h>
30 1.1 nonaka #include <sys/device.h>
31 1.1 nonaka #include <sys/kmem.h>
32 1.1 nonaka #include <sys/once.h>
33 1.3 nonaka #include <sys/proc.h>
34 1.1 nonaka #include <sys/queue.h>
35 1.1 nonaka #include <sys/mutex.h>
36 1.1 nonaka
37 1.3 nonaka #include <uvm/uvm_extern.h>
38 1.3 nonaka
39 1.1 nonaka #include <dev/ic/nvmereg.h>
40 1.1 nonaka #include <dev/ic/nvmevar.h>
41 1.3 nonaka #include <dev/ic/nvmeio.h>
42 1.1 nonaka
43 1.31 riastrad #include "ioconf.h"
44 1.31 riastrad
45 1.38 nonaka #define B4_CHK_RDY_DELAY_MS 2300 /* workaround controller bug */
46 1.38 nonaka
47 1.22 jdolecek int nvme_adminq_size = 32;
48 1.9 jdolecek int nvme_ioq_size = 1024;
49 1.1 nonaka
50 1.1 nonaka static int nvme_print(void *, const char *);
51 1.1 nonaka
52 1.1 nonaka static int nvme_ready(struct nvme_softc *, uint32_t);
53 1.1 nonaka static int nvme_enable(struct nvme_softc *, u_int);
54 1.1 nonaka static int nvme_disable(struct nvme_softc *);
55 1.1 nonaka static int nvme_shutdown(struct nvme_softc *);
56 1.1 nonaka
57 1.1 nonaka #ifdef NVME_DEBUG
58 1.1 nonaka static void nvme_dumpregs(struct nvme_softc *);
59 1.1 nonaka #endif
60 1.1 nonaka static int nvme_identify(struct nvme_softc *, u_int);
61 1.1 nonaka static void nvme_fill_identify(struct nvme_queue *, struct nvme_ccb *,
62 1.1 nonaka void *);
63 1.1 nonaka
64 1.20 jdolecek static int nvme_ccbs_alloc(struct nvme_queue *, uint16_t);
65 1.1 nonaka static void nvme_ccbs_free(struct nvme_queue *);
66 1.1 nonaka
67 1.1 nonaka static struct nvme_ccb *
68 1.34 jdolecek nvme_ccb_get(struct nvme_queue *, bool);
69 1.1 nonaka static void nvme_ccb_put(struct nvme_queue *, struct nvme_ccb *);
70 1.1 nonaka
71 1.1 nonaka static int nvme_poll(struct nvme_softc *, struct nvme_queue *,
72 1.1 nonaka struct nvme_ccb *, void (*)(struct nvme_queue *,
73 1.7 jdolecek struct nvme_ccb *, void *), int);
74 1.1 nonaka static void nvme_poll_fill(struct nvme_queue *, struct nvme_ccb *, void *);
75 1.1 nonaka static void nvme_poll_done(struct nvme_queue *, struct nvme_ccb *,
76 1.1 nonaka struct nvme_cqe *);
77 1.1 nonaka static void nvme_sqe_fill(struct nvme_queue *, struct nvme_ccb *, void *);
78 1.1 nonaka static void nvme_empty_done(struct nvme_queue *, struct nvme_ccb *,
79 1.1 nonaka struct nvme_cqe *);
80 1.1 nonaka
81 1.1 nonaka static struct nvme_queue *
82 1.1 nonaka nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
83 1.1 nonaka static int nvme_q_create(struct nvme_softc *, struct nvme_queue *);
84 1.44.2.6 martin static void nvme_q_reset(struct nvme_softc *, struct nvme_queue *);
85 1.1 nonaka static int nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
86 1.1 nonaka static void nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
87 1.1 nonaka struct nvme_ccb *, void (*)(struct nvme_queue *,
88 1.1 nonaka struct nvme_ccb *, void *));
89 1.1 nonaka static int nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
90 1.1 nonaka static void nvme_q_free(struct nvme_softc *, struct nvme_queue *);
91 1.34 jdolecek static void nvme_q_wait_complete(struct nvme_softc *, struct nvme_queue *,
92 1.34 jdolecek bool (*)(void *), void *);
93 1.1 nonaka
94 1.19 jdolecek static struct nvme_dmamem *
95 1.19 jdolecek nvme_dmamem_alloc(struct nvme_softc *, size_t);
96 1.1 nonaka static void nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
97 1.19 jdolecek static void nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *,
98 1.19 jdolecek int);
99 1.1 nonaka
100 1.1 nonaka static void nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
101 1.1 nonaka void *);
102 1.1 nonaka static void nvme_ns_io_done(struct nvme_queue *, struct nvme_ccb *,
103 1.1 nonaka struct nvme_cqe *);
104 1.1 nonaka static void nvme_ns_sync_fill(struct nvme_queue *, struct nvme_ccb *,
105 1.1 nonaka void *);
106 1.1 nonaka static void nvme_ns_sync_done(struct nvme_queue *, struct nvme_ccb *,
107 1.1 nonaka struct nvme_cqe *);
108 1.25 jdolecek static void nvme_getcache_fill(struct nvme_queue *, struct nvme_ccb *,
109 1.25 jdolecek void *);
110 1.25 jdolecek static void nvme_getcache_done(struct nvme_queue *, struct nvme_ccb *,
111 1.25 jdolecek struct nvme_cqe *);
112 1.1 nonaka
113 1.3 nonaka static void nvme_pt_fill(struct nvme_queue *, struct nvme_ccb *,
114 1.3 nonaka void *);
115 1.3 nonaka static void nvme_pt_done(struct nvme_queue *, struct nvme_ccb *,
116 1.3 nonaka struct nvme_cqe *);
117 1.3 nonaka static int nvme_command_passthrough(struct nvme_softc *,
118 1.3 nonaka struct nvme_pt_command *, uint16_t, struct lwp *, bool);
119 1.3 nonaka
120 1.44.2.3 martin static int nvme_set_number_of_queues(struct nvme_softc *, u_int, u_int *,
121 1.44.2.3 martin u_int *);
122 1.23 nonaka
123 1.7 jdolecek #define NVME_TIMO_QOP 5 /* queue create and delete timeout */
124 1.7 jdolecek #define NVME_TIMO_IDENT 10 /* probe identify timeout */
125 1.7 jdolecek #define NVME_TIMO_PT -1 /* passthrough cmd timeout */
126 1.13 jdolecek #define NVME_TIMO_SY 60 /* sync cache timeout */
127 1.7 jdolecek
128 1.1 nonaka #define nvme_read4(_s, _r) \
129 1.1 nonaka bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
130 1.1 nonaka #define nvme_write4(_s, _r, _v) \
131 1.1 nonaka bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
132 1.28 nonaka /*
133 1.28 nonaka * Some controllers, at least Apple NVMe, always require split
134 1.28 nonaka * transfers, so don't use bus_space_{read,write}_8() on LP64.
135 1.28 nonaka */
136 1.1 nonaka static inline uint64_t
137 1.1 nonaka nvme_read8(struct nvme_softc *sc, bus_size_t r)
138 1.1 nonaka {
139 1.1 nonaka uint64_t v;
140 1.1 nonaka uint32_t *a = (uint32_t *)&v;
141 1.1 nonaka
142 1.1 nonaka #if _BYTE_ORDER == _LITTLE_ENDIAN
143 1.1 nonaka a[0] = nvme_read4(sc, r);
144 1.1 nonaka a[1] = nvme_read4(sc, r + 4);
145 1.1 nonaka #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
146 1.1 nonaka a[1] = nvme_read4(sc, r);
147 1.1 nonaka a[0] = nvme_read4(sc, r + 4);
148 1.1 nonaka #endif
149 1.1 nonaka
150 1.1 nonaka return v;
151 1.1 nonaka }
152 1.1 nonaka
153 1.1 nonaka static inline void
154 1.1 nonaka nvme_write8(struct nvme_softc *sc, bus_size_t r, uint64_t v)
155 1.1 nonaka {
156 1.1 nonaka uint32_t *a = (uint32_t *)&v;
157 1.1 nonaka
158 1.1 nonaka #if _BYTE_ORDER == _LITTLE_ENDIAN
159 1.1 nonaka nvme_write4(sc, r, a[0]);
160 1.1 nonaka nvme_write4(sc, r + 4, a[1]);
161 1.1 nonaka #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
162 1.1 nonaka nvme_write4(sc, r, a[1]);
163 1.1 nonaka nvme_write4(sc, r + 4, a[0]);
164 1.1 nonaka #endif
165 1.1 nonaka }
166 1.1 nonaka #define nvme_barrier(_s, _r, _l, _f) \
167 1.1 nonaka bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
168 1.1 nonaka
169 1.1 nonaka #ifdef NVME_DEBUG
170 1.6 jdolecek static __used void
171 1.1 nonaka nvme_dumpregs(struct nvme_softc *sc)
172 1.1 nonaka {
173 1.1 nonaka uint64_t r8;
174 1.1 nonaka uint32_t r4;
175 1.1 nonaka
176 1.1 nonaka #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
177 1.1 nonaka r8 = nvme_read8(sc, NVME_CAP);
    178 1.8  jdolecek 	printf("%s: cap  0x%016"PRIx64"\n", DEVNAME(sc), r8);
179 1.1 nonaka printf("%s: mpsmax %u (%u)\n", DEVNAME(sc),
180 1.1 nonaka (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
181 1.1 nonaka printf("%s: mpsmin %u (%u)\n", DEVNAME(sc),
182 1.1 nonaka (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
183 1.8 jdolecek printf("%s: css %"PRIu64"\n", DEVNAME(sc), NVME_CAP_CSS(r8));
184 1.8 jdolecek printf("%s: nssrs %"PRIu64"\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
185 1.8 jdolecek printf("%s: dstrd %"PRIu64"\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
186 1.8 jdolecek printf("%s: to %"PRIu64" msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
187 1.8 jdolecek printf("%s: ams %"PRIu64"\n", DEVNAME(sc), NVME_CAP_AMS(r8));
188 1.8 jdolecek printf("%s: cqr %"PRIu64"\n", DEVNAME(sc), NVME_CAP_CQR(r8));
189 1.8 jdolecek printf("%s: mqes %"PRIu64"\n", DEVNAME(sc), NVME_CAP_MQES(r8));
190 1.1 nonaka
191 1.1 nonaka printf("%s: vs 0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));
192 1.1 nonaka
193 1.1 nonaka r4 = nvme_read4(sc, NVME_CC);
194 1.1 nonaka printf("%s: cc 0x%04x\n", DEVNAME(sc), r4);
195 1.8 jdolecek printf("%s: iocqes %u (%u)\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4),
196 1.8 jdolecek (1 << NVME_CC_IOCQES_R(r4)));
197 1.8 jdolecek printf("%s: iosqes %u (%u)\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4),
198 1.8 jdolecek (1 << NVME_CC_IOSQES_R(r4)));
199 1.1 nonaka printf("%s: shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
200 1.1 nonaka printf("%s: ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
201 1.8 jdolecek printf("%s: mps %u (%u)\n", DEVNAME(sc), NVME_CC_MPS_R(r4),
202 1.8 jdolecek (1 << NVME_CC_MPS_R(r4)));
203 1.1 nonaka printf("%s: css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
204 1.6 jdolecek printf("%s: en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN) ? 1 : 0);
205 1.1 nonaka
206 1.8 jdolecek r4 = nvme_read4(sc, NVME_CSTS);
207 1.8 jdolecek printf("%s: csts 0x%08x\n", DEVNAME(sc), r4);
208 1.8 jdolecek printf("%s: rdy %u\n", DEVNAME(sc), r4 & NVME_CSTS_RDY);
209 1.8 jdolecek printf("%s: cfs %u\n", DEVNAME(sc), r4 & NVME_CSTS_CFS);
210 1.8 jdolecek printf("%s: shst %x\n", DEVNAME(sc), r4 & NVME_CSTS_SHST_MASK);
211 1.8 jdolecek
212 1.8 jdolecek r4 = nvme_read4(sc, NVME_AQA);
213 1.8 jdolecek printf("%s: aqa 0x%08x\n", DEVNAME(sc), r4);
214 1.8 jdolecek printf("%s: acqs %u\n", DEVNAME(sc), NVME_AQA_ACQS_R(r4));
215 1.8 jdolecek printf("%s: asqs %u\n", DEVNAME(sc), NVME_AQA_ASQS_R(r4));
216 1.8 jdolecek
217 1.8 jdolecek printf("%s: asq 0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
218 1.8 jdolecek printf("%s: acq 0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
219 1.1 nonaka #undef DEVNAME
220 1.1 nonaka }
221 1.1 nonaka #endif /* NVME_DEBUG */
222 1.1 nonaka
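/*
 * Wait for CSTS.RDY to match the requested value, polling once per
 * millisecond.  Returns ENXIO if the controller does not get there
 * within the CAP.TO-derived timeout (sc_rdy_to).
 */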
223 1.1 nonaka static int
224 1.1 nonaka nvme_ready(struct nvme_softc *sc, uint32_t rdy)
225 1.1 nonaka {
226 1.1 nonaka u_int i = 0;
227 1.1 nonaka
228 1.1 nonaka while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
229 1.1 nonaka if (i++ > sc->sc_rdy_to)
230 1.8 jdolecek return ENXIO;
231 1.1 nonaka
232 1.1 nonaka delay(1000);
233 1.1 nonaka nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
234 1.1 nonaka }
235 1.1 nonaka
236 1.1 nonaka return 0;
237 1.1 nonaka }
238 1.1 nonaka
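/*
 * Bring the controller up: program the admin queue registers
 * (ASQ/ACQ/AQA), configure CC (queue entry sizes, page size,
 * round-robin arbitration, NVM command set), set CC.EN and wait for
 * CSTS.RDY to assert.
 */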
239 1.1 nonaka static int
240 1.1 nonaka nvme_enable(struct nvme_softc *sc, u_int mps)
241 1.1 nonaka {
242 1.8 jdolecek uint32_t cc, csts;
243 1.38 nonaka int error;
244 1.1 nonaka
245 1.1 nonaka cc = nvme_read4(sc, NVME_CC);
246 1.8 jdolecek csts = nvme_read4(sc, NVME_CSTS);
247 1.38 nonaka
248 1.38 nonaka /*
249 1.38 nonaka * See note in nvme_disable. Short circuit if we're already enabled.
250 1.38 nonaka */
251 1.7 jdolecek if (ISSET(cc, NVME_CC_EN)) {
252 1.8 jdolecek if (ISSET(csts, NVME_CSTS_RDY))
253 1.38 nonaka return 0;
254 1.8 jdolecek
255 1.8 jdolecek goto waitready;
256 1.38 nonaka } else {
    257 1.38   nonaka 		/* EN == 0 already; wait for RDY == 0 or fail */
258 1.38 nonaka error = nvme_ready(sc, 0);
259 1.38 nonaka if (error)
260 1.38 nonaka return error;
261 1.7 jdolecek }
262 1.1 nonaka
263 1.1 nonaka nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
264 1.1 nonaka nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
265 1.8 jdolecek delay(5000);
266 1.1 nonaka nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
267 1.1 nonaka nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
268 1.8 jdolecek delay(5000);
269 1.8 jdolecek
270 1.8 jdolecek nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
271 1.8 jdolecek NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
272 1.8 jdolecek nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
273 1.8 jdolecek delay(5000);
274 1.1 nonaka
275 1.1 nonaka CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
276 1.1 nonaka NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
277 1.1 nonaka SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
278 1.1 nonaka SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
279 1.1 nonaka SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
280 1.1 nonaka SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
281 1.1 nonaka SET(cc, NVME_CC_MPS(mps));
282 1.1 nonaka SET(cc, NVME_CC_EN);
283 1.1 nonaka
284 1.1 nonaka nvme_write4(sc, NVME_CC, cc);
285 1.1 nonaka nvme_barrier(sc, 0, sc->sc_ios,
286 1.1 nonaka BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
287 1.1 nonaka
288 1.8 jdolecek waitready:
289 1.1 nonaka return nvme_ready(sc, NVME_CSTS_RDY);
290 1.1 nonaka }
291 1.1 nonaka
292 1.1 nonaka static int
293 1.1 nonaka nvme_disable(struct nvme_softc *sc)
294 1.1 nonaka {
295 1.1 nonaka uint32_t cc, csts;
296 1.38 nonaka int error;
297 1.1 nonaka
298 1.1 nonaka cc = nvme_read4(sc, NVME_CC);
299 1.8 jdolecek csts = nvme_read4(sc, NVME_CSTS);
300 1.8 jdolecek
301 1.38 nonaka /*
    302 1.38   nonaka 	 * Per section 3.1.5 of the NVMe 1.3 spec, transitioning CC.EN from
    303 1.38   nonaka 	 * 0 to 1 when CSTS.RDY is 1, or from 1 to 0 when CSTS.RDY is 0,
    304 1.38   nonaka 	 * "has undefined results".  So make sure that CSTS.RDY
305 1.38 nonaka * isn't the desired value. Short circuit if we're already disabled.
306 1.38 nonaka */
307 1.38 nonaka if (ISSET(cc, NVME_CC_EN)) {
308 1.38 nonaka if (!ISSET(csts, NVME_CSTS_RDY)) {
309 1.38 nonaka /* EN == 1, wait for RDY == 1 or fail */
310 1.38 nonaka error = nvme_ready(sc, NVME_CSTS_RDY);
311 1.38 nonaka if (error)
312 1.38 nonaka return error;
313 1.38 nonaka }
314 1.38 nonaka } else {
    315 1.38   nonaka 		/* EN == 0 already; wait for RDY == 0 */
316 1.38 nonaka if (!ISSET(csts, NVME_CSTS_RDY))
317 1.38 nonaka return 0;
318 1.38 nonaka
319 1.38 nonaka goto waitready;
320 1.38 nonaka }
321 1.1 nonaka
322 1.1 nonaka CLR(cc, NVME_CC_EN);
323 1.1 nonaka nvme_write4(sc, NVME_CC, cc);
324 1.8 jdolecek nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_READ);
325 1.1 nonaka
326 1.38 nonaka /*
327 1.38 nonaka * Some drives have issues with accessing the mmio after we disable,
328 1.38 nonaka * so delay for a bit after we write the bit to cope with these issues.
329 1.38 nonaka */
330 1.38 nonaka if (ISSET(sc->sc_quirks, NVME_QUIRK_DELAY_B4_CHK_RDY))
    331 1.38   nonaka 		delay(B4_CHK_RDY_DELAY_MS * 1000);	/* macro is ms, delay() takes us */
332 1.38 nonaka
333 1.38 nonaka waitready:
334 1.1 nonaka return nvme_ready(sc, 0);
335 1.1 nonaka }
336 1.1 nonaka
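/*
 * Common attach code: disable the controller, set up the admin queue,
 * identify the controller, create the I/O queues and attach the
 * namespace children.
 */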
337 1.1 nonaka int
338 1.1 nonaka nvme_attach(struct nvme_softc *sc)
339 1.1 nonaka {
340 1.1 nonaka uint64_t cap;
341 1.1 nonaka uint32_t reg;
342 1.1 nonaka u_int mps = PAGE_SHIFT;
343 1.44.2.3 martin u_int ncq, nsq;
344 1.20 jdolecek uint16_t adminq_entries = nvme_adminq_size;
345 1.20 jdolecek uint16_t ioq_entries = nvme_ioq_size;
346 1.1 nonaka int i;
347 1.1 nonaka
348 1.1 nonaka reg = nvme_read4(sc, NVME_VS);
349 1.1 nonaka if (reg == 0xffffffff) {
350 1.1 nonaka aprint_error_dev(sc->sc_dev, "invalid mapping\n");
351 1.1 nonaka return 1;
352 1.1 nonaka }
353 1.1 nonaka
354 1.27 nonaka if (NVME_VS_TER(reg) == 0)
355 1.27 nonaka aprint_normal_dev(sc->sc_dev, "NVMe %d.%d\n", NVME_VS_MJR(reg),
356 1.27 nonaka NVME_VS_MNR(reg));
357 1.27 nonaka else
358 1.27 nonaka aprint_normal_dev(sc->sc_dev, "NVMe %d.%d.%d\n", NVME_VS_MJR(reg),
359 1.27 nonaka NVME_VS_MNR(reg), NVME_VS_TER(reg));
360 1.1 nonaka
361 1.1 nonaka cap = nvme_read8(sc, NVME_CAP);
362 1.44.2.6 martin sc->sc_dstrd = NVME_CAP_DSTRD(cap);
363 1.1 nonaka if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
364 1.1 nonaka aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
365 1.1 nonaka "is greater than CPU page size %u\n",
366 1.1 nonaka 1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
367 1.1 nonaka return 1;
368 1.1 nonaka }
369 1.1 nonaka if (NVME_CAP_MPSMAX(cap) < mps)
370 1.1 nonaka mps = NVME_CAP_MPSMAX(cap);
371 1.15 nonaka if (ioq_entries > NVME_CAP_MQES(cap))
372 1.15 nonaka ioq_entries = NVME_CAP_MQES(cap);
373 1.1 nonaka
374 1.8 jdolecek /* set initial values to be used for admin queue during probe */
375 1.1 nonaka sc->sc_rdy_to = NVME_CAP_TO(cap);
376 1.1 nonaka sc->sc_mps = 1 << mps;
377 1.1 nonaka sc->sc_mdts = MAXPHYS;
378 1.43 mrg sc->sc_max_sgl = btoc(round_page(sc->sc_mdts));
379 1.1 nonaka
380 1.1 nonaka if (nvme_disable(sc) != 0) {
381 1.1 nonaka aprint_error_dev(sc->sc_dev, "unable to disable controller\n");
382 1.1 nonaka return 1;
383 1.1 nonaka }
384 1.1 nonaka
385 1.44.2.6 martin sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries,
386 1.44.2.6 martin sc->sc_dstrd);
387 1.1 nonaka if (sc->sc_admin_q == NULL) {
388 1.1 nonaka aprint_error_dev(sc->sc_dev,
389 1.1 nonaka "unable to allocate admin queue\n");
390 1.1 nonaka return 1;
391 1.1 nonaka }
392 1.1 nonaka if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q))
393 1.1 nonaka goto free_admin_q;
394 1.1 nonaka
395 1.1 nonaka if (nvme_enable(sc, mps) != 0) {
396 1.1 nonaka aprint_error_dev(sc->sc_dev, "unable to enable controller\n");
397 1.1 nonaka goto disestablish_admin_q;
398 1.1 nonaka }
399 1.1 nonaka
400 1.1 nonaka if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
401 1.1 nonaka aprint_error_dev(sc->sc_dev, "unable to identify controller\n");
402 1.1 nonaka goto disable;
403 1.1 nonaka }
404 1.44.2.2 martin if (sc->sc_nn == 0) {
405 1.44.2.2 martin aprint_error_dev(sc->sc_dev, "namespace not found\n");
406 1.44.2.2 martin goto disable;
407 1.44.2.2 martin }
408 1.1 nonaka
409 1.1 nonaka /* we know how big things are now */
410 1.1 nonaka sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;
411 1.1 nonaka
412 1.1 nonaka /* reallocate ccbs of admin queue with new max sgl. */
413 1.1 nonaka nvme_ccbs_free(sc->sc_admin_q);
414 1.1 nonaka nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);
415 1.1 nonaka
416 1.23 nonaka if (sc->sc_use_mq) {
417 1.23 nonaka /* Limit the number of queues to the number allocated in HW */
418 1.44.2.3 martin if (nvme_set_number_of_queues(sc, sc->sc_nq, &ncq, &nsq) != 0) {
419 1.23 nonaka aprint_error_dev(sc->sc_dev,
420 1.23 nonaka "unable to get number of queues\n");
421 1.23 nonaka goto disable;
422 1.23 nonaka }
423 1.44.2.3 martin if (sc->sc_nq > ncq)
424 1.44.2.3 martin sc->sc_nq = ncq;
425 1.44.2.3 martin if (sc->sc_nq > nsq)
426 1.44.2.3 martin sc->sc_nq = nsq;
427 1.23 nonaka }
428 1.23 nonaka
429 1.1 nonaka sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
430 1.1 nonaka for (i = 0; i < sc->sc_nq; i++) {
431 1.44.2.6 martin sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries,
432 1.44.2.6 martin sc->sc_dstrd);
433 1.1 nonaka if (sc->sc_q[i] == NULL) {
434 1.1 nonaka aprint_error_dev(sc->sc_dev,
435 1.1 nonaka "unable to allocate io queue\n");
436 1.1 nonaka goto free_q;
437 1.1 nonaka }
438 1.1 nonaka if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
439 1.1 nonaka aprint_error_dev(sc->sc_dev,
440 1.1 nonaka "unable to create io queue\n");
441 1.1 nonaka nvme_q_free(sc, sc->sc_q[i]);
442 1.1 nonaka goto free_q;
443 1.1 nonaka }
444 1.1 nonaka }
445 1.1 nonaka
446 1.1 nonaka if (!sc->sc_use_mq)
447 1.1 nonaka nvme_write4(sc, NVME_INTMC, 1);
448 1.1 nonaka
449 1.9 jdolecek /* probe subdevices */
450 1.1 nonaka sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
451 1.1 nonaka KM_SLEEP);
452 1.14 pgoyette nvme_rescan(sc->sc_dev, "nvme", &i);
453 1.1 nonaka
454 1.1 nonaka return 0;
455 1.1 nonaka
456 1.1 nonaka free_q:
457 1.1 nonaka while (--i >= 0) {
458 1.1 nonaka nvme_q_delete(sc, sc->sc_q[i]);
459 1.1 nonaka nvme_q_free(sc, sc->sc_q[i]);
460 1.1 nonaka }
461 1.1 nonaka disable:
462 1.1 nonaka nvme_disable(sc);
463 1.1 nonaka disestablish_admin_q:
464 1.1 nonaka sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
465 1.1 nonaka free_admin_q:
466 1.1 nonaka nvme_q_free(sc, sc->sc_admin_q);
467 1.1 nonaka
468 1.1 nonaka return 1;
469 1.1 nonaka }
470 1.1 nonaka
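/*
 * Scan all namespaces reported by the controller and attach a child
 * device (via config_found) for every namespace that identifies
 * successfully and uses a supported LBA format.
 */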
471 1.14 pgoyette int
472 1.14 pgoyette nvme_rescan(device_t self, const char *attr, const int *flags)
473 1.14 pgoyette {
474 1.14 pgoyette struct nvme_softc *sc = device_private(self);
475 1.14 pgoyette struct nvme_attach_args naa;
476 1.44.2.4 martin struct nvm_namespace_format *f;
477 1.44.2.4 martin struct nvme_namespace *ns;
478 1.15 nonaka uint64_t cap;
479 1.15 nonaka int ioq_entries = nvme_ioq_size;
480 1.15 nonaka int i;
481 1.44.2.4 martin int error;
482 1.15 nonaka
483 1.15 nonaka cap = nvme_read8(sc, NVME_CAP);
484 1.15 nonaka if (ioq_entries > NVME_CAP_MQES(cap))
485 1.15 nonaka ioq_entries = NVME_CAP_MQES(cap);
486 1.14 pgoyette
487 1.44.2.4 martin for (i = 1; i <= sc->sc_nn; i++) {
488 1.44.2.4 martin if (sc->sc_namespaces[i - 1].dev)
489 1.44.2.4 martin continue;
490 1.44.2.4 martin
491 1.44.2.4 martin /* identify to check for availability */
492 1.44.2.4 martin error = nvme_ns_identify(sc, i);
493 1.44.2.4 martin if (error) {
494 1.44.2.4 martin aprint_error_dev(self, "couldn't identify namespace #%d\n", i);
495 1.44.2.4 martin continue;
496 1.44.2.4 martin }
497 1.44.2.4 martin
498 1.44.2.4 martin ns = nvme_ns_get(sc, i);
499 1.44.2.4 martin KASSERT(ns);
500 1.44.2.4 martin
501 1.44.2.4 martin f = &ns->ident->lbaf[NVME_ID_NS_FLBAS(ns->ident->flbas)];
502 1.44.2.4 martin
503 1.44.2.4 martin /*
504 1.44.2.4 martin * NVME1.0e 6.11 Identify command
505 1.44.2.4 martin *
    506 1.44.2.4  martin 		 * LBADS values smaller than 9 are not supported; a value
    507 1.44.2.4  martin 		 * of zero means that the format is not used.
508 1.44.2.4 martin */
509 1.44.2.4 martin if (f->lbads < 9) {
510 1.44.2.4 martin if (f->lbads > 0)
511 1.44.2.4 martin aprint_error_dev(self,
512 1.44.2.4 martin "unsupported logical data size %u\n", f->lbads);
513 1.14 pgoyette continue;
514 1.44.2.4 martin }
515 1.44.2.4 martin
516 1.14 pgoyette memset(&naa, 0, sizeof(naa));
517 1.44.2.4 martin naa.naa_nsid = i;
518 1.21 jdolecek naa.naa_qentries = (ioq_entries - 1) * sc->sc_nq;
519 1.21 jdolecek naa.naa_maxphys = sc->sc_mdts;
520 1.42 mlelstv naa.naa_typename = sc->sc_modelname;
521 1.44.2.4 martin sc->sc_namespaces[i - 1].dev = config_found(sc->sc_dev, &naa,
522 1.14 pgoyette nvme_print);
523 1.14 pgoyette }
524 1.14 pgoyette return 0;
525 1.14 pgoyette }
526 1.14 pgoyette
527 1.1 nonaka static int
528 1.1 nonaka nvme_print(void *aux, const char *pnp)
529 1.1 nonaka {
530 1.1 nonaka struct nvme_attach_args *naa = aux;
531 1.1 nonaka
532 1.1 nonaka if (pnp)
533 1.1 nonaka aprint_normal("at %s", pnp);
534 1.1 nonaka
535 1.1 nonaka if (naa->naa_nsid > 0)
536 1.1 nonaka aprint_normal(" nsid %d", naa->naa_nsid);
537 1.1 nonaka
538 1.1 nonaka return UNCONF;
539 1.1 nonaka }
540 1.1 nonaka
541 1.1 nonaka int
542 1.1 nonaka nvme_detach(struct nvme_softc *sc, int flags)
543 1.1 nonaka {
544 1.1 nonaka int i, error;
545 1.1 nonaka
546 1.1 nonaka error = config_detach_children(sc->sc_dev, flags);
547 1.1 nonaka if (error)
548 1.1 nonaka return error;
549 1.1 nonaka
550 1.1 nonaka error = nvme_shutdown(sc);
551 1.1 nonaka if (error)
552 1.1 nonaka return error;
553 1.1 nonaka
    554 1.9  jdolecek 	/* from now on we are committed to detach; the following will never fail */
555 1.44.2.6 martin sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
556 1.1 nonaka for (i = 0; i < sc->sc_nq; i++)
557 1.1 nonaka nvme_q_free(sc, sc->sc_q[i]);
558 1.1 nonaka kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
559 1.1 nonaka nvme_q_free(sc, sc->sc_admin_q);
560 1.1 nonaka
561 1.1 nonaka return 0;
562 1.1 nonaka }
563 1.1 nonaka
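/*
 * Power management: suspend performs an orderly shutdown of the
 * controller; resume re-enables it, resets the admin queue and
 * recreates the I/O queues.
 */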
564 1.44.2.6 martin int
565 1.44.2.6 martin nvme_suspend(struct nvme_softc *sc)
566 1.44.2.6 martin {
567 1.44.2.6 martin
568 1.44.2.6 martin return nvme_shutdown(sc);
569 1.44.2.6 martin }
570 1.44.2.6 martin
571 1.44.2.6 martin int
572 1.44.2.6 martin nvme_resume(struct nvme_softc *sc)
573 1.44.2.6 martin {
574 1.44.2.6 martin int ioq_entries = nvme_ioq_size;
575 1.44.2.6 martin uint64_t cap;
576 1.44.2.6 martin int i, error;
577 1.44.2.6 martin
578 1.44.2.6 martin error = nvme_disable(sc);
579 1.44.2.6 martin if (error) {
580 1.44.2.6 martin device_printf(sc->sc_dev, "unable to disable controller\n");
581 1.44.2.6 martin return error;
582 1.44.2.6 martin }
583 1.44.2.6 martin
584 1.44.2.6 martin nvme_q_reset(sc, sc->sc_admin_q);
585 1.44.2.6 martin
586 1.44.2.6 martin error = nvme_enable(sc, ffs(sc->sc_mps) - 1);
587 1.44.2.6 martin if (error) {
588 1.44.2.6 martin device_printf(sc->sc_dev, "unable to enable controller\n");
589 1.44.2.6 martin return error;
590 1.44.2.6 martin }
591 1.44.2.6 martin
592 1.44.2.6 martin for (i = 0; i < sc->sc_nq; i++) {
593 1.44.2.6 martin cap = nvme_read8(sc, NVME_CAP);
594 1.44.2.6 martin if (ioq_entries > NVME_CAP_MQES(cap))
595 1.44.2.6 martin ioq_entries = NVME_CAP_MQES(cap);
596 1.44.2.6 martin sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries,
597 1.44.2.6 martin sc->sc_dstrd);
598 1.44.2.6 martin if (sc->sc_q[i] == NULL) {
599 1.44.2.6 martin error = ENOMEM;
600 1.44.2.6 martin device_printf(sc->sc_dev, "unable to allocate io q %d"
601 1.44.2.6 martin "\n", i);
602 1.44.2.6 martin goto disable;
603 1.44.2.6 martin }
604 1.44.2.6 martin if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
605 1.44.2.6 martin error = EIO;
606 1.44.2.6 martin device_printf(sc->sc_dev, "unable to create io q %d"
607 1.44.2.6 martin "\n", i);
608 1.44.2.6 martin nvme_q_free(sc, sc->sc_q[i]);
609 1.44.2.6 martin goto free_q;
610 1.44.2.6 martin }
611 1.44.2.6 martin }
612 1.44.2.6 martin
613 1.44.2.6 martin nvme_write4(sc, NVME_INTMC, 1);
614 1.44.2.6 martin
615 1.44.2.6 martin return 0;
616 1.44.2.6 martin
617 1.44.2.6 martin free_q:
    618 1.44.2.6  martin 	while (i-- > 0)
619 1.44.2.6 martin nvme_q_free(sc, sc->sc_q[i]);
620 1.44.2.6 martin disable:
621 1.44.2.6 martin (void)nvme_disable(sc);
622 1.44.2.6 martin
623 1.44.2.6 martin return error;
624 1.44.2.6 martin }
625 1.44.2.6 martin
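/*
 * Orderly shutdown: delete the I/O queues, request a normal shutdown
 * via CC.SHN and poll CSTS.SHST for up to ~4 seconds for completion.
 * Falls back to simply disabling the controller if any step fails.
 */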
626 1.1 nonaka static int
627 1.1 nonaka nvme_shutdown(struct nvme_softc *sc)
628 1.1 nonaka {
629 1.1 nonaka uint32_t cc, csts;
630 1.1 nonaka bool disabled = false;
631 1.1 nonaka int i;
632 1.1 nonaka
633 1.1 nonaka if (!sc->sc_use_mq)
634 1.1 nonaka nvme_write4(sc, NVME_INTMS, 1);
635 1.1 nonaka
636 1.1 nonaka for (i = 0; i < sc->sc_nq; i++) {
637 1.1 nonaka if (nvme_q_delete(sc, sc->sc_q[i]) != 0) {
638 1.1 nonaka aprint_error_dev(sc->sc_dev,
639 1.1 nonaka "unable to delete io queue %d, disabling\n", i + 1);
640 1.1 nonaka disabled = true;
641 1.1 nonaka }
642 1.1 nonaka }
643 1.1 nonaka if (disabled)
644 1.1 nonaka goto disable;
645 1.1 nonaka
646 1.1 nonaka cc = nvme_read4(sc, NVME_CC);
647 1.1 nonaka CLR(cc, NVME_CC_SHN_MASK);
648 1.1 nonaka SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
649 1.1 nonaka nvme_write4(sc, NVME_CC, cc);
650 1.1 nonaka
651 1.1 nonaka for (i = 0; i < 4000; i++) {
652 1.1 nonaka nvme_barrier(sc, 0, sc->sc_ios,
653 1.1 nonaka BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
654 1.1 nonaka csts = nvme_read4(sc, NVME_CSTS);
655 1.1 nonaka if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
656 1.1 nonaka return 0;
657 1.1 nonaka
658 1.1 nonaka delay(1000);
659 1.1 nonaka }
660 1.1 nonaka
    661 1.1   nonaka 	aprint_error_dev(sc->sc_dev, "unable to shut down, disabling\n");
662 1.1 nonaka
663 1.1 nonaka disable:
664 1.1 nonaka nvme_disable(sc);
665 1.1 nonaka return 0;
666 1.1 nonaka }
667 1.1 nonaka
668 1.1 nonaka void
669 1.1 nonaka nvme_childdet(device_t self, device_t child)
670 1.1 nonaka {
671 1.1 nonaka struct nvme_softc *sc = device_private(self);
672 1.1 nonaka int i;
673 1.1 nonaka
674 1.1 nonaka for (i = 0; i < sc->sc_nn; i++) {
675 1.1 nonaka if (sc->sc_namespaces[i].dev == child) {
676 1.1 nonaka /* Already freed ns->ident. */
677 1.1 nonaka sc->sc_namespaces[i].dev = NULL;
678 1.1 nonaka break;
679 1.1 nonaka }
680 1.1 nonaka }
681 1.1 nonaka }
682 1.1 nonaka
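/*
 * Issue an admin IDENTIFY for the given namespace and cache the
 * (byte-swapped) result in ns->ident.  Returns 0 immediately if the
 * namespace has already been identified.
 */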
683 1.1 nonaka int
684 1.1 nonaka nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
685 1.1 nonaka {
686 1.1 nonaka struct nvme_sqe sqe;
687 1.1 nonaka struct nvm_identify_namespace *identify;
688 1.19 jdolecek struct nvme_dmamem *mem;
689 1.1 nonaka struct nvme_ccb *ccb;
690 1.1 nonaka struct nvme_namespace *ns;
691 1.19 jdolecek int rv;
692 1.1 nonaka
693 1.1 nonaka KASSERT(nsid > 0);
694 1.1 nonaka
695 1.44.2.5 martin ns = nvme_ns_get(sc, nsid);
696 1.44.2.5 martin KASSERT(ns);
697 1.44.2.5 martin
698 1.44.2.5 martin if (ns->ident != NULL)
699 1.44.2.5 martin return 0;
700 1.44.2.5 martin
701 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
702 1.11 jdolecek KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
703 1.1 nonaka
704 1.19 jdolecek mem = nvme_dmamem_alloc(sc, sizeof(*identify));
705 1.32 christos if (mem == NULL) {
706 1.32 christos nvme_ccb_put(sc->sc_admin_q, ccb);
707 1.19 jdolecek return ENOMEM;
708 1.32 christos }
709 1.1 nonaka
710 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
711 1.1 nonaka sqe.opcode = NVM_ADMIN_IDENTIFY;
712 1.1 nonaka htolem32(&sqe.nsid, nsid);
713 1.1 nonaka htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
714 1.1 nonaka htolem32(&sqe.cdw10, 0);
715 1.1 nonaka
716 1.1 nonaka ccb->ccb_done = nvme_empty_done;
717 1.1 nonaka ccb->ccb_cookie = &sqe;
718 1.1 nonaka
719 1.1 nonaka nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
720 1.19 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
721 1.1 nonaka nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
722 1.1 nonaka
723 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
724 1.1 nonaka
725 1.19 jdolecek if (rv != 0) {
726 1.19 jdolecek rv = EIO;
727 1.1 nonaka goto done;
728 1.1 nonaka }
729 1.1 nonaka
730 1.1 nonaka /* commit */
731 1.1 nonaka
732 1.1 nonaka identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
733 1.19 jdolecek *identify = *((volatile struct nvm_identify_namespace *)NVME_DMA_KVA(mem));
734 1.39 nonaka
735 1.39 nonaka /* Convert data to host endian */
736 1.39 nonaka nvme_identify_namespace_swapbytes(identify);
737 1.1 nonaka
738 1.1 nonaka ns->ident = identify;
739 1.1 nonaka
740 1.1 nonaka done:
741 1.19 jdolecek nvme_dmamem_free(sc, mem);
742 1.1 nonaka
743 1.19 jdolecek return rv;
744 1.1 nonaka }
745 1.1 nonaka
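/*
 * Start an I/O on a namespace: map the data buffer, set up a PRP list
 * when more than two segments are needed, then either poll for
 * completion (NVME_NS_CTX_F_POLL) or submit asynchronously, in which
 * case nnc_done is called from the completion path.
 */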
746 1.1 nonaka int
747 1.11 jdolecek nvme_ns_dobio(struct nvme_softc *sc, uint16_t nsid, void *cookie,
748 1.11 jdolecek struct buf *bp, void *data, size_t datasize,
749 1.11 jdolecek int secsize, daddr_t blkno, int flags, nvme_nnc_done nnc_done)
750 1.1 nonaka {
751 1.44 jmcneill struct nvme_queue *q = nvme_get_q(sc, bp, false);
752 1.1 nonaka struct nvme_ccb *ccb;
753 1.1 nonaka bus_dmamap_t dmap;
754 1.1 nonaka int i, error;
755 1.1 nonaka
756 1.34 jdolecek ccb = nvme_ccb_get(q, false);
757 1.1 nonaka if (ccb == NULL)
758 1.1 nonaka return EAGAIN;
759 1.1 nonaka
760 1.1 nonaka ccb->ccb_done = nvme_ns_io_done;
761 1.11 jdolecek ccb->ccb_cookie = cookie;
762 1.11 jdolecek
763 1.11 jdolecek /* namespace context */
764 1.11 jdolecek ccb->nnc_nsid = nsid;
765 1.11 jdolecek ccb->nnc_flags = flags;
766 1.11 jdolecek ccb->nnc_buf = bp;
767 1.11 jdolecek ccb->nnc_datasize = datasize;
768 1.11 jdolecek ccb->nnc_secsize = secsize;
769 1.11 jdolecek ccb->nnc_blkno = blkno;
770 1.11 jdolecek ccb->nnc_done = nnc_done;
771 1.1 nonaka
772 1.1 nonaka dmap = ccb->ccb_dmamap;
773 1.11 jdolecek error = bus_dmamap_load(sc->sc_dmat, dmap, data,
774 1.11 jdolecek datasize, NULL,
775 1.11 jdolecek (ISSET(flags, NVME_NS_CTX_F_POLL) ?
776 1.1 nonaka BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
777 1.11 jdolecek (ISSET(flags, NVME_NS_CTX_F_READ) ?
778 1.1 nonaka BUS_DMA_READ : BUS_DMA_WRITE));
779 1.1 nonaka if (error) {
780 1.1 nonaka nvme_ccb_put(q, ccb);
781 1.1 nonaka return error;
782 1.1 nonaka }
783 1.1 nonaka
784 1.1 nonaka bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
785 1.11 jdolecek ISSET(flags, NVME_NS_CTX_F_READ) ?
786 1.1 nonaka BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
787 1.1 nonaka
788 1.1 nonaka if (dmap->dm_nsegs > 2) {
789 1.1 nonaka for (i = 1; i < dmap->dm_nsegs; i++) {
790 1.1 nonaka htolem64(&ccb->ccb_prpl[i - 1],
791 1.1 nonaka dmap->dm_segs[i].ds_addr);
792 1.1 nonaka }
793 1.1 nonaka bus_dmamap_sync(sc->sc_dmat,
794 1.1 nonaka NVME_DMA_MAP(q->q_ccb_prpls),
795 1.1 nonaka ccb->ccb_prpl_off,
796 1.16 nonaka sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
797 1.1 nonaka BUS_DMASYNC_PREWRITE);
798 1.1 nonaka }
799 1.1 nonaka
800 1.11 jdolecek if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
801 1.7 jdolecek if (nvme_poll(sc, q, ccb, nvme_ns_io_fill, NVME_TIMO_PT) != 0)
802 1.1 nonaka return EIO;
803 1.1 nonaka return 0;
804 1.1 nonaka }
805 1.1 nonaka
806 1.1 nonaka nvme_q_submit(sc, q, ccb, nvme_ns_io_fill);
807 1.1 nonaka return 0;
808 1.1 nonaka }
809 1.1 nonaka
810 1.1 nonaka static void
811 1.1 nonaka nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
812 1.1 nonaka {
813 1.1 nonaka struct nvme_sqe_io *sqe = slot;
814 1.1 nonaka bus_dmamap_t dmap = ccb->ccb_dmamap;
815 1.1 nonaka
816 1.11 jdolecek sqe->opcode = ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
817 1.1 nonaka NVM_CMD_READ : NVM_CMD_WRITE;
818 1.11 jdolecek htolem32(&sqe->nsid, ccb->nnc_nsid);
819 1.1 nonaka
820 1.1 nonaka htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
821 1.1 nonaka switch (dmap->dm_nsegs) {
822 1.1 nonaka case 1:
823 1.1 nonaka break;
824 1.1 nonaka case 2:
825 1.1 nonaka htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
826 1.1 nonaka break;
827 1.1 nonaka default:
828 1.1 nonaka /* the prp list is already set up and synced */
829 1.1 nonaka htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
830 1.1 nonaka break;
831 1.1 nonaka }
832 1.1 nonaka
833 1.11 jdolecek htolem64(&sqe->slba, ccb->nnc_blkno);
834 1.11 jdolecek
835 1.26 jdolecek if (ISSET(ccb->nnc_flags, NVME_NS_CTX_F_FUA))
836 1.26 jdolecek htolem16(&sqe->ioflags, NVM_SQE_IO_FUA);
837 1.26 jdolecek
838 1.11 jdolecek /* guaranteed by upper layers, but check just in case */
839 1.11 jdolecek KASSERT((ccb->nnc_datasize % ccb->nnc_secsize) == 0);
840 1.11 jdolecek htolem16(&sqe->nlb, (ccb->nnc_datasize / ccb->nnc_secsize) - 1);
841 1.1 nonaka }
842 1.1 nonaka
843 1.1 nonaka static void
844 1.1 nonaka nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
845 1.1 nonaka struct nvme_cqe *cqe)
846 1.1 nonaka {
847 1.1 nonaka struct nvme_softc *sc = q->q_sc;
848 1.1 nonaka bus_dmamap_t dmap = ccb->ccb_dmamap;
849 1.11 jdolecek void *nnc_cookie = ccb->ccb_cookie;
850 1.11 jdolecek nvme_nnc_done nnc_done = ccb->nnc_done;
851 1.11 jdolecek struct buf *bp = ccb->nnc_buf;
852 1.1 nonaka
853 1.1 nonaka if (dmap->dm_nsegs > 2) {
854 1.1 nonaka bus_dmamap_sync(sc->sc_dmat,
855 1.1 nonaka NVME_DMA_MAP(q->q_ccb_prpls),
856 1.1 nonaka ccb->ccb_prpl_off,
857 1.16 nonaka sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
858 1.1 nonaka BUS_DMASYNC_POSTWRITE);
859 1.1 nonaka }
860 1.1 nonaka
861 1.1 nonaka bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
862 1.11 jdolecek ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
863 1.1 nonaka BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
864 1.1 nonaka
865 1.1 nonaka bus_dmamap_unload(sc->sc_dmat, dmap);
866 1.1 nonaka nvme_ccb_put(q, ccb);
867 1.1 nonaka
868 1.25 jdolecek nnc_done(nnc_cookie, bp, lemtoh16(&cqe->flags), lemtoh32(&cqe->cdw0));
869 1.25 jdolecek }
870 1.25 jdolecek
871 1.25 jdolecek /*
872 1.25 jdolecek * If there is no volatile write cache, it makes no sense to issue
873 1.25 jdolecek * flush commands or query for the status.
874 1.25 jdolecek */
875 1.34 jdolecek static bool
876 1.25 jdolecek nvme_has_volatile_write_cache(struct nvme_softc *sc)
877 1.25 jdolecek {
878 1.25 jdolecek /* sc_identify is filled during attachment */
879 1.25 jdolecek return ((sc->sc_identify.vwc & NVME_ID_CTRLR_VWC_PRESENT) != 0);
880 1.1 nonaka }
881 1.1 nonaka
882 1.34 jdolecek static bool
883 1.34 jdolecek nvme_ns_sync_finished(void *cookie)
884 1.34 jdolecek {
885 1.34 jdolecek int *result = cookie;
886 1.34 jdolecek
887 1.34 jdolecek return (*result != 0);
888 1.34 jdolecek }
889 1.34 jdolecek
890 1.1 nonaka int
891 1.34 jdolecek nvme_ns_sync(struct nvme_softc *sc, uint16_t nsid, int flags)
892 1.1 nonaka {
893 1.44 jmcneill struct nvme_queue *q = nvme_get_q(sc, NULL, true);
894 1.1 nonaka struct nvme_ccb *ccb;
895 1.34 jdolecek int result = 0;
896 1.34 jdolecek
897 1.34 jdolecek if (!nvme_has_volatile_write_cache(sc)) {
898 1.34 jdolecek /* cache not present, no value in trying to flush it */
899 1.34 jdolecek return 0;
900 1.34 jdolecek }
901 1.1 nonaka
902 1.34 jdolecek ccb = nvme_ccb_get(q, true);
903 1.44 jmcneill KASSERT(ccb != NULL);
904 1.1 nonaka
905 1.1 nonaka ccb->ccb_done = nvme_ns_sync_done;
906 1.34 jdolecek ccb->ccb_cookie = &result;
907 1.1 nonaka
908 1.11 jdolecek /* namespace context */
909 1.11 jdolecek ccb->nnc_nsid = nsid;
910 1.11 jdolecek ccb->nnc_flags = flags;
911 1.34 jdolecek ccb->nnc_done = NULL;
912 1.11 jdolecek
913 1.11 jdolecek if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
914 1.7 jdolecek if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill, NVME_TIMO_SY) != 0)
915 1.1 nonaka return EIO;
916 1.1 nonaka return 0;
917 1.1 nonaka }
918 1.1 nonaka
919 1.1 nonaka nvme_q_submit(sc, q, ccb, nvme_ns_sync_fill);
920 1.34 jdolecek
921 1.34 jdolecek /* wait for completion */
922 1.34 jdolecek nvme_q_wait_complete(sc, q, nvme_ns_sync_finished, &result);
923 1.34 jdolecek KASSERT(result != 0);
924 1.34 jdolecek
925 1.34 jdolecek return (result > 0) ? 0 : EIO;
926 1.1 nonaka }
927 1.1 nonaka
928 1.1 nonaka static void
929 1.1 nonaka nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
930 1.1 nonaka {
931 1.1 nonaka struct nvme_sqe *sqe = slot;
932 1.1 nonaka
933 1.1 nonaka sqe->opcode = NVM_CMD_FLUSH;
934 1.11 jdolecek htolem32(&sqe->nsid, ccb->nnc_nsid);
935 1.1 nonaka }
936 1.1 nonaka
937 1.1 nonaka static void
938 1.1 nonaka nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
939 1.1 nonaka struct nvme_cqe *cqe)
940 1.1 nonaka {
941 1.34 jdolecek int *result = ccb->ccb_cookie;
942 1.34 jdolecek uint16_t status = NVME_CQE_SC(lemtoh16(&cqe->flags));
943 1.34 jdolecek
944 1.34 jdolecek if (status == NVME_CQE_SC_SUCCESS)
945 1.34 jdolecek *result = 1;
946 1.34 jdolecek else
947 1.34 jdolecek *result = -1;
948 1.1 nonaka
949 1.1 nonaka nvme_ccb_put(q, ccb);
950 1.34 jdolecek }
951 1.34 jdolecek
952 1.34 jdolecek static bool
953 1.34 jdolecek nvme_getcache_finished(void *xc)
954 1.34 jdolecek {
955 1.34 jdolecek int *addr = xc;
956 1.1 nonaka
957 1.34 jdolecek return (*addr != 0);
958 1.25 jdolecek }
959 1.25 jdolecek
960 1.25 jdolecek /*
961 1.25 jdolecek * Get status of volatile write cache. Always asynchronous.
962 1.25 jdolecek */
963 1.25 jdolecek int
964 1.34 jdolecek nvme_admin_getcache(struct nvme_softc *sc, int *addr)
965 1.25 jdolecek {
966 1.25 jdolecek struct nvme_ccb *ccb;
967 1.25 jdolecek struct nvme_queue *q = sc->sc_admin_q;
968 1.34 jdolecek int result = 0, error;
969 1.25 jdolecek
970 1.34 jdolecek if (!nvme_has_volatile_write_cache(sc)) {
971 1.34 jdolecek /* cache simply not present */
972 1.34 jdolecek *addr = 0;
973 1.34 jdolecek return 0;
974 1.34 jdolecek }
975 1.34 jdolecek
976 1.34 jdolecek ccb = nvme_ccb_get(q, true);
977 1.34 jdolecek KASSERT(ccb != NULL);
978 1.25 jdolecek
979 1.25 jdolecek ccb->ccb_done = nvme_getcache_done;
980 1.34 jdolecek ccb->ccb_cookie = &result;
981 1.25 jdolecek
982 1.25 jdolecek /* namespace context */
983 1.25 jdolecek ccb->nnc_flags = 0;
984 1.34 jdolecek ccb->nnc_done = NULL;
985 1.25 jdolecek
986 1.25 jdolecek nvme_q_submit(sc, q, ccb, nvme_getcache_fill);
987 1.34 jdolecek
988 1.34 jdolecek /* wait for completion */
989 1.34 jdolecek nvme_q_wait_complete(sc, q, nvme_getcache_finished, &result);
990 1.34 jdolecek KASSERT(result != 0);
991 1.34 jdolecek
992 1.34 jdolecek if (result > 0) {
993 1.34 jdolecek *addr = result;
994 1.34 jdolecek error = 0;
995 1.34 jdolecek } else
996 1.34 jdolecek error = EINVAL;
997 1.34 jdolecek
998 1.34 jdolecek return error;
999 1.25 jdolecek }
1000 1.25 jdolecek
1001 1.25 jdolecek static void
1002 1.25 jdolecek nvme_getcache_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1003 1.25 jdolecek {
1004 1.25 jdolecek struct nvme_sqe *sqe = slot;
1005 1.25 jdolecek
1006 1.25 jdolecek sqe->opcode = NVM_ADMIN_GET_FEATURES;
1007 1.39 nonaka htolem32(&sqe->cdw10, NVM_FEATURE_VOLATILE_WRITE_CACHE);
1008 1.41 jdolecek htolem32(&sqe->cdw11, NVM_VOLATILE_WRITE_CACHE_WCE);
1009 1.25 jdolecek }
1010 1.25 jdolecek
1011 1.25 jdolecek static void
1012 1.25 jdolecek nvme_getcache_done(struct nvme_queue *q, struct nvme_ccb *ccb,
1013 1.25 jdolecek struct nvme_cqe *cqe)
1014 1.25 jdolecek {
1015 1.34 jdolecek int *addr = ccb->ccb_cookie;
1016 1.34 jdolecek uint16_t status = NVME_CQE_SC(lemtoh16(&cqe->flags));
1017 1.34 jdolecek uint32_t cdw0 = lemtoh32(&cqe->cdw0);
1018 1.34 jdolecek int result;
1019 1.34 jdolecek
1020 1.34 jdolecek if (status == NVME_CQE_SC_SUCCESS) {
1021 1.34 jdolecek result = 0;
1022 1.34 jdolecek
1023 1.34 jdolecek /*
   1024 1.34  jdolecek 		 * DPO is not supported, and the Dataset Management (DSM)
   1025 1.34  jdolecek 		 * field doesn't specify the same semantics. FUA is always supported.
1026 1.34 jdolecek */
1027 1.34 jdolecek result = DKCACHE_FUA;
1028 1.34 jdolecek
1029 1.41 jdolecek if (cdw0 & NVM_VOLATILE_WRITE_CACHE_WCE)
1030 1.34 jdolecek result |= DKCACHE_WRITE;
1031 1.34 jdolecek
1032 1.34 jdolecek /*
1033 1.34 jdolecek * If volatile write cache is present, the flag shall also be
1034 1.34 jdolecek * settable.
1035 1.34 jdolecek */
1036 1.34 jdolecek result |= DKCACHE_WCHANGE;
1037 1.41 jdolecek
1038 1.41 jdolecek /*
   1039 1.41  jdolecek 		 * The ONCS field indicates whether the optional SAVE is also
   1040 1.41  jdolecek 		 * supported for Set Features. According to spec v1.3, however,
   1041 1.41  jdolecek 		 * the Volatile Write Cache feature doesn't support persistence
   1042 1.41  jdolecek 		 * across power cycles/resets.
1043 1.41 jdolecek */
1044 1.41 jdolecek
1045 1.34 jdolecek } else {
1046 1.34 jdolecek result = -1;
1047 1.34 jdolecek }
1048 1.34 jdolecek
1049 1.34 jdolecek *addr = result;
1050 1.25 jdolecek
1051 1.25 jdolecek nvme_ccb_put(q, ccb);
1052 1.1 nonaka }
1053 1.1 nonaka
1054 1.41 jdolecek struct nvme_setcache_state {
1055 1.41 jdolecek int dkcache;
1056 1.41 jdolecek int result;
1057 1.41 jdolecek };
1058 1.41 jdolecek
1059 1.41 jdolecek static bool
1060 1.41 jdolecek nvme_setcache_finished(void *xc)
1061 1.41 jdolecek {
1062 1.41 jdolecek struct nvme_setcache_state *st = xc;
1063 1.41 jdolecek
1064 1.41 jdolecek return (st->result != 0);
1065 1.41 jdolecek }
1066 1.41 jdolecek
1067 1.41 jdolecek static void
1068 1.41 jdolecek nvme_setcache_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1069 1.41 jdolecek {
1070 1.41 jdolecek struct nvme_sqe *sqe = slot;
1071 1.41 jdolecek struct nvme_setcache_state *st = ccb->ccb_cookie;
1072 1.41 jdolecek
1073 1.41 jdolecek sqe->opcode = NVM_ADMIN_SET_FEATURES;
1074 1.41 jdolecek htolem32(&sqe->cdw10, NVM_FEATURE_VOLATILE_WRITE_CACHE);
1075 1.41 jdolecek if (st->dkcache & DKCACHE_WRITE)
1076 1.41 jdolecek htolem32(&sqe->cdw11, NVM_VOLATILE_WRITE_CACHE_WCE);
1077 1.41 jdolecek }
1078 1.41 jdolecek
1079 1.41 jdolecek static void
1080 1.41 jdolecek nvme_setcache_done(struct nvme_queue *q, struct nvme_ccb *ccb,
1081 1.41 jdolecek struct nvme_cqe *cqe)
1082 1.41 jdolecek {
1083 1.41 jdolecek struct nvme_setcache_state *st = ccb->ccb_cookie;
1084 1.41 jdolecek uint16_t status = NVME_CQE_SC(lemtoh16(&cqe->flags));
1085 1.41 jdolecek
1086 1.41 jdolecek if (status == NVME_CQE_SC_SUCCESS) {
1087 1.41 jdolecek st->result = 1;
1088 1.41 jdolecek } else {
1089 1.41 jdolecek st->result = -1;
1090 1.41 jdolecek }
1091 1.41 jdolecek
1092 1.41 jdolecek nvme_ccb_put(q, ccb);
1093 1.41 jdolecek }
1094 1.41 jdolecek
1095 1.41 jdolecek /*
1096 1.41 jdolecek * Set status of volatile write cache. Always asynchronous.
1097 1.41 jdolecek */
1098 1.41 jdolecek int
1099 1.41 jdolecek nvme_admin_setcache(struct nvme_softc *sc, int dkcache)
1100 1.41 jdolecek {
1101 1.41 jdolecek struct nvme_ccb *ccb;
1102 1.41 jdolecek struct nvme_queue *q = sc->sc_admin_q;
1103 1.41 jdolecek int error;
1104 1.41 jdolecek struct nvme_setcache_state st;
1105 1.41 jdolecek
1106 1.41 jdolecek if (!nvme_has_volatile_write_cache(sc)) {
1107 1.41 jdolecek /* cache simply not present */
1108 1.41 jdolecek return EOPNOTSUPP;
1109 1.41 jdolecek }
1110 1.41 jdolecek
1111 1.41 jdolecek if (dkcache & ~(DKCACHE_WRITE)) {
1112 1.41 jdolecek /* unsupported parameters */
1113 1.41 jdolecek return EOPNOTSUPP;
1114 1.41 jdolecek }
1115 1.41 jdolecek
1116 1.41 jdolecek ccb = nvme_ccb_get(q, true);
1117 1.41 jdolecek KASSERT(ccb != NULL);
1118 1.41 jdolecek
1119 1.41 jdolecek memset(&st, 0, sizeof(st));
1120 1.41 jdolecek st.dkcache = dkcache;
1121 1.41 jdolecek
1122 1.41 jdolecek ccb->ccb_done = nvme_setcache_done;
1123 1.41 jdolecek ccb->ccb_cookie = &st;
1124 1.41 jdolecek
1125 1.41 jdolecek /* namespace context */
1126 1.41 jdolecek ccb->nnc_flags = 0;
1127 1.41 jdolecek ccb->nnc_done = NULL;
1128 1.41 jdolecek
1129 1.41 jdolecek nvme_q_submit(sc, q, ccb, nvme_setcache_fill);
1130 1.41 jdolecek
1131 1.41 jdolecek /* wait for completion */
1132 1.41 jdolecek nvme_q_wait_complete(sc, q, nvme_setcache_finished, &st);
1133 1.41 jdolecek KASSERT(st.result != 0);
1134 1.41 jdolecek
1135 1.41 jdolecek if (st.result > 0)
1136 1.41 jdolecek error = 0;
1137 1.41 jdolecek else
1138 1.41 jdolecek error = EINVAL;
1139 1.41 jdolecek
1140 1.41 jdolecek return error;
1141 1.41 jdolecek }
1142 1.41 jdolecek
1143 1.1 nonaka void
1144 1.1 nonaka nvme_ns_free(struct nvme_softc *sc, uint16_t nsid)
1145 1.1 nonaka {
1146 1.1 nonaka struct nvme_namespace *ns;
1147 1.1 nonaka struct nvm_identify_namespace *identify;
1148 1.1 nonaka
1149 1.1 nonaka ns = nvme_ns_get(sc, nsid);
1150 1.1 nonaka KASSERT(ns);
1151 1.1 nonaka
1152 1.1 nonaka identify = ns->ident;
1153 1.1 nonaka ns->ident = NULL;
1154 1.1 nonaka if (identify != NULL)
1155 1.1 nonaka kmem_free(identify, sizeof(*identify));
1156 1.1 nonaka }
1157 1.1 nonaka
1158 1.35 jdolecek struct nvme_pt_state {
1159 1.35 jdolecek struct nvme_pt_command *pt;
1160 1.35 jdolecek bool finished;
1161 1.35 jdolecek };
1162 1.35 jdolecek
1163 1.1 nonaka static void
1164 1.3 nonaka nvme_pt_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1165 1.3 nonaka {
1166 1.3 nonaka struct nvme_softc *sc = q->q_sc;
1167 1.3 nonaka struct nvme_sqe *sqe = slot;
1168 1.35 jdolecek struct nvme_pt_state *state = ccb->ccb_cookie;
1169 1.35 jdolecek struct nvme_pt_command *pt = state->pt;
1170 1.3 nonaka bus_dmamap_t dmap = ccb->ccb_dmamap;
1171 1.3 nonaka int i;
1172 1.3 nonaka
1173 1.3 nonaka sqe->opcode = pt->cmd.opcode;
1174 1.3 nonaka htolem32(&sqe->nsid, pt->cmd.nsid);
1175 1.3 nonaka
1176 1.3 nonaka if (pt->buf != NULL && pt->len > 0) {
1177 1.3 nonaka htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
1178 1.3 nonaka switch (dmap->dm_nsegs) {
1179 1.3 nonaka case 1:
1180 1.3 nonaka break;
1181 1.3 nonaka case 2:
1182 1.3 nonaka htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
1183 1.3 nonaka break;
1184 1.3 nonaka default:
1185 1.3 nonaka for (i = 1; i < dmap->dm_nsegs; i++) {
1186 1.3 nonaka htolem64(&ccb->ccb_prpl[i - 1],
1187 1.3 nonaka dmap->dm_segs[i].ds_addr);
1188 1.3 nonaka }
1189 1.3 nonaka bus_dmamap_sync(sc->sc_dmat,
1190 1.3 nonaka NVME_DMA_MAP(q->q_ccb_prpls),
1191 1.3 nonaka ccb->ccb_prpl_off,
1192 1.16 nonaka sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
1193 1.3 nonaka BUS_DMASYNC_PREWRITE);
1194 1.3 nonaka htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
1195 1.3 nonaka break;
1196 1.3 nonaka }
1197 1.3 nonaka }
1198 1.3 nonaka
1199 1.3 nonaka htolem32(&sqe->cdw10, pt->cmd.cdw10);
1200 1.3 nonaka htolem32(&sqe->cdw11, pt->cmd.cdw11);
1201 1.3 nonaka htolem32(&sqe->cdw12, pt->cmd.cdw12);
1202 1.3 nonaka htolem32(&sqe->cdw13, pt->cmd.cdw13);
1203 1.3 nonaka htolem32(&sqe->cdw14, pt->cmd.cdw14);
1204 1.3 nonaka htolem32(&sqe->cdw15, pt->cmd.cdw15);
1205 1.3 nonaka }
1206 1.3 nonaka
1207 1.3 nonaka static void
1208 1.3 nonaka nvme_pt_done(struct nvme_queue *q, struct nvme_ccb *ccb, struct nvme_cqe *cqe)
1209 1.3 nonaka {
1210 1.3 nonaka struct nvme_softc *sc = q->q_sc;
1211 1.35 jdolecek struct nvme_pt_state *state = ccb->ccb_cookie;
1212 1.35 jdolecek struct nvme_pt_command *pt = state->pt;
1213 1.3 nonaka bus_dmamap_t dmap = ccb->ccb_dmamap;
1214 1.3 nonaka
1215 1.3 nonaka if (pt->buf != NULL && pt->len > 0) {
1216 1.3 nonaka if (dmap->dm_nsegs > 2) {
1217 1.3 nonaka bus_dmamap_sync(sc->sc_dmat,
1218 1.3 nonaka NVME_DMA_MAP(q->q_ccb_prpls),
1219 1.3 nonaka ccb->ccb_prpl_off,
1220 1.16 nonaka sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
1221 1.3 nonaka BUS_DMASYNC_POSTWRITE);
1222 1.3 nonaka }
1223 1.3 nonaka
1224 1.3 nonaka bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1225 1.3 nonaka pt->is_read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1226 1.3 nonaka bus_dmamap_unload(sc->sc_dmat, dmap);
1227 1.3 nonaka }
1228 1.3 nonaka
1229 1.23 nonaka pt->cpl.cdw0 = lemtoh32(&cqe->cdw0);
1230 1.23 nonaka pt->cpl.flags = lemtoh16(&cqe->flags) & ~NVME_CQE_PHASE;
1231 1.35 jdolecek
1232 1.35 jdolecek state->finished = true;
1233 1.35 jdolecek
1234 1.35 jdolecek nvme_ccb_put(q, ccb);
1235 1.35 jdolecek }
1236 1.35 jdolecek
1237 1.35 jdolecek static bool
1238 1.35 jdolecek nvme_pt_finished(void *cookie)
1239 1.35 jdolecek {
1240 1.35 jdolecek struct nvme_pt_state *state = cookie;
1241 1.35 jdolecek
1242 1.35 jdolecek return state->finished;
1243 1.3 nonaka }
1244 1.3 nonaka
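/*
 * Execute a passthrough command on behalf of userland: copy in the
 * data buffer (if any), submit the command on the admin or an I/O
 * queue, wait for completion and copy the result back out.
 */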
1245 1.3 nonaka static int
1246 1.3 nonaka nvme_command_passthrough(struct nvme_softc *sc, struct nvme_pt_command *pt,
1247 1.3 nonaka uint16_t nsid, struct lwp *l, bool is_adminq)
1248 1.3 nonaka {
1249 1.3 nonaka struct nvme_queue *q;
1250 1.3 nonaka struct nvme_ccb *ccb;
1251 1.3 nonaka void *buf = NULL;
1252 1.35 jdolecek struct nvme_pt_state state;
1253 1.3 nonaka int error;
1254 1.3 nonaka
1255 1.9 jdolecek /* limit command size to maximum data transfer size */
1256 1.3 nonaka if ((pt->buf == NULL && pt->len > 0) ||
1257 1.9 jdolecek (pt->buf != NULL && (pt->len == 0 || pt->len > sc->sc_mdts)))
1258 1.3 nonaka return EINVAL;
1259 1.3 nonaka
1260 1.44 jmcneill q = is_adminq ? sc->sc_admin_q : nvme_get_q(sc, NULL, true);
1261 1.34 jdolecek ccb = nvme_ccb_get(q, true);
1262 1.34 jdolecek KASSERT(ccb != NULL);
1263 1.3 nonaka
1264 1.9 jdolecek if (pt->buf != NULL) {
1265 1.9 jdolecek KASSERT(pt->len > 0);
1266 1.3 nonaka buf = kmem_alloc(pt->len, KM_SLEEP);
1267 1.3 nonaka if (!pt->is_read) {
1268 1.3 nonaka error = copyin(pt->buf, buf, pt->len);
1269 1.3 nonaka if (error)
1270 1.3 nonaka goto kmem_free;
1271 1.3 nonaka }
1272 1.3 nonaka error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap, buf,
1273 1.3 nonaka pt->len, NULL,
1274 1.3 nonaka BUS_DMA_WAITOK |
1275 1.3 nonaka (pt->is_read ? BUS_DMA_READ : BUS_DMA_WRITE));
1276 1.3 nonaka if (error)
1277 1.3 nonaka goto kmem_free;
1278 1.3 nonaka bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
1279 1.3 nonaka 0, ccb->ccb_dmamap->dm_mapsize,
1280 1.3 nonaka pt->is_read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1281 1.3 nonaka }
1282 1.3 nonaka
1283 1.35 jdolecek memset(&state, 0, sizeof(state));
1284 1.35 jdolecek state.pt = pt;
1285 1.35 jdolecek state.finished = false;
1286 1.35 jdolecek
1287 1.3 nonaka ccb->ccb_done = nvme_pt_done;
1288 1.35 jdolecek ccb->ccb_cookie = &state;
1289 1.3 nonaka
1290 1.3 nonaka pt->cmd.nsid = nsid;
1291 1.35 jdolecek
1292 1.35 jdolecek nvme_q_submit(sc, q, ccb, nvme_pt_fill);
1293 1.35 jdolecek
1294 1.35 jdolecek /* wait for completion */
1295 1.35 jdolecek nvme_q_wait_complete(sc, q, nvme_pt_finished, &state);
1296 1.35 jdolecek KASSERT(state.finished);
1297 1.3 nonaka
1298 1.3 nonaka error = 0;
1299 1.35 jdolecek
1300 1.3 nonaka if (buf != NULL) {
1301 1.3 nonaka if (error == 0 && pt->is_read)
1302 1.3 nonaka error = copyout(buf, pt->buf, pt->len);
1303 1.3 nonaka kmem_free:
1304 1.3 nonaka kmem_free(buf, pt->len);
1305 1.3 nonaka }
1306 1.35 jdolecek
1307 1.3 nonaka return error;
1308 1.3 nonaka }
1309 1.3 nonaka
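/*
 * Copy the submission queue entry built by 'fill' into the next SQ
 * slot, stamp it with the ccb id and ring the submission queue tail
 * doorbell.
 */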
1310 1.3 nonaka static void
1311 1.1 nonaka nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
1312 1.1 nonaka void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
1313 1.1 nonaka {
1314 1.1 nonaka struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
1315 1.1 nonaka uint32_t tail;
1316 1.1 nonaka
1317 1.1 nonaka mutex_enter(&q->q_sq_mtx);
1318 1.1 nonaka tail = q->q_sq_tail;
1319 1.1 nonaka if (++q->q_sq_tail >= q->q_entries)
1320 1.1 nonaka q->q_sq_tail = 0;
1321 1.1 nonaka
1322 1.1 nonaka sqe += tail;
1323 1.1 nonaka
1324 1.1 nonaka bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
1325 1.1 nonaka sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
1326 1.1 nonaka memset(sqe, 0, sizeof(*sqe));
1327 1.1 nonaka (*fill)(q, ccb, sqe);
1328 1.39 nonaka htolem16(&sqe->cid, ccb->ccb_id);
1329 1.1 nonaka bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
1330 1.1 nonaka sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
1331 1.1 nonaka
1332 1.1 nonaka nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
1333 1.1 nonaka mutex_exit(&q->q_sq_mtx);
1334 1.1 nonaka }
1335 1.1 nonaka
1336 1.1 nonaka struct nvme_poll_state {
1337 1.1 nonaka struct nvme_sqe s;
1338 1.1 nonaka struct nvme_cqe c;
1339 1.34 jdolecek void *cookie;
1340 1.34 jdolecek void (*done)(struct nvme_queue *, struct nvme_ccb *, struct nvme_cqe *);
1341 1.1 nonaka };
1342 1.1 nonaka
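/*
 * Submit a command and busy-wait for its completion, draining the
 * completion queue every 10us.  A negative timo_sec means wait
 * forever; on timeout the done callback is invoked with a zeroed CQE
 * and 1 is returned.
 */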
1343 1.1 nonaka static int
1344 1.1 nonaka nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
1345 1.7 jdolecek void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *), int timo_sec)
1346 1.1 nonaka {
1347 1.1 nonaka struct nvme_poll_state state;
1348 1.1 nonaka uint16_t flags;
1349 1.7 jdolecek int step = 10;
1350 1.7 jdolecek int maxloop = timo_sec * 1000000 / step;
1351 1.7 jdolecek int error = 0;
1352 1.1 nonaka
1353 1.1 nonaka memset(&state, 0, sizeof(state));
1354 1.1 nonaka (*fill)(q, ccb, &state.s);
1355 1.1 nonaka
1356 1.34 jdolecek state.done = ccb->ccb_done;
1357 1.34 jdolecek state.cookie = ccb->ccb_cookie;
1358 1.1 nonaka
1359 1.1 nonaka ccb->ccb_done = nvme_poll_done;
1360 1.1 nonaka ccb->ccb_cookie = &state;
1361 1.1 nonaka
1362 1.1 nonaka nvme_q_submit(sc, q, ccb, nvme_poll_fill);
1363 1.1 nonaka while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
1364 1.1 nonaka if (nvme_q_complete(sc, q) == 0)
1365 1.7 jdolecek delay(step);
1366 1.1 nonaka
1367 1.7 jdolecek if (timo_sec >= 0 && --maxloop <= 0) {
1368 1.7 jdolecek error = ETIMEDOUT;
1369 1.7 jdolecek break;
1370 1.7 jdolecek }
1371 1.1 nonaka }
1372 1.1 nonaka
1373 1.7 jdolecek if (error == 0) {
1374 1.7 jdolecek flags = lemtoh16(&state.c.flags);
1375 1.7 jdolecek return flags & ~NVME_CQE_PHASE;
1376 1.7 jdolecek } else {
1377 1.34 jdolecek /*
   1378 1.34  jdolecek 		 * If the command succeeds later, it would hit a ccb that has
   1379 1.34  jdolecek 		 * already been reused for something else. Not good. Cross
   1380 1.34  jdolecek 		 * fingers and hope for the best. XXX do controller reset?
1381 1.34 jdolecek */
1382 1.34 jdolecek aprint_error_dev(sc->sc_dev, "polled command timed out\n");
1383 1.34 jdolecek
1384 1.34 jdolecek /* Invoke the callback to clean state anyway */
1385 1.34 jdolecek struct nvme_cqe cqe;
1386 1.34 jdolecek memset(&cqe, 0, sizeof(cqe));
1387 1.34 jdolecek ccb->ccb_done(q, ccb, &cqe);
1388 1.34 jdolecek
1389 1.7 jdolecek return 1;
1390 1.7 jdolecek }
1391 1.1 nonaka }
1392 1.1 nonaka
1393 1.1 nonaka static void
1394 1.1 nonaka nvme_poll_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1395 1.1 nonaka {
1396 1.1 nonaka struct nvme_sqe *sqe = slot;
1397 1.1 nonaka struct nvme_poll_state *state = ccb->ccb_cookie;
1398 1.1 nonaka
1399 1.1 nonaka *sqe = state->s;
1400 1.1 nonaka }
1401 1.1 nonaka
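/*
 * Save the CQE for nvme_poll(), set the phase bit in the saved copy so the
 * polling loop sees the completion, then restore the original cookie and
 * chain to the original completion callback.
 */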
1402 1.1 nonaka static void
1403 1.1 nonaka nvme_poll_done(struct nvme_queue *q, struct nvme_ccb *ccb,
1404 1.1 nonaka struct nvme_cqe *cqe)
1405 1.1 nonaka {
1406 1.1 nonaka struct nvme_poll_state *state = ccb->ccb_cookie;
1407 1.1 nonaka
1408 1.1 nonaka state->c = *cqe;
1409 1.44.2.1 martin SET(state->c.flags, htole16(NVME_CQE_PHASE));
1410 1.34 jdolecek
1411 1.34 jdolecek ccb->ccb_cookie = state->cookie;
1412 1.34 jdolecek state->done(q, ccb, &state->c);
1413 1.1 nonaka }
1414 1.1 nonaka
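/* Copy a caller-provided SQE (passed via ccb_cookie) into the queue slot. */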
1415 1.1 nonaka static void
1416 1.1 nonaka nvme_sqe_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1417 1.1 nonaka {
1418 1.1 nonaka struct nvme_sqe *src = ccb->ccb_cookie;
1419 1.1 nonaka struct nvme_sqe *dst = slot;
1420 1.1 nonaka
1421 1.1 nonaka *dst = *src;
1422 1.1 nonaka }
1423 1.1 nonaka
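/* Completion callback for commands that do not need to inspect the CQE. */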
1424 1.1 nonaka static void
1425 1.1 nonaka nvme_empty_done(struct nvme_queue *q, struct nvme_ccb *ccb,
1426 1.1 nonaka struct nvme_cqe *cqe)
1427 1.1 nonaka {
1428 1.1 nonaka }
1429 1.1 nonaka
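/*
 * Process pending entries on the completion queue, invoking each ccb's
 * done callback, and advance the completion queue head doorbell if any
 * were handled.  Returns the number of completions processed.
 */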
1430 1.1 nonaka static int
1431 1.1 nonaka nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
1432 1.1 nonaka {
1433 1.1 nonaka struct nvme_ccb *ccb;
1434 1.1 nonaka struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
1435 1.1 nonaka uint16_t flags;
1436 1.1 nonaka int rv = 0;
1437 1.1 nonaka
1438 1.9 jdolecek mutex_enter(&q->q_cq_mtx);
1439 1.1 nonaka
1440 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
1441 1.1 nonaka for (;;) {
1442 1.9 jdolecek cqe = &ring[q->q_cq_head];
1443 1.1 nonaka flags = lemtoh16(&cqe->flags);
1444 1.1 nonaka if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
1445 1.1 nonaka break;
1446 1.1 nonaka
1447 1.1 nonaka ccb = &q->q_ccbs[lemtoh16(&cqe->cid)];
1448 1.1 nonaka
1449 1.9 jdolecek if (++q->q_cq_head >= q->q_entries) {
1450 1.9 jdolecek q->q_cq_head = 0;
1451 1.1 nonaka q->q_cq_phase ^= NVME_CQE_PHASE;
1452 1.1 nonaka }
1453 1.1 nonaka
1454 1.18 jdolecek #ifdef DEBUG
1455 1.18 jdolecek /*
1456 1.18 jdolecek * If we get a spurious completion notification, something
1457 1.18 jdolecek * is seriously hosed up. Very likely a DMA to some random
1458 1.18 jdolecek * memory location happened, so just bail out.
1459 1.18 jdolecek */
1460 1.18 jdolecek if ((intptr_t)ccb->ccb_cookie == NVME_CCB_FREE) {
1461 1.18 jdolecek panic("%s: invalid ccb detected",
1462 1.18 jdolecek device_xname(sc->sc_dev));
1463 1.18 jdolecek /* NOTREACHED */
1464 1.18 jdolecek }
1465 1.18 jdolecek #endif
1466 1.20 jdolecek
1467 1.20 jdolecek rv++;
1468 1.9 jdolecek
1469 1.9 jdolecek /*
1470 1.10 jdolecek * Unlock the mutex before calling the ccb_done callback
1471 1.9 jdolecek * and re-lock afterwards. The callback triggers lddone()
1472 1.9 jdolecek * which schedules another i/o, and also calls nvme_ccb_put().
1473 1.9 jdolecek * Unlock/relock avoids possibility of deadlock.
1474 1.9 jdolecek */
1475 1.9 jdolecek mutex_exit(&q->q_cq_mtx);
1476 1.9 jdolecek ccb->ccb_done(q, ccb, cqe);
1477 1.9 jdolecek mutex_enter(&q->q_cq_mtx);
1478 1.1 nonaka }
1479 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1480 1.1 nonaka
1481 1.1 nonaka if (rv)
1482 1.9 jdolecek nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
1483 1.9 jdolecek
1484 1.1 nonaka mutex_exit(&q->q_cq_mtx);
1485 1.1 nonaka
1486 1.1 nonaka return rv;
1487 1.1 nonaka }
1488 1.1 nonaka
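/*
 * Sleep on the queue's ccb condvar until the supplied predicate reports
 * that the caller's command has finished.
 */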
1489 1.34 jdolecek static void
1490 1.34 jdolecek nvme_q_wait_complete(struct nvme_softc *sc,
1491 1.34 jdolecek struct nvme_queue *q, bool (*finished)(void *), void *cookie)
1492 1.34 jdolecek {
1493 1.34 jdolecek mutex_enter(&q->q_ccb_mtx);
1494 1.34 jdolecek if (finished(cookie))
1495 1.34 jdolecek goto out;
1496 1.34 jdolecek
1497 1.34 jdolecek for (;;) {
1498 1.34 jdolecek q->q_ccb_waiting = true;
1499 1.34 jdolecek cv_wait(&q->q_ccb_wait, &q->q_ccb_mtx);
1500 1.34 jdolecek
1501 1.34 jdolecek if (finished(cookie))
1502 1.34 jdolecek break;
1503 1.34 jdolecek }
1504 1.34 jdolecek
1505 1.34 jdolecek out:
1506 1.34 jdolecek mutex_exit(&q->q_ccb_mtx);
1507 1.34 jdolecek }
1508 1.34 jdolecek
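/*
 * Issue IDENTIFY CONTROLLER, cache the (byte-swapped) result in
 * sc_identify, and derive the maximum transfer size (MDTS) and the
 * namespace count from it.
 */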
1509 1.1 nonaka static int
1510 1.1 nonaka nvme_identify(struct nvme_softc *sc, u_int mps)
1511 1.1 nonaka {
1512 1.1 nonaka char sn[41], mn[81], fr[17];
1513 1.1 nonaka struct nvm_identify_controller *identify;
1514 1.19 jdolecek struct nvme_dmamem *mem;
1515 1.1 nonaka struct nvme_ccb *ccb;
1516 1.1 nonaka u_int mdts;
1517 1.19 jdolecek int rv = 1;
1518 1.1 nonaka
1519 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1520 1.11 jdolecek KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
1521 1.1 nonaka
1522 1.19 jdolecek mem = nvme_dmamem_alloc(sc, sizeof(*identify));
1523 1.19 jdolecek if (mem == NULL)
1524 1.19 jdolecek return 1;
1525 1.1 nonaka
1526 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1527 1.19 jdolecek ccb->ccb_cookie = mem;
1528 1.1 nonaka
1529 1.1 nonaka nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
1530 1.19 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
1531 1.7 jdolecek NVME_TIMO_IDENT);
1532 1.1 nonaka nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
1533 1.1 nonaka
1534 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
1535 1.1 nonaka
1536 1.19 jdolecek if (rv != 0)
1537 1.1 nonaka goto done;
1538 1.1 nonaka
1539 1.1 nonaka identify = NVME_DMA_KVA(mem);
1540 1.39 nonaka sc->sc_identify = *identify;
1541 1.39 nonaka identify = NULL;
1542 1.39 nonaka
1543 1.39 nonaka /* Convert data to host endian */
1544 1.39 nonaka nvme_identify_controller_swapbytes(&sc->sc_identify);
1545 1.1 nonaka
1546 1.39 nonaka strnvisx(sn, sizeof(sn), (const char *)sc->sc_identify.sn,
1547 1.39 nonaka sizeof(sc->sc_identify.sn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1548 1.39 nonaka strnvisx(mn, sizeof(mn), (const char *)sc->sc_identify.mn,
1549 1.39 nonaka sizeof(sc->sc_identify.mn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1550 1.39 nonaka strnvisx(fr, sizeof(fr), (const char *)sc->sc_identify.fr,
1551 1.39 nonaka sizeof(sc->sc_identify.fr), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1552 1.1 nonaka aprint_normal_dev(sc->sc_dev, "%s, firmware %s, serial %s\n", mn, fr,
1553 1.1 nonaka sn);
1554 1.1 nonaka
1555 1.42 mlelstv strlcpy(sc->sc_modelname, mn, sizeof(sc->sc_modelname));
1556 1.42 mlelstv
1557 1.39 nonaka if (sc->sc_identify.mdts > 0) {
1558 1.39 nonaka mdts = (1 << sc->sc_identify.mdts) * (1 << mps);
1559 1.1 nonaka if (mdts < sc->sc_mdts)
1560 1.1 nonaka sc->sc_mdts = mdts;
1561 1.1 nonaka }
1562 1.1 nonaka
1563 1.39 nonaka sc->sc_nn = sc->sc_identify.nn;
1564 1.1 nonaka
1565 1.1 nonaka done:
1566 1.19 jdolecek nvme_dmamem_free(sc, mem);
1567 1.1 nonaka
1568 1.19 jdolecek return rv;
1569 1.1 nonaka }
1570 1.1 nonaka
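/*
 * Create the I/O queue pair for q on the controller: the completion queue
 * first (the submission queue references it), then the submission queue.
 * With per-queue MSI/MSI-X, the interrupt vector is established first.
 */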
1571 1.1 nonaka static int
1572 1.1 nonaka nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
1573 1.1 nonaka {
1574 1.1 nonaka struct nvme_sqe_q sqe;
1575 1.1 nonaka struct nvme_ccb *ccb;
1576 1.1 nonaka int rv;
1577 1.1 nonaka
1578 1.9 jdolecek if (sc->sc_use_mq && sc->sc_intr_establish(sc, q->q_id, q) != 0)
1579 1.1 nonaka return 1;
1580 1.1 nonaka
1581 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1582 1.1 nonaka KASSERT(ccb != NULL);
1583 1.1 nonaka
1584 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1585 1.1 nonaka ccb->ccb_cookie = &sqe;
1586 1.1 nonaka
1587 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1588 1.1 nonaka sqe.opcode = NVM_ADMIN_ADD_IOCQ;
1589 1.1 nonaka htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
1590 1.1 nonaka htolem16(&sqe.qsize, q->q_entries - 1);
1591 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1592 1.1 nonaka sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
1593 1.1 nonaka if (sc->sc_use_mq)
1594 1.1 nonaka htolem16(&sqe.cqid, q->q_id); /* qid == vector */
1595 1.1 nonaka
1596 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1597 1.1 nonaka if (rv != 0)
1598 1.1 nonaka goto fail;
1599 1.1 nonaka
1600 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1601 1.1 nonaka ccb->ccb_cookie = &sqe;
1602 1.1 nonaka
1603 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1604 1.1 nonaka sqe.opcode = NVM_ADMIN_ADD_IOSQ;
1605 1.1 nonaka htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
1606 1.1 nonaka htolem16(&sqe.qsize, q->q_entries - 1);
1607 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1608 1.1 nonaka htolem16(&sqe.cqid, q->q_id);
1609 1.1 nonaka sqe.qflags = NVM_SQE_Q_PC;
1610 1.1 nonaka
1611 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1612 1.1 nonaka if (rv != 0)
1613 1.1 nonaka goto fail;
1614 1.1 nonaka
1615 1.40 jdolecek nvme_ccb_put(sc->sc_admin_q, ccb);
1616 1.40 jdolecek return 0;
1617 1.40 jdolecek
1618 1.1 nonaka fail:
1619 1.40 jdolecek if (sc->sc_use_mq)
1620 1.40 jdolecek sc->sc_intr_disestablish(sc, q->q_id);
1621 1.40 jdolecek
1622 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
1623 1.1 nonaka return rv;
1624 1.1 nonaka }
1625 1.1 nonaka
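/*
 * Delete the I/O queue pair for q on the controller: the submission queue
 * first, then the completion queue, and finally tear down the per-queue
 * interrupt if one was established.
 */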
1626 1.1 nonaka static int
1627 1.1 nonaka nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
1628 1.1 nonaka {
1629 1.1 nonaka struct nvme_sqe_q sqe;
1630 1.1 nonaka struct nvme_ccb *ccb;
1631 1.1 nonaka int rv;
1632 1.1 nonaka
1633 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1634 1.1 nonaka KASSERT(ccb != NULL);
1635 1.1 nonaka
1636 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1637 1.1 nonaka ccb->ccb_cookie = &sqe;
1638 1.1 nonaka
1639 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1640 1.1 nonaka sqe.opcode = NVM_ADMIN_DEL_IOSQ;
1641 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1642 1.1 nonaka
1643 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1644 1.1 nonaka if (rv != 0)
1645 1.1 nonaka goto fail;
1646 1.1 nonaka
1647 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1648 1.1 nonaka ccb->ccb_cookie = &sqe;
1649 1.1 nonaka
1650 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1651 1.1 nonaka sqe.opcode = NVM_ADMIN_DEL_IOCQ;
1652 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1653 1.1 nonaka
1654 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1655 1.1 nonaka if (rv != 0)
1656 1.1 nonaka goto fail;
1657 1.1 nonaka
1658 1.1 nonaka fail:
1659 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
1660 1.1 nonaka
1661 1.1 nonaka if (rv == 0 && sc->sc_use_mq) {
1662 1.1 nonaka if (sc->sc_intr_disestablish(sc, q->q_id))
1663 1.1 nonaka rv = 1;
1664 1.1 nonaka }
1665 1.1 nonaka
1666 1.1 nonaka return rv;
1667 1.1 nonaka }
1668 1.1 nonaka
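/* Fill an SQE for IDENTIFY CONTROLLER (CNS 1), with the DMA buffer as PRP1. */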
1669 1.1 nonaka static void
1670 1.1 nonaka nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1671 1.1 nonaka {
1672 1.1 nonaka struct nvme_sqe *sqe = slot;
1673 1.1 nonaka struct nvme_dmamem *mem = ccb->ccb_cookie;
1674 1.1 nonaka
1675 1.1 nonaka sqe->opcode = NVM_ADMIN_IDENTIFY;
1676 1.19 jdolecek htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
1677 1.1 nonaka htolem32(&sqe->cdw10, 1);
1678 1.1 nonaka }
1679 1.1 nonaka
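/*
 * Request nq I/O submission/completion queues via the SET FEATURES
 * (Number of Queues) admin command; the feature uses zero-based counts,
 * hence the -1/+1 adjustments.  The controller reports how many queues it
 * actually granted in *ncqa (completion) and *nsqa (submission).
 */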
1680 1.1 nonaka static int
1681 1.44.2.3 martin nvme_set_number_of_queues(struct nvme_softc *sc, u_int nq, u_int *ncqa,
1682 1.44.2.3 martin u_int *nsqa)
1683 1.23 nonaka {
1684 1.36 jdolecek struct nvme_pt_state state;
1685 1.23 nonaka struct nvme_pt_command pt;
1686 1.23 nonaka struct nvme_ccb *ccb;
1687 1.23 nonaka int rv;
1688 1.23 nonaka
1689 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1690 1.23 nonaka KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
1691 1.23 nonaka
1692 1.23 nonaka memset(&pt, 0, sizeof(pt));
1693 1.44.2.3 martin pt.cmd.opcode = NVM_ADMIN_SET_FEATURES;
1694 1.44.2.3 martin htolem32(&pt.cmd.cdw10, NVM_FEATURE_NUMBER_OF_QUEUES);
1695 1.44.2.3 martin htolem32(&pt.cmd.cdw11, ((nq - 1) << 16) | (nq - 1));
1696 1.23 nonaka
1697 1.36 jdolecek memset(&state, 0, sizeof(state));
1698 1.36 jdolecek state.pt = &pt;
1699 1.36 jdolecek state.finished = false;
1700 1.36 jdolecek
1701 1.23 nonaka ccb->ccb_done = nvme_pt_done;
1702 1.36 jdolecek ccb->ccb_cookie = &state;
1703 1.23 nonaka
1704 1.23 nonaka rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_pt_fill, NVME_TIMO_QOP);
1705 1.23 nonaka
1706 1.23 nonaka if (rv != 0) {
1707 1.44.2.3 martin *ncqa = *nsqa = 0;
1708 1.23 nonaka return EIO;
1709 1.23 nonaka }
1710 1.23 nonaka
1711 1.44.2.3 martin *ncqa = (pt.cpl.cdw0 >> 16) + 1;
1712 1.44.2.3 martin *nsqa = (pt.cpl.cdw0 & 0xffff) + 1;
1713 1.23 nonaka
1714 1.23 nonaka return 0;
1715 1.23 nonaka }
1716 1.23 nonaka
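/*
 * Allocate nccbs command control blocks for q, create a DMA map for each,
 * carve up a shared PRP list area between them, and put them all on the
 * queue's free list.
 */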
1717 1.23 nonaka static int
1718 1.20 jdolecek nvme_ccbs_alloc(struct nvme_queue *q, uint16_t nccbs)
1719 1.1 nonaka {
1720 1.1 nonaka struct nvme_softc *sc = q->q_sc;
1721 1.1 nonaka struct nvme_ccb *ccb;
1722 1.1 nonaka bus_addr_t off;
1723 1.1 nonaka uint64_t *prpl;
1724 1.1 nonaka u_int i;
1725 1.1 nonaka
1726 1.1 nonaka mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
1727 1.34 jdolecek cv_init(&q->q_ccb_wait, "nvmeqw");
1728 1.34 jdolecek q->q_ccb_waiting = false;
1729 1.1 nonaka SIMPLEQ_INIT(&q->q_ccb_list);
1730 1.1 nonaka
1731 1.1 nonaka q->q_ccbs = kmem_alloc(sizeof(*ccb) * nccbs, KM_SLEEP);
1732 1.1 nonaka
1733 1.1 nonaka q->q_nccbs = nccbs;
1734 1.19 jdolecek q->q_ccb_prpls = nvme_dmamem_alloc(sc,
1735 1.19 jdolecek sizeof(*prpl) * sc->sc_max_sgl * nccbs);
1736 1.1 nonaka
1737 1.1 nonaka prpl = NVME_DMA_KVA(q->q_ccb_prpls);
1738 1.1 nonaka off = 0;
1739 1.1 nonaka
1740 1.1 nonaka for (i = 0; i < nccbs; i++) {
1741 1.1 nonaka ccb = &q->q_ccbs[i];
1742 1.1 nonaka
1743 1.1 nonaka if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
1744 1.1 nonaka sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
1745 1.1 nonaka sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1746 1.1 nonaka &ccb->ccb_dmamap) != 0)
1747 1.1 nonaka goto free_maps;
1748 1.1 nonaka
1749 1.1 nonaka ccb->ccb_id = i;
1750 1.1 nonaka ccb->ccb_prpl = prpl;
1751 1.1 nonaka ccb->ccb_prpl_off = off;
1752 1.1 nonaka ccb->ccb_prpl_dva = NVME_DMA_DVA(q->q_ccb_prpls) + off;
1753 1.1 nonaka
1754 1.1 nonaka SIMPLEQ_INSERT_TAIL(&q->q_ccb_list, ccb, ccb_entry);
1755 1.1 nonaka
1756 1.1 nonaka prpl += sc->sc_max_sgl;
1757 1.1 nonaka off += sizeof(*prpl) * sc->sc_max_sgl;
1758 1.1 nonaka }
1759 1.1 nonaka
1760 1.1 nonaka return 0;
1761 1.1 nonaka
1762 1.1 nonaka free_maps:
1763 1.1 nonaka nvme_ccbs_free(q);
1764 1.1 nonaka return 1;
1765 1.1 nonaka }
1766 1.1 nonaka
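/*
 * Take a ccb off the queue's free list.  If wait is true, sleep until one
 * becomes available; otherwise return NULL when the list is empty.
 */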
1767 1.1 nonaka static struct nvme_ccb *
1768 1.34 jdolecek nvme_ccb_get(struct nvme_queue *q, bool wait)
1769 1.1 nonaka {
1770 1.20 jdolecek struct nvme_ccb *ccb = NULL;
1771 1.1 nonaka
1772 1.1 nonaka mutex_enter(&q->q_ccb_mtx);
1773 1.34 jdolecek again:
1774 1.33 jdolecek ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
1775 1.33 jdolecek if (ccb != NULL) {
1776 1.1 nonaka SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1777 1.18 jdolecek #ifdef DEBUG
1778 1.18 jdolecek ccb->ccb_cookie = NULL;
1779 1.18 jdolecek #endif
1780 1.34 jdolecek } else {
1781 1.34 jdolecek if (__predict_false(wait)) {
1782 1.34 jdolecek q->q_ccb_waiting = true;
1783 1.34 jdolecek cv_wait(&q->q_ccb_wait, &q->q_ccb_mtx);
1784 1.34 jdolecek goto again;
1785 1.34 jdolecek }
1786 1.18 jdolecek }
1787 1.1 nonaka mutex_exit(&q->q_ccb_mtx);
1788 1.1 nonaka
1789 1.1 nonaka return ccb;
1790 1.1 nonaka }
1791 1.1 nonaka
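/* Return a ccb to the free list and wake up anybody waiting for one. */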
1792 1.1 nonaka static void
1793 1.1 nonaka nvme_ccb_put(struct nvme_queue *q, struct nvme_ccb *ccb)
1794 1.1 nonaka {
1795 1.1 nonaka
1796 1.1 nonaka mutex_enter(&q->q_ccb_mtx);
1797 1.18 jdolecek #ifdef DEBUG
1798 1.18 jdolecek ccb->ccb_cookie = (void *)NVME_CCB_FREE;
1799 1.18 jdolecek #endif
1800 1.1 nonaka SIMPLEQ_INSERT_HEAD(&q->q_ccb_list, ccb, ccb_entry);
1801 1.34 jdolecek
1802 1.34 jdolecek /* It's unlikely there are any waiters; this path is not used for regular I/O. */
1803 1.34 jdolecek if (__predict_false(q->q_ccb_waiting)) {
1804 1.34 jdolecek q->q_ccb_waiting = false;
1805 1.34 jdolecek cv_broadcast(&q->q_ccb_wait);
1806 1.34 jdolecek }
1807 1.34 jdolecek
1808 1.1 nonaka mutex_exit(&q->q_ccb_mtx);
1809 1.1 nonaka }
1810 1.1 nonaka
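/*
 * Tear down all ccbs for q: destroy their DMA maps and free the shared PRP
 * list area and the ccb array.
 */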
1811 1.1 nonaka static void
1812 1.1 nonaka nvme_ccbs_free(struct nvme_queue *q)
1813 1.1 nonaka {
1814 1.1 nonaka struct nvme_softc *sc = q->q_sc;
1815 1.1 nonaka struct nvme_ccb *ccb;
1816 1.1 nonaka
1817 1.1 nonaka mutex_enter(&q->q_ccb_mtx);
1818 1.1 nonaka while ((ccb = SIMPLEQ_FIRST(&q->q_ccb_list)) != NULL) {
1819 1.1 nonaka SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1820 1.1 nonaka bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1821 1.1 nonaka }
1822 1.1 nonaka mutex_exit(&q->q_ccb_mtx);
1823 1.1 nonaka
1824 1.19 jdolecek nvme_dmamem_free(sc, q->q_ccb_prpls);
1825 1.1 nonaka kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
1826 1.1 nonaka q->q_ccbs = NULL;
1827 1.34 jdolecek cv_destroy(&q->q_ccb_wait);
1828 1.1 nonaka mutex_destroy(&q->q_ccb_mtx);
1829 1.1 nonaka }
1830 1.1 nonaka
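/*
 * Allocate a queue pair: DMA memory for the submission and completion
 * rings, the doorbell offsets for this queue id, the ring mutexes, and the
 * per-queue ccbs.
 */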
1831 1.1 nonaka static struct nvme_queue *
1832 1.1 nonaka nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
1833 1.1 nonaka {
1834 1.1 nonaka struct nvme_queue *q;
1835 1.1 nonaka
1836 1.1 nonaka q = kmem_alloc(sizeof(*q), KM_SLEEP);
1837 1.1 nonaka q->q_sc = sc;
1838 1.19 jdolecek q->q_sq_dmamem = nvme_dmamem_alloc(sc,
1839 1.19 jdolecek sizeof(struct nvme_sqe) * entries);
1840 1.19 jdolecek if (q->q_sq_dmamem == NULL)
1841 1.1 nonaka goto free;
1842 1.1 nonaka
1843 1.19 jdolecek q->q_cq_dmamem = nvme_dmamem_alloc(sc,
1844 1.19 jdolecek sizeof(struct nvme_cqe) * entries);
1845 1.19 jdolecek if (q->q_cq_dmamem == NULL)
1846 1.1 nonaka goto free_sq;
1847 1.1 nonaka
1848 1.1 nonaka memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1849 1.1 nonaka memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1850 1.1 nonaka
1851 1.1 nonaka mutex_init(&q->q_sq_mtx, MUTEX_DEFAULT, IPL_BIO);
1852 1.1 nonaka mutex_init(&q->q_cq_mtx, MUTEX_DEFAULT, IPL_BIO);
1853 1.1 nonaka q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
1854 1.1 nonaka q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
1855 1.1 nonaka q->q_id = id;
1856 1.1 nonaka q->q_entries = entries;
1857 1.1 nonaka q->q_sq_tail = 0;
1858 1.1 nonaka q->q_cq_head = 0;
1859 1.1 nonaka q->q_cq_phase = NVME_CQE_PHASE;
1860 1.1 nonaka
1861 1.1 nonaka nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1862 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1863 1.1 nonaka
1864 1.20 jdolecek /*
1865 1.20 jdolecek * Due to definition of full and empty queue (queue is empty
1866 1.20 jdolecek * when head == tail, full when tail is one less than head),
1867 1.20 jdolecek * we can actually only have (entries - 1) in-flight commands.
1868 1.20 jdolecek */
1869 1.20 jdolecek if (nvme_ccbs_alloc(q, entries - 1) != 0) {
1870 1.1 nonaka aprint_error_dev(sc->sc_dev, "unable to allocate ccbs\n");
1871 1.1 nonaka goto free_cq;
1872 1.1 nonaka }
1873 1.1 nonaka
1874 1.1 nonaka return q;
1875 1.1 nonaka
1876 1.1 nonaka free_cq:
1877 1.19 jdolecek nvme_dmamem_free(sc, q->q_cq_dmamem);
1878 1.1 nonaka free_sq:
1879 1.19 jdolecek nvme_dmamem_free(sc, q->q_sq_dmamem);
1880 1.1 nonaka free:
1881 1.1 nonaka kmem_free(q, sizeof(*q));
1882 1.1 nonaka
1883 1.1 nonaka return NULL;
1884 1.1 nonaka }
1885 1.1 nonaka
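/*
 * Return a queue pair to its initial state (zeroed rings, reset head, tail
 * and phase) so it can be registered with the controller again.
 */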
1886 1.1 nonaka static void
1887 1.44.2.6 martin nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q)
1888 1.44.2.6 martin {
1889 1.44.2.6 martin
1890 1.44.2.6 martin memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1891 1.44.2.6 martin memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1892 1.44.2.6 martin
1893 1.44.2.6 martin q->q_sqtdbl = NVME_SQTDBL(q->q_id, sc->sc_dstrd);
1894 1.44.2.6 martin q->q_cqhdbl = NVME_CQHDBL(q->q_id, sc->sc_dstrd);
1895 1.44.2.6 martin
1896 1.44.2.6 martin q->q_sq_tail = 0;
1897 1.44.2.6 martin q->q_cq_head = 0;
1898 1.44.2.6 martin q->q_cq_phase = NVME_CQE_PHASE;
1899 1.44.2.6 martin
1900 1.44.2.6 martin nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1901 1.44.2.6 martin nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1902 1.44.2.6 martin }
1903 1.44.2.6 martin
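/* Free a queue pair previously set up by nvme_q_alloc(). */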
1904 1.44.2.6 martin static void
1905 1.1 nonaka nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
1906 1.1 nonaka {
1907 1.1 nonaka nvme_ccbs_free(q);
1908 1.9 jdolecek mutex_destroy(&q->q_sq_mtx);
1909 1.9 jdolecek mutex_destroy(&q->q_cq_mtx);
1910 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
1911 1.1 nonaka nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
1912 1.19 jdolecek nvme_dmamem_free(sc, q->q_cq_dmamem);
1913 1.19 jdolecek nvme_dmamem_free(sc, q->q_sq_dmamem);
1914 1.1 nonaka kmem_free(q, sizeof(*q));
1915 1.1 nonaka }
1916 1.1 nonaka
1917 1.1 nonaka int
1918 1.1 nonaka nvme_intr(void *xsc)
1919 1.1 nonaka {
1920 1.1 nonaka struct nvme_softc *sc = xsc;
1921 1.1 nonaka
1922 1.10 jdolecek /*
1923 1.10 jdolecek * INTx is level triggered; the controller deasserts the interrupt only
1924 1.10 jdolecek * when we advance the completion queue head via a write to the doorbell.
1925 1.17 jdolecek * Tell the controller to block the interrupts while we process
1926 1.17 jdolecek * the queue(s).
1927 1.10 jdolecek */
1928 1.17 jdolecek nvme_write4(sc, NVME_INTMS, 1);
1929 1.17 jdolecek
1930 1.17 jdolecek softint_schedule(sc->sc_softih[0]);
1931 1.17 jdolecek
1932 1.17 jdolecek /* don't know, might not have been for us */
1933 1.17 jdolecek return 1;
1934 1.17 jdolecek }
1935 1.17 jdolecek
1936 1.17 jdolecek void
1937 1.17 jdolecek nvme_softintr_intx(void *xq)
1938 1.17 jdolecek {
1939 1.17 jdolecek struct nvme_queue *q = xq;
1940 1.17 jdolecek struct nvme_softc *sc = q->q_sc;
1941 1.17 jdolecek
1942 1.17 jdolecek nvme_q_complete(sc, sc->sc_admin_q);
1943 1.1 nonaka if (sc->sc_q != NULL)
1944 1.17 jdolecek nvme_q_complete(sc, sc->sc_q[0]);
1945 1.1 nonaka
1946 1.17 jdolecek /*
1947 1.17 jdolecek * Processing done; tell the controller to issue interrupts again. There
1948 1.17 jdolecek * is no race, as NVMe spec requires the controller to maintain state,
1949 1.17 jdolecek * and assert the interrupt whenever there are unacknowledged
1950 1.17 jdolecek * completion queue entries.
1951 1.17 jdolecek */
1952 1.17 jdolecek nvme_write4(sc, NVME_INTMC, 1);
1953 1.1 nonaka }
1954 1.1 nonaka
1955 1.1 nonaka int
1956 1.9 jdolecek nvme_intr_msi(void *xq)
1957 1.1 nonaka {
1958 1.1 nonaka struct nvme_queue *q = xq;
1959 1.1 nonaka
1960 1.9 jdolecek KASSERT(q && q->q_sc && q->q_sc->sc_softih
1961 1.9 jdolecek && q->q_sc->sc_softih[q->q_id]);
1962 1.1 nonaka
1963 1.17 jdolecek /*
1964 1.17 jdolecek * MSI/MSI-X are edge triggered, so we can hand processing over to the
1965 1.17 jdolecek * softint without masking the interrupt.
1966 1.17 jdolecek */
1967 1.9 jdolecek softint_schedule(q->q_sc->sc_softih[q->q_id]);
1968 1.1 nonaka
1969 1.9 jdolecek return 1;
1970 1.1 nonaka }
1971 1.1 nonaka
1972 1.9 jdolecek void
1973 1.9 jdolecek nvme_softintr_msi(void *xq)
1974 1.1 nonaka {
1975 1.1 nonaka struct nvme_queue *q = xq;
1976 1.9 jdolecek struct nvme_softc *sc = q->q_sc;
1977 1.1 nonaka
1978 1.9 jdolecek nvme_q_complete(sc, q);
1979 1.1 nonaka }
1980 1.1 nonaka
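/*
 * Allocate size bytes of DMA-safe memory (used for queue rings and data
 * buffers): a single physically contiguous segment, mapped into kernel
 * virtual address space, zeroed, and loaded into a DMA map.
 */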
1981 1.19 jdolecek static struct nvme_dmamem *
1982 1.19 jdolecek nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
1983 1.1 nonaka {
1984 1.19 jdolecek struct nvme_dmamem *ndm;
1985 1.1 nonaka int nsegs;
1986 1.1 nonaka
1987 1.19 jdolecek ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
1988 1.19 jdolecek if (ndm == NULL)
1989 1.19 jdolecek return NULL;
1990 1.19 jdolecek
1991 1.1 nonaka ndm->ndm_size = size;
1992 1.1 nonaka
1993 1.43 mrg if (bus_dmamap_create(sc->sc_dmat, size, btoc(round_page(size)), size, 0,
1994 1.1 nonaka BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
1995 1.1 nonaka goto ndmfree;
1996 1.1 nonaka
1997 1.1 nonaka if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
1998 1.1 nonaka 1, &nsegs, BUS_DMA_WAITOK) != 0)
1999 1.1 nonaka goto destroy;
2000 1.1 nonaka
2001 1.1 nonaka if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
2002 1.1 nonaka &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
2003 1.1 nonaka goto free;
2004 1.1 nonaka memset(ndm->ndm_kva, 0, size);
2005 1.1 nonaka
2006 1.1 nonaka if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
2007 1.1 nonaka NULL, BUS_DMA_WAITOK) != 0)
2008 1.1 nonaka goto unmap;
2009 1.1 nonaka
2010 1.19 jdolecek return ndm;
2011 1.1 nonaka
2012 1.1 nonaka unmap:
2013 1.1 nonaka bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
2014 1.1 nonaka free:
2015 1.1 nonaka bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2016 1.1 nonaka destroy:
2017 1.1 nonaka bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2018 1.1 nonaka ndmfree:
2019 1.19 jdolecek kmem_free(ndm, sizeof(*ndm));
2020 1.19 jdolecek return NULL;
2021 1.19 jdolecek }
2022 1.19 jdolecek
2023 1.19 jdolecek static void
2024 1.19 jdolecek nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
2025 1.19 jdolecek {
2026 1.19 jdolecek bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
2027 1.19 jdolecek 0, NVME_DMA_LEN(mem), ops);
2028 1.1 nonaka }
2029 1.1 nonaka
2030 1.1 nonaka void
2031 1.1 nonaka nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
2032 1.1 nonaka {
2033 1.1 nonaka bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
2034 1.1 nonaka bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
2035 1.1 nonaka bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2036 1.1 nonaka bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2037 1.19 jdolecek kmem_free(ndm, sizeof(*ndm));
2038 1.1 nonaka }
2039 1.3 nonaka
2040 1.3 nonaka /*
2041 1.3 nonaka * ioctl
2042 1.3 nonaka */
2043 1.3 nonaka
2044 1.3 nonaka dev_type_open(nvmeopen);
2045 1.3 nonaka dev_type_close(nvmeclose);
2046 1.3 nonaka dev_type_ioctl(nvmeioctl);
2047 1.3 nonaka
2048 1.3 nonaka const struct cdevsw nvme_cdevsw = {
2049 1.3 nonaka .d_open = nvmeopen,
2050 1.3 nonaka .d_close = nvmeclose,
2051 1.3 nonaka .d_read = noread,
2052 1.3 nonaka .d_write = nowrite,
2053 1.3 nonaka .d_ioctl = nvmeioctl,
2054 1.3 nonaka .d_stop = nostop,
2055 1.3 nonaka .d_tty = notty,
2056 1.3 nonaka .d_poll = nopoll,
2057 1.3 nonaka .d_mmap = nommap,
2058 1.3 nonaka .d_kqfilter = nokqfilter,
2059 1.3 nonaka .d_discard = nodiscard,
2060 1.3 nonaka .d_flag = D_OTHER,
2061 1.3 nonaka };
2062 1.3 nonaka
2063 1.3 nonaka /*
2064 1.3 nonaka * Accept an open operation on the control device.
2065 1.3 nonaka */
2066 1.3 nonaka int
2067 1.3 nonaka nvmeopen(dev_t dev, int flag, int mode, struct lwp *l)
2068 1.3 nonaka {
2069 1.3 nonaka struct nvme_softc *sc;
2070 1.3 nonaka int unit = minor(dev) / 0x10000;
2071 1.3 nonaka int nsid = minor(dev) & 0xffff;
2072 1.3 nonaka int nsidx;
2073 1.3 nonaka
2074 1.3 nonaka if ((sc = device_lookup_private(&nvme_cd, unit)) == NULL)
2075 1.3 nonaka return ENXIO;
2076 1.3 nonaka if ((sc->sc_flags & NVME_F_ATTACHED) == 0)
2077 1.3 nonaka return ENXIO;
2078 1.3 nonaka
2079 1.5 nonaka if (nsid == 0) {
2080 1.5 nonaka /* controller */
2081 1.5 nonaka if (ISSET(sc->sc_flags, NVME_F_OPEN))
2082 1.5 nonaka return EBUSY;
2083 1.5 nonaka SET(sc->sc_flags, NVME_F_OPEN);
2084 1.5 nonaka } else {
2085 1.5 nonaka /* namespace */
2086 1.5 nonaka nsidx = nsid - 1;
2087 1.5 nonaka if (nsidx >= sc->sc_nn || sc->sc_namespaces[nsidx].dev == NULL)
2088 1.5 nonaka return ENXIO;
2089 1.5 nonaka if (ISSET(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN))
2090 1.5 nonaka return EBUSY;
2091 1.5 nonaka SET(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN);
2092 1.5 nonaka }
2093 1.3 nonaka return 0;
2094 1.3 nonaka }
2095 1.3 nonaka
2096 1.3 nonaka /*
2097 1.3 nonaka * Accept the last close on the control device.
2098 1.3 nonaka */
2099 1.3 nonaka int
2100 1.5 nonaka nvmeclose(dev_t dev, int flag, int mode, struct lwp *l)
2101 1.3 nonaka {
2102 1.3 nonaka struct nvme_softc *sc;
2103 1.3 nonaka int unit = minor(dev) / 0x10000;
2104 1.3 nonaka int nsid = minor(dev) & 0xffff;
2105 1.3 nonaka int nsidx;
2106 1.3 nonaka
2107 1.3 nonaka sc = device_lookup_private(&nvme_cd, unit);
2108 1.3 nonaka if (sc == NULL)
2109 1.3 nonaka return ENXIO;
2110 1.3 nonaka
2111 1.5 nonaka if (nsid == 0) {
2112 1.5 nonaka /* controller */
2113 1.5 nonaka CLR(sc->sc_flags, NVME_F_OPEN);
2114 1.5 nonaka } else {
2115 1.5 nonaka /* namespace */
2116 1.5 nonaka nsidx = nsid - 1;
2117 1.5 nonaka if (nsidx >= sc->sc_nn)
2118 1.5 nonaka return ENXIO;
2119 1.5 nonaka CLR(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN);
2120 1.5 nonaka }
2121 1.3 nonaka
2122 1.3 nonaka return 0;
2123 1.3 nonaka }
2124 1.3 nonaka
2125 1.3 nonaka /*
2126 1.3 nonaka * Handle control operations.
2127 1.3 nonaka */
2128 1.3 nonaka int
2129 1.5 nonaka nvmeioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2130 1.3 nonaka {
2131 1.3 nonaka struct nvme_softc *sc;
2132 1.3 nonaka int unit = minor(dev) / 0x10000;
2133 1.3 nonaka int nsid = minor(dev) & 0xffff;
2134 1.5 nonaka struct nvme_pt_command *pt;
2135 1.3 nonaka
2136 1.3 nonaka sc = device_lookup_private(&nvme_cd, unit);
2137 1.3 nonaka if (sc == NULL)
2138 1.3 nonaka return ENXIO;
2139 1.3 nonaka
2140 1.3 nonaka switch (cmd) {
2141 1.3 nonaka case NVME_PASSTHROUGH_CMD:
2142 1.5 nonaka pt = data;
2143 1.5 nonaka return nvme_command_passthrough(sc, data,
2144 1.5 nonaka nsid == 0 ? pt->cmd.nsid : nsid, l, nsid == 0);
2145 1.3 nonaka }
2146 1.3 nonaka
2147 1.3 nonaka return ENOTTY;
2148 1.3 nonaka }
2149