      1 /*	$NetBSD: nvme.c,v 1.20 2016/11/01 14:24:35 jdolecek Exp $	*/
      2 /*	$OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
      3 
      4 /*
      5  * Copyright (c) 2014 David Gwynne <dlg (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.20 2016/11/01 14:24:35 jdolecek Exp $");
     22 
     23 #include <sys/param.h>
     24 #include <sys/systm.h>
     25 #include <sys/kernel.h>
     26 #include <sys/atomic.h>
     27 #include <sys/bus.h>
     28 #include <sys/buf.h>
     29 #include <sys/conf.h>
     30 #include <sys/device.h>
     31 #include <sys/kmem.h>
     32 #include <sys/once.h>
     33 #include <sys/proc.h>
     34 #include <sys/queue.h>
     35 #include <sys/mutex.h>
     36 
     37 #include <uvm/uvm_extern.h>
     38 
     39 #include <dev/ic/nvmereg.h>
     40 #include <dev/ic/nvmevar.h>
     41 #include <dev/ic/nvmeio.h>
     42 
     43 int nvme_adminq_size = 128;
     44 int nvme_ioq_size = 1024;
     45 
     46 static int	nvme_print(void *, const char *);
     47 
     48 static int	nvme_ready(struct nvme_softc *, uint32_t);
     49 static int	nvme_enable(struct nvme_softc *, u_int);
     50 static int	nvme_disable(struct nvme_softc *);
     51 static int	nvme_shutdown(struct nvme_softc *);
     52 
     53 static void	nvme_version(struct nvme_softc *, uint32_t);
     54 #ifdef NVME_DEBUG
     55 static void	nvme_dumpregs(struct nvme_softc *);
     56 #endif
     57 static int	nvme_identify(struct nvme_softc *, u_int);
     58 static void	nvme_fill_identify(struct nvme_queue *, struct nvme_ccb *,
     59 		    void *);
     60 
     61 static int	nvme_ccbs_alloc(struct nvme_queue *, uint16_t);
     62 static void	nvme_ccbs_free(struct nvme_queue *);
     63 
     64 static struct nvme_ccb *
     65 		nvme_ccb_get(struct nvme_queue *);
     66 static void	nvme_ccb_put(struct nvme_queue *, struct nvme_ccb *);
     67 
     68 static int	nvme_poll(struct nvme_softc *, struct nvme_queue *,
     69 		    struct nvme_ccb *, void (*)(struct nvme_queue *,
     70 		    struct nvme_ccb *, void *), int);
     71 static void	nvme_poll_fill(struct nvme_queue *, struct nvme_ccb *, void *);
     72 static void	nvme_poll_done(struct nvme_queue *, struct nvme_ccb *,
     73 		    struct nvme_cqe *);
     74 static void	nvme_sqe_fill(struct nvme_queue *, struct nvme_ccb *, void *);
     75 static void	nvme_empty_done(struct nvme_queue *, struct nvme_ccb *,
     76 		    struct nvme_cqe *);
     77 
     78 static struct nvme_queue *
     79 		nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
     80 static int	nvme_q_create(struct nvme_softc *, struct nvme_queue *);
     81 static int	nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
     82 static void	nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
     83 		    struct nvme_ccb *, void (*)(struct nvme_queue *,
     84 		    struct nvme_ccb *, void *));
     85 static int	nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
     86 static void	nvme_q_free(struct nvme_softc *, struct nvme_queue *);
     87 
     88 static struct nvme_dmamem *
     89 		nvme_dmamem_alloc(struct nvme_softc *, size_t);
     90 static void	nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
     91 static void	nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *,
     92 		    int);
     93 
     94 static void	nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
     95 		    void *);
     96 static void	nvme_ns_io_done(struct nvme_queue *, struct nvme_ccb *,
     97 		    struct nvme_cqe *);
     98 static void	nvme_ns_sync_fill(struct nvme_queue *, struct nvme_ccb *,
     99 		    void *);
    100 static void	nvme_ns_sync_done(struct nvme_queue *, struct nvme_ccb *,
    101 		    struct nvme_cqe *);
    102 
    103 static void	nvme_pt_fill(struct nvme_queue *, struct nvme_ccb *,
    104 		    void *);
    105 static void	nvme_pt_done(struct nvme_queue *, struct nvme_ccb *,
    106 		    struct nvme_cqe *);
    107 static int	nvme_command_passthrough(struct nvme_softc *,
    108 		    struct nvme_pt_command *, uint16_t, struct lwp *, bool);
    109 
    110 #define NVME_TIMO_QOP		5	/* queue create and delete timeout */
    111 #define NVME_TIMO_IDENT		10	/* probe identify timeout */
    112 #define NVME_TIMO_PT		-1	/* passthrough cmd timeout */
    113 #define NVME_TIMO_SY		60	/* sync cache timeout */
    114 
    115 #define nvme_read4(_s, _r) \
    116 	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
    117 #define nvme_write4(_s, _r, _v) \
    118 	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
    119 #ifdef __LP64__
    120 #define nvme_read8(_s, _r) \
    121 	bus_space_read_8((_s)->sc_iot, (_s)->sc_ioh, (_r))
    122 #define nvme_write8(_s, _r, _v) \
    123 	bus_space_write_8((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
    124 #else /* __LP64__ */
    125 static inline uint64_t
    126 nvme_read8(struct nvme_softc *sc, bus_size_t r)
    127 {
    128 	uint64_t v;
    129 	uint32_t *a = (uint32_t *)&v;
    130 
    131 #if _BYTE_ORDER == _LITTLE_ENDIAN
    132 	a[0] = nvme_read4(sc, r);
    133 	a[1] = nvme_read4(sc, r + 4);
    134 #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
    135 	a[1] = nvme_read4(sc, r);
    136 	a[0] = nvme_read4(sc, r + 4);
    137 #endif
    138 
    139 	return v;
    140 }
    141 
    142 static inline void
    143 nvme_write8(struct nvme_softc *sc, bus_size_t r, uint64_t v)
    144 {
    145 	uint32_t *a = (uint32_t *)&v;
    146 
    147 #if _BYTE_ORDER == _LITTLE_ENDIAN
    148 	nvme_write4(sc, r, a[0]);
    149 	nvme_write4(sc, r + 4, a[1]);
    150 #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
    151 	nvme_write4(sc, r, a[1]);
    152 	nvme_write4(sc, r + 4, a[0]);
    153 #endif
    154 }
    155 #endif /* __LP64__ */
    156 #define nvme_barrier(_s, _r, _l, _f) \
    157 	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
    158 
    159 static void
    160 nvme_version(struct nvme_softc *sc, uint32_t ver)
    161 {
    162 	const char *v = NULL;
    163 
    164 	switch (ver) {
    165 	case NVME_VS_1_0:
    166 		v = "1.0";
    167 		break;
    168 	case NVME_VS_1_1:
    169 		v = "1.1";
    170 		break;
    171 	case NVME_VS_1_2:
    172 		v = "1.2";
    173 		break;
    174 	default:
    175 		aprint_error_dev(sc->sc_dev, "unknown version 0x%08x\n", ver);
    176 		return;
    177 	}
    178 
    179 	aprint_normal_dev(sc->sc_dev, "NVMe %s\n", v);
    180 }
    181 
    182 #ifdef NVME_DEBUG
    183 static __used void
    184 nvme_dumpregs(struct nvme_softc *sc)
    185 {
    186 	uint64_t r8;
    187 	uint32_t r4;
    188 
    189 #define	DEVNAME(_sc) device_xname((_sc)->sc_dev)
    190 	r8 = nvme_read8(sc, NVME_CAP);
    191 	printf("%s: cap  0x%016"PRIx64"\n", DEVNAME(sc), r8);
    192 	printf("%s:  mpsmax %u (%u)\n", DEVNAME(sc),
    193 	    (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
    194 	printf("%s:  mpsmin %u (%u)\n", DEVNAME(sc),
    195 	    (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
    196 	printf("%s:  css %"PRIu64"\n", DEVNAME(sc), NVME_CAP_CSS(r8));
    197 	printf("%s:  nssrs %"PRIu64"\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
    198 	printf("%s:  dstrd %"PRIu64"\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
    199 	printf("%s:  to %"PRIu64" msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
    200 	printf("%s:  ams %"PRIu64"\n", DEVNAME(sc), NVME_CAP_AMS(r8));
    201 	printf("%s:  cqr %"PRIu64"\n", DEVNAME(sc), NVME_CAP_CQR(r8));
    202 	printf("%s:  mqes %"PRIu64"\n", DEVNAME(sc), NVME_CAP_MQES(r8));
    203 
    204 	printf("%s: vs   0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));
    205 
    206 	r4 = nvme_read4(sc, NVME_CC);
    207 	printf("%s: cc   0x%08x\n", DEVNAME(sc), r4);
    208 	printf("%s:  iocqes %u (%u)\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4),
    209 	    (1 << NVME_CC_IOCQES_R(r4)));
    210 	printf("%s:  iosqes %u (%u)\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4),
    211 	    (1 << NVME_CC_IOSQES_R(r4)));
    212 	printf("%s:  shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
    213 	printf("%s:  ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
    214 	printf("%s:  mps %u (%u)\n", DEVNAME(sc), NVME_CC_MPS_R(r4),
    215 	    (1 << NVME_CC_MPS_R(r4)));
    216 	printf("%s:  css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
    217 	printf("%s:  en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN) ? 1 : 0);
    218 
    219 	r4 = nvme_read4(sc, NVME_CSTS);
    220 	printf("%s: csts 0x%08x\n", DEVNAME(sc), r4);
    221 	printf("%s:  rdy %u\n", DEVNAME(sc), r4 & NVME_CSTS_RDY);
    222 	printf("%s:  cfs %u\n", DEVNAME(sc), r4 & NVME_CSTS_CFS);
    223 	printf("%s:  shst %x\n", DEVNAME(sc), r4 & NVME_CSTS_SHST_MASK);
    224 
    225 	r4 = nvme_read4(sc, NVME_AQA);
    226 	printf("%s: aqa  0x%08x\n", DEVNAME(sc), r4);
    227 	printf("%s:  acqs %u\n", DEVNAME(sc), NVME_AQA_ACQS_R(r4));
    228 	printf("%s:  asqs %u\n", DEVNAME(sc), NVME_AQA_ASQS_R(r4));
    229 
    230 	printf("%s: asq  0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
    231 	printf("%s: acq  0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
    232 #undef	DEVNAME
    233 }
    234 #endif	/* NVME_DEBUG */
    235 
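        /*
         * Wait for CSTS.RDY to match the expected state (rdy is either
         * NVME_CSTS_RDY or 0), polling once per millisecond for up to the
         * CAP.TO-derived sc_rdy_to limit. Bail out early with ENXIO if
         * CC.EN does not agree with the state we are waiting for.
         */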
    236 static int
    237 nvme_ready(struct nvme_softc *sc, uint32_t rdy)
    238 {
    239 	u_int i = 0;
    240 	uint32_t cc;
    241 
    242 	cc = nvme_read4(sc, NVME_CC);
    243 	if (((cc & NVME_CC_EN) != 0) != (rdy != 0)) {
    244 		aprint_error_dev(sc->sc_dev,
    245 		    "controller enable status: expected %d, found %d\n",
    246 		    (rdy != 0), ((cc & NVME_CC_EN) != 0));
    247 		return ENXIO;
    248 	}
    249 
    250 	while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
    251 		if (i++ > sc->sc_rdy_to)
    252 			return ENXIO;
    253 
    254 		delay(1000);
    255 		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
    256 	}
    257 
    258 	return 0;
    259 }
    260 
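        /*
         * Bring the controller up: program the admin queue base addresses
         * and sizes, then configure CC (64-byte submission and 16-byte
         * completion entries, NVM command set, round-robin arbitration,
         * host page size), and finally set CC.EN and wait for CSTS.RDY.
         */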
    261 static int
    262 nvme_enable(struct nvme_softc *sc, u_int mps)
    263 {
    264 	uint32_t cc, csts;
    265 
    266 	cc = nvme_read4(sc, NVME_CC);
    267 	csts = nvme_read4(sc, NVME_CSTS);
    268 
    269 	if (ISSET(cc, NVME_CC_EN)) {
    270 		aprint_error_dev(sc->sc_dev,
        		    "controller unexpectedly enabled, failed to stay disabled\n");
    271 
    272 		if (ISSET(csts, NVME_CSTS_RDY))
    273 			return 1;
    274 
    275 		goto waitready;
    276 	}
    277 
    278 	nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
    279 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
    280 	delay(5000);
    281 	nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
    282 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
    283 	delay(5000);
    284 
    285 	nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
    286 	    NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
    287 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
    288 	delay(5000);
    289 
    290 	CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
    291 	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
    292 	SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
    293 	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
    294 	SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
    295 	SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
    296 	SET(cc, NVME_CC_MPS(mps));
    297 	SET(cc, NVME_CC_EN);
    298 
    299 	nvme_write4(sc, NVME_CC, cc);
    300 	nvme_barrier(sc, 0, sc->sc_ios,
    301 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    302 	delay(5000);
    303 
    304     waitready:
    305 	return nvme_ready(sc, NVME_CSTS_RDY);
    306 }
    307 
    308 static int
    309 nvme_disable(struct nvme_softc *sc)
    310 {
    311 	uint32_t cc, csts;
    312 
    313 	cc = nvme_read4(sc, NVME_CC);
    314 	csts = nvme_read4(sc, NVME_CSTS);
    315 
    316 	if (ISSET(cc, NVME_CC_EN) && !ISSET(csts, NVME_CSTS_RDY))
    317 		nvme_ready(sc, NVME_CSTS_RDY);
    318 
    319 	CLR(cc, NVME_CC_EN);
    320 
    321 	nvme_write4(sc, NVME_CC, cc);
    322 	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_READ);
    323 
    324 	delay(5000);
    325 
    326 	return nvme_ready(sc, 0);
    327 }
    328 
    329 int
    330 nvme_attach(struct nvme_softc *sc)
    331 {
    332 	uint64_t cap;
    333 	uint32_t reg;
    334 	u_int dstrd;
    335 	u_int mps = PAGE_SHIFT;
    336 	uint16_t adminq_entries = nvme_adminq_size;
    337 	uint16_t ioq_entries = nvme_ioq_size;
    338 	int i;
    339 
    340 	reg = nvme_read4(sc, NVME_VS);
    341 	if (reg == 0xffffffff) {
    342 		aprint_error_dev(sc->sc_dev, "invalid mapping\n");
    343 		return 1;
    344 	}
    345 
    346 	nvme_version(sc, reg);
    347 
    348 	cap = nvme_read8(sc, NVME_CAP);
    349 	dstrd = NVME_CAP_DSTRD(cap);
    350 	if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
    351 		aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
    352 		    "is greater than CPU page size %u\n",
    353 		    1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
    354 		return 1;
    355 	}
    356 	if (NVME_CAP_MPSMAX(cap) < mps)
    357 		mps = NVME_CAP_MPSMAX(cap);
    358 	if (ioq_entries > NVME_CAP_MQES(cap))
    359 		ioq_entries = NVME_CAP_MQES(cap);
    360 
    361 	/* set initial values to be used for admin queue during probe */
    362 	sc->sc_rdy_to = NVME_CAP_TO(cap);
    363 	sc->sc_mps = 1 << mps;
    364 	sc->sc_mdts = MAXPHYS;
    365 	sc->sc_max_sgl = 2;
    366 
    367 	if (nvme_disable(sc) != 0) {
    368 		aprint_error_dev(sc->sc_dev, "unable to disable controller\n");
    369 		return 1;
    370 	}
    371 
    372 	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries, dstrd);
    373 	if (sc->sc_admin_q == NULL) {
    374 		aprint_error_dev(sc->sc_dev,
    375 		    "unable to allocate admin queue\n");
    376 		return 1;
    377 	}
    378 	if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q))
    379 		goto free_admin_q;
    380 
    381 	if (nvme_enable(sc, mps) != 0) {
    382 		aprint_error_dev(sc->sc_dev, "unable to enable controller\n");
    383 		goto disestablish_admin_q;
    384 	}
    385 
    386 	if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
    387 		aprint_error_dev(sc->sc_dev, "unable to identify controller\n");
    388 		goto disable;
    389 	}
    390 
    391 	/* we know how big things are now */
    392 	sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;
    393 
    394 	/* reallocate ccbs of admin queue with new max sgl. */
    395 	nvme_ccbs_free(sc->sc_admin_q);
    396 	nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);
    397 
    398 	sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
    399 	if (sc->sc_q == NULL) {
    400 		aprint_error_dev(sc->sc_dev, "unable to allocate io queue\n");
    401 		goto disable;
    402 	}
    403 	for (i = 0; i < sc->sc_nq; i++) {
    404 		sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries, dstrd);
    405 		if (sc->sc_q[i] == NULL) {
    406 			aprint_error_dev(sc->sc_dev,
    407 			    "unable to allocate io queue\n");
    408 			goto free_q;
    409 		}
    410 		if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
    411 			aprint_error_dev(sc->sc_dev,
    412 			    "unable to create io queue\n");
    413 			nvme_q_free(sc, sc->sc_q[i]);
    414 			goto free_q;
    415 		}
    416 	}
    417 
    418 	if (!sc->sc_use_mq)
    419 		nvme_write4(sc, NVME_INTMC, 1);
    420 
    421 	/* probe subdevices */
    422 	sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
    423 	    KM_SLEEP);
    424 	if (sc->sc_namespaces == NULL)
    425 		goto free_q;
    426 	nvme_rescan(sc->sc_dev, "nvme", &i);
    427 
    428 	return 0;
    429 
    430 free_q:
    431 	while (--i >= 0) {
    432 		nvme_q_delete(sc, sc->sc_q[i]);
    433 		nvme_q_free(sc, sc->sc_q[i]);
    434 	}
    435 disable:
    436 	nvme_disable(sc);
    437 disestablish_admin_q:
    438 	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
    439 free_admin_q:
    440 	nvme_q_free(sc, sc->sc_admin_q);
    441 
    442 	return 1;
    443 }
    444 
    445 int
    446 nvme_rescan(device_t self, const char *attr, const int *flags)
    447 {
    448 	struct nvme_softc *sc = device_private(self);
    449 	struct nvme_attach_args naa;
    450 	uint64_t cap;
    451 	int ioq_entries = nvme_ioq_size;
    452 	int i;
    453 
    454 	cap = nvme_read8(sc, NVME_CAP);
    455 	if (ioq_entries > NVME_CAP_MQES(cap))
    456 		ioq_entries = NVME_CAP_MQES(cap);
    457 
    458 	for (i = 0; i < sc->sc_nn; i++) {
    459 		if (sc->sc_namespaces[i].dev)
    460 			continue;
    461 		memset(&naa, 0, sizeof(naa));
    462 		naa.naa_nsid = i + 1;
    463 		naa.naa_qentries = ioq_entries;
    464 		sc->sc_namespaces[i].dev = config_found(sc->sc_dev, &naa,
    465 		    nvme_print);
    466 	}
    467 	return 0;
    468 }
    469 
    470 static int
    471 nvme_print(void *aux, const char *pnp)
    472 {
    473 	struct nvme_attach_args *naa = aux;
    474 
    475 	if (pnp)
    476 		aprint_normal("at %s", pnp);
    477 
    478 	if (naa->naa_nsid > 0)
    479 		aprint_normal(" nsid %d", naa->naa_nsid);
    480 
    481 	return UNCONF;
    482 }
    483 
    484 int
    485 nvme_detach(struct nvme_softc *sc, int flags)
    486 {
    487 	int i, error;
    488 
    489 	error = config_detach_children(sc->sc_dev, flags);
    490 	if (error)
    491 		return error;
    492 
    493 	error = nvme_shutdown(sc);
    494 	if (error)
    495 		return error;
    496 
    497 	/* from now on we are committed to detach; the following will never fail */
    498 	for (i = 0; i < sc->sc_nq; i++)
    499 		nvme_q_free(sc, sc->sc_q[i]);
    500 	kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
    501 	nvme_q_free(sc, sc->sc_admin_q);
    502 
    503 	return 0;
    504 }
    505 
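        /*
         * Orderly shutdown: delete all io queues, then request a normal
         * shutdown via CC.SHN and poll CSTS.SHST for up to 4 seconds for
         * shutdown processing to complete. If anything goes wrong, fall
         * back to simply disabling the controller.
         */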
    506 static int
    507 nvme_shutdown(struct nvme_softc *sc)
    508 {
    509 	uint32_t cc, csts;
    510 	bool disabled = false;
    511 	int i;
    512 
    513 	if (!sc->sc_use_mq)
    514 		nvme_write4(sc, NVME_INTMS, 1);
    515 
    516 	for (i = 0; i < sc->sc_nq; i++) {
    517 		if (nvme_q_delete(sc, sc->sc_q[i]) != 0) {
    518 			aprint_error_dev(sc->sc_dev,
    519 			    "unable to delete io queue %d, disabling\n", i + 1);
    520 			disabled = true;
    521 		}
    522 	}
    523 	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
    524 	if (disabled)
    525 		goto disable;
    526 
    527 	cc = nvme_read4(sc, NVME_CC);
    528 	CLR(cc, NVME_CC_SHN_MASK);
    529 	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
    530 	nvme_write4(sc, NVME_CC, cc);
    531 
    532 	for (i = 0; i < 4000; i++) {
    533 		nvme_barrier(sc, 0, sc->sc_ios,
    534 		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    535 		csts = nvme_read4(sc, NVME_CSTS);
    536 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
    537 			return 0;
    538 
    539 		delay(1000);
    540 	}
    541 
    542 	aprint_error_dev(sc->sc_dev, "unable to shut down, disabling\n");
    543 
    544 disable:
    545 	nvme_disable(sc);
    546 	return 0;
    547 }
    548 
    549 void
    550 nvme_childdet(device_t self, device_t child)
    551 {
    552 	struct nvme_softc *sc = device_private(self);
    553 	int i;
    554 
    555 	for (i = 0; i < sc->sc_nn; i++) {
    556 		if (sc->sc_namespaces[i].dev == child) {
    557 			/* Already freed ns->ident. */
    558 			sc->sc_namespaces[i].dev = NULL;
    559 			break;
    560 		}
    561 	}
    562 }
    563 
    564 int
    565 nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
    566 {
    567 	struct nvme_sqe sqe;
    568 	struct nvm_identify_namespace *identify;
    569 	struct nvme_dmamem *mem;
    570 	struct nvme_ccb *ccb;
    571 	struct nvme_namespace *ns;
    572 	int rv;
    573 
    574 	KASSERT(nsid > 0);
    575 
    576 	ccb = nvme_ccb_get(sc->sc_admin_q);
    577 	KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
    578 
    579 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
    580 	if (mem == NULL)
    581 		return ENOMEM;
    582 
    583 	memset(&sqe, 0, sizeof(sqe));
    584 	sqe.opcode = NVM_ADMIN_IDENTIFY;
    585 	htolem32(&sqe.nsid, nsid);
    586 	htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
    587 	htolem32(&sqe.cdw10, 0);
    588 
    589 	ccb->ccb_done = nvme_empty_done;
    590 	ccb->ccb_cookie = &sqe;
    591 
    592 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
    593 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
    594 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
    595 
    596 	nvme_ccb_put(sc->sc_admin_q, ccb);
    597 
    598 	if (rv != 0) {
    599 		rv = EIO;
    600 		goto done;
    601 	}
    602 
    603 	/* commit the identify data */
    604 
    605 	identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
    606 	*identify = *((volatile struct nvm_identify_namespace *)NVME_DMA_KVA(mem));
    608 
    609 	ns = nvme_ns_get(sc, nsid);
    610 	KASSERT(ns);
    611 	ns->ident = identify;
    612 
    613 done:
    614 	nvme_dmamem_free(sc, mem);
    615 
    616 	return rv;
    617 }
    618 
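        /*
         * Issue a read or write on a namespace. The data buffer is loaded
         * into the ccb's DMA map; if it maps to more than two segments,
         * the tail segments are recorded in the per-ccb PRP list, which
         * the fill function points the command at. With NVME_NS_CTX_F_POLL
         * the command is polled to completion, otherwise it completes via
         * interrupt and the nnc_done callback.
         */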
    619 int
    620 nvme_ns_dobio(struct nvme_softc *sc, uint16_t nsid, void *cookie,
    621     struct buf *bp, void *data, size_t datasize,
    622     int secsize, daddr_t blkno, int flags, nvme_nnc_done nnc_done)
    623 {
    624 	struct nvme_queue *q = nvme_get_q(sc);
    625 	struct nvme_ccb *ccb;
    626 	bus_dmamap_t dmap;
    627 	int i, error;
    628 
    629 	ccb = nvme_ccb_get(q);
    630 	if (ccb == NULL)
    631 		return EAGAIN;
    632 
    633 	ccb->ccb_done = nvme_ns_io_done;
    634 	ccb->ccb_cookie = cookie;
    635 
    636 	/* namespace context */
    637 	ccb->nnc_nsid = nsid;
    638 	ccb->nnc_flags = flags;
    639 	ccb->nnc_buf = bp;
    640 	ccb->nnc_datasize = datasize;
    641 	ccb->nnc_secsize = secsize;
    642 	ccb->nnc_blkno = blkno;
    643 	ccb->nnc_done = nnc_done;
    644 
    645 	dmap = ccb->ccb_dmamap;
    646 	error = bus_dmamap_load(sc->sc_dmat, dmap, data,
    647 	    datasize, NULL,
    648 	    (ISSET(flags, NVME_NS_CTX_F_POLL) ?
    649 	      BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
    650 	    (ISSET(flags, NVME_NS_CTX_F_READ) ?
    651 	      BUS_DMA_READ : BUS_DMA_WRITE));
    652 	if (error) {
    653 		nvme_ccb_put(q, ccb);
    654 		return error;
    655 	}
    656 
    657 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
    658 	    ISSET(flags, NVME_NS_CTX_F_READ) ?
    659 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    660 
    661 	if (dmap->dm_nsegs > 2) {
    662 		for (i = 1; i < dmap->dm_nsegs; i++) {
    663 			htolem64(&ccb->ccb_prpl[i - 1],
    664 			    dmap->dm_segs[i].ds_addr);
    665 		}
    666 		bus_dmamap_sync(sc->sc_dmat,
    667 		    NVME_DMA_MAP(q->q_ccb_prpls),
    668 		    ccb->ccb_prpl_off,
    669 		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
    670 		    BUS_DMASYNC_PREWRITE);
    671 	}
    672 
    673 	if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
    674 		if (nvme_poll(sc, q, ccb, nvme_ns_io_fill, NVME_TIMO_PT) != 0)
    675 			return EIO;
    676 		return 0;
    677 	}
    678 
    679 	nvme_q_submit(sc, q, ccb, nvme_ns_io_fill);
    680 	return 0;
    681 }
    682 
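        /*
         * PRP setup follows the NVMe rules: a single segment fits in PRP1,
         * two segments use PRP1 and PRP2 directly, and anything larger
         * makes PRP2 point at the PRP list prepared by nvme_ns_dobio().
         */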
    683 static void
    684 nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
    685 {
    686 	struct nvme_sqe_io *sqe = slot;
    687 	bus_dmamap_t dmap = ccb->ccb_dmamap;
    688 
    689 	sqe->opcode = ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
    690 	    NVM_CMD_READ : NVM_CMD_WRITE;
    691 	htolem32(&sqe->nsid, ccb->nnc_nsid);
    692 
    693 	htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
    694 	switch (dmap->dm_nsegs) {
    695 	case 1:
    696 		break;
    697 	case 2:
    698 		htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
    699 		break;
    700 	default:
    701 		/* the prp list is already set up and synced */
    702 		htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
    703 		break;
    704 	}
    705 
    706 	htolem64(&sqe->slba, ccb->nnc_blkno);
    707 
    708 	/* guaranteed by upper layers, but check just in case */
    709 	KASSERT((ccb->nnc_datasize % ccb->nnc_secsize) == 0);
    710 	htolem16(&sqe->nlb, (ccb->nnc_datasize / ccb->nnc_secsize) - 1);
    711 }
    712 
    713 static void
    714 nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    715     struct nvme_cqe *cqe)
    716 {
    717 	struct nvme_softc *sc = q->q_sc;
    718 	bus_dmamap_t dmap = ccb->ccb_dmamap;
    719 	void *nnc_cookie = ccb->ccb_cookie;
    720 	nvme_nnc_done nnc_done = ccb->nnc_done;
    721 	struct buf *bp = ccb->nnc_buf;
    722 
    723 	if (dmap->dm_nsegs > 2) {
    724 		bus_dmamap_sync(sc->sc_dmat,
    725 		    NVME_DMA_MAP(q->q_ccb_prpls),
    726 		    ccb->ccb_prpl_off,
    727 		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
    728 		    BUS_DMASYNC_POSTWRITE);
    729 	}
    730 
    731 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
    732 	    ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
    733 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    734 
    735 	bus_dmamap_unload(sc->sc_dmat, dmap);
    736 	nvme_ccb_put(q, ccb);
    737 
    738 	nnc_done(nnc_cookie, bp, lemtoh16(&cqe->flags));
    739 }
    740 
    741 int
    742 nvme_ns_sync(struct nvme_softc *sc, uint16_t nsid, void *cookie,
    743     int flags, nvme_nnc_done nnc_done)
    744 {
    745 	struct nvme_queue *q = nvme_get_q(sc);
    746 	struct nvme_ccb *ccb;
    747 
    748 	ccb = nvme_ccb_get(q);
    749 	if (ccb == NULL)
    750 		return EAGAIN;
    751 
    752 	ccb->ccb_done = nvme_ns_sync_done;
    753 	ccb->ccb_cookie = cookie;
    754 
    755 	/* namespace context */
    756 	ccb->nnc_nsid = nsid;
    757 	ccb->nnc_flags = flags;
    758 	ccb->nnc_done = nnc_done;
    759 
    760 	if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
    761 		if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill, NVME_TIMO_SY) != 0)
    762 			return EIO;
    763 		return 0;
    764 	}
    765 
    766 	nvme_q_submit(sc, q, ccb, nvme_ns_sync_fill);
    767 	return 0;
    768 }
    769 
    770 static void
    771 nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
    772 {
    773 	struct nvme_sqe *sqe = slot;
    774 
    775 	sqe->opcode = NVM_CMD_FLUSH;
    776 	htolem32(&sqe->nsid, ccb->nnc_nsid);
    777 }
    778 
    779 static void
    780 nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    781     struct nvme_cqe *cqe)
    782 {
    783 	void *cookie = ccb->ccb_cookie;
    784 	nvme_nnc_done nnc_done = ccb->nnc_done;
    785 
    786 	nvme_ccb_put(q, ccb);
    787 
    788 	nnc_done(cookie, NULL, lemtoh16(&cqe->flags));
    789 }
    790 
    791 void
    792 nvme_ns_free(struct nvme_softc *sc, uint16_t nsid)
    793 {
    794 	struct nvme_namespace *ns;
    795 	struct nvm_identify_namespace *identify;
    796 
    797 	ns = nvme_ns_get(sc, nsid);
    798 	KASSERT(ns);
    799 
    800 	identify = ns->ident;
    801 	ns->ident = NULL;
    802 	if (identify != NULL)
    803 		kmem_free(identify, sizeof(*identify));
    804 }
    805 
    806 static void
    807 nvme_pt_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
    808 {
    809 	struct nvme_softc *sc = q->q_sc;
    810 	struct nvme_sqe *sqe = slot;
    811 	struct nvme_pt_command *pt = ccb->ccb_cookie;
    812 	bus_dmamap_t dmap = ccb->ccb_dmamap;
    813 	int i;
    814 
    815 	sqe->opcode = pt->cmd.opcode;
    816 	htolem32(&sqe->nsid, pt->cmd.nsid);
    817 
    818 	if (pt->buf != NULL && pt->len > 0) {
    819 		htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
    820 		switch (dmap->dm_nsegs) {
    821 		case 1:
    822 			break;
    823 		case 2:
    824 			htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
    825 			break;
    826 		default:
    827 			for (i = 1; i < dmap->dm_nsegs; i++) {
    828 				htolem64(&ccb->ccb_prpl[i - 1],
    829 				    dmap->dm_segs[i].ds_addr);
    830 			}
    831 			bus_dmamap_sync(sc->sc_dmat,
    832 			    NVME_DMA_MAP(q->q_ccb_prpls),
    833 			    ccb->ccb_prpl_off,
    834 			    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
    835 			    BUS_DMASYNC_PREWRITE);
    836 			htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
    837 			break;
    838 		}
    839 	}
    840 
    841 	htolem32(&sqe->cdw10, pt->cmd.cdw10);
    842 	htolem32(&sqe->cdw11, pt->cmd.cdw11);
    843 	htolem32(&sqe->cdw12, pt->cmd.cdw12);
    844 	htolem32(&sqe->cdw13, pt->cmd.cdw13);
    845 	htolem32(&sqe->cdw14, pt->cmd.cdw14);
    846 	htolem32(&sqe->cdw15, pt->cmd.cdw15);
    847 }
    848 
    849 static void
    850 nvme_pt_done(struct nvme_queue *q, struct nvme_ccb *ccb, struct nvme_cqe *cqe)
    851 {
    852 	struct nvme_softc *sc = q->q_sc;
    853 	struct nvme_pt_command *pt = ccb->ccb_cookie;
    854 	bus_dmamap_t dmap = ccb->ccb_dmamap;
    855 
    856 	if (pt->buf != NULL && pt->len > 0) {
    857 		if (dmap->dm_nsegs > 2) {
    858 			bus_dmamap_sync(sc->sc_dmat,
    859 			    NVME_DMA_MAP(q->q_ccb_prpls),
    860 			    ccb->ccb_prpl_off,
    861 			    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
    862 			    BUS_DMASYNC_POSTWRITE);
    863 		}
    864 
    865 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
    866 		    pt->is_read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    867 		bus_dmamap_unload(sc->sc_dmat, dmap);
    868 	}
    869 
    870 	pt->cpl.cdw0 = cqe->cdw0;
    871 	pt->cpl.flags = cqe->flags & ~NVME_CQE_PHASE;
    872 }
    873 
    874 static int
    875 nvme_command_passthrough(struct nvme_softc *sc, struct nvme_pt_command *pt,
    876     uint16_t nsid, struct lwp *l, bool is_adminq)
    877 {
    878 	struct nvme_queue *q;
    879 	struct nvme_ccb *ccb;
    880 	void *buf = NULL;
    881 	int error;
    882 
    883 	/* limit command size to maximum data transfer size */
    884 	if ((pt->buf == NULL && pt->len > 0) ||
    885 	    (pt->buf != NULL && (pt->len == 0 || pt->len > sc->sc_mdts)))
    886 		return EINVAL;
    887 
    888 	q = is_adminq ? sc->sc_admin_q : nvme_get_q(sc);
    889 	ccb = nvme_ccb_get(q);
    890 	if (ccb == NULL)
    891 		return EBUSY;
    892 
    893 	if (pt->buf != NULL) {
    894 		KASSERT(pt->len > 0);
    895 		buf = kmem_alloc(pt->len, KM_SLEEP);
    896 		if (buf == NULL) {
    897 			error = ENOMEM;
    898 			goto ccb_put;
    899 		}
    900 		if (!pt->is_read) {
    901 			error = copyin(pt->buf, buf, pt->len);
    902 			if (error)
    903 				goto kmem_free;
    904 		}
    905 		error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap, buf,
    906 		    pt->len, NULL,
    907 		    BUS_DMA_WAITOK |
    908 		      (pt->is_read ? BUS_DMA_READ : BUS_DMA_WRITE));
    909 		if (error)
    910 			goto kmem_free;
    911 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
    912 		    0, ccb->ccb_dmamap->dm_mapsize,
    913 		    pt->is_read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    914 	}
    915 
    916 	ccb->ccb_done = nvme_pt_done;
    917 	ccb->ccb_cookie = pt;
    918 
    919 	pt->cmd.nsid = nsid;
    920 	if (nvme_poll(sc, q, ccb, nvme_pt_fill, NVME_TIMO_PT)) {
    921 		error = EIO;
    922 		goto out;
    923 	}
    924 
    925 	error = 0;
    926 out:
    927 	if (buf != NULL) {
    928 		if (error == 0 && pt->is_read)
    929 			error = copyout(buf, pt->buf, pt->len);
    930 kmem_free:
    931 		kmem_free(buf, pt->len);
    932 	}
    933 ccb_put:
    934 	nvme_ccb_put(q, ccb);
    935 	return error;
    936 }
    937 
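        /*
         * Post one command: claim the current submission queue tail slot,
         * let the fill callback construct the SQE in place, stamp it with
         * the ccb id (echoed back in cqe->cid on completion), and ring the
         * SQ tail doorbell.
         */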
    938 static void
    939 nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    940     void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
    941 {
    942 	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
    943 	uint32_t tail;
    944 
    945 	mutex_enter(&q->q_sq_mtx);
    946 	tail = q->q_sq_tail;
    947 	if (++q->q_sq_tail >= q->q_entries)
    948 		q->q_sq_tail = 0;
    949 
    950 	sqe += tail;
    951 
    952 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
    953 	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
    954 	memset(sqe, 0, sizeof(*sqe));
    955 	(*fill)(q, ccb, sqe);
    956 	sqe->cid = ccb->ccb_id;
    957 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
    958 	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
    959 
    960 	nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
    961 	mutex_exit(&q->q_sq_mtx);
    962 }
    963 
    964 struct nvme_poll_state {
    965 	struct nvme_sqe s;
    966 	struct nvme_cqe c;
    967 };
    968 
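        /*
         * Synchronous command execution. The ccb's done/cookie pair is
         * temporarily redirected so completion copies the CQE into the
         * on-stack state; we then spin on the phase bit, driving
         * nvme_q_complete() ourselves, until the command finishes or the
         * timeout (in seconds; negative means wait forever) expires.
         * Returns 0 on success, the CQE status on failure, 1 on timeout.
         */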
    969 static int
    970 nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    971     void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *), int timo_sec)
    972 {
    973 	struct nvme_poll_state state;
    974 	void (*done)(struct nvme_queue *, struct nvme_ccb *, struct nvme_cqe *);
    975 	void *cookie;
    976 	uint16_t flags;
    977 	int step = 10;
    978 	int maxloop = timo_sec * 1000000 / step;
    979 	int error = 0;
    980 
    981 	memset(&state, 0, sizeof(state));
    982 	(*fill)(q, ccb, &state.s);
    983 
    984 	done = ccb->ccb_done;
    985 	cookie = ccb->ccb_cookie;
    986 
    987 	ccb->ccb_done = nvme_poll_done;
    988 	ccb->ccb_cookie = &state;
    989 
    990 	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
    991 	while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
    992 		if (nvme_q_complete(sc, q) == 0)
    993 			delay(step);
    994 
    995 		if (timo_sec >= 0 && --maxloop <= 0) {
    996 			error = ETIMEDOUT;
    997 			break;
    998 		}
    999 	}
   1000 
   1001 	ccb->ccb_cookie = cookie;
   1002 	done(q, ccb, &state.c);
   1003 
   1004 	if (error == 0) {
   1005 		flags = lemtoh16(&state.c.flags);
   1006 		return flags & ~NVME_CQE_PHASE;
   1007 	} else {
   1008 		return 1;
   1009 	}
   1010 }
   1011 
   1012 static void
   1013 nvme_poll_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
   1014 {
   1015 	struct nvme_sqe *sqe = slot;
   1016 	struct nvme_poll_state *state = ccb->ccb_cookie;
   1017 
   1018 	*sqe = state->s;
   1019 }
   1020 
   1021 static void
   1022 nvme_poll_done(struct nvme_queue *q, struct nvme_ccb *ccb,
   1023     struct nvme_cqe *cqe)
   1024 {
   1025 	struct nvme_poll_state *state = ccb->ccb_cookie;
   1026 
   1027 	SET(cqe->flags, htole16(NVME_CQE_PHASE));
   1028 	state->c = *cqe;
   1029 }
   1030 
   1031 static void
   1032 nvme_sqe_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
   1033 {
   1034 	struct nvme_sqe *src = ccb->ccb_cookie;
   1035 	struct nvme_sqe *dst = slot;
   1036 
   1037 	*dst = *src;
   1038 }
   1039 
   1040 static void
   1041 nvme_empty_done(struct nvme_queue *q, struct nvme_ccb *ccb,
   1042     struct nvme_cqe *cqe)
   1043 {
   1044 }
   1045 
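        /*
         * Reap completions: consume CQEs for as long as their phase tag
         * matches the queue's current phase, flipping the phase on ring
         * wrap. The CQ head doorbell is written once, after the whole
         * batch. Returns the number of commands completed.
         */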
   1046 static int
   1047 nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
   1048 {
   1049 	struct nvme_ccb *ccb;
   1050 	struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
   1051 	uint16_t flags;
   1052 	int rv = 0;
   1053 
   1054 	mutex_enter(&q->q_cq_mtx);
   1055 
   1056 	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
   1057 	for (;;) {
   1058 		cqe = &ring[q->q_cq_head];
   1059 		flags = lemtoh16(&cqe->flags);
   1060 		if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
   1061 			break;
   1062 
   1063 		ccb = &q->q_ccbs[cqe->cid];
   1064 
   1065 		if (++q->q_cq_head >= q->q_entries) {
   1066 			q->q_cq_head = 0;
   1067 			q->q_cq_phase ^= NVME_CQE_PHASE;
   1068 		}
   1069 
   1070 #ifdef DEBUG
   1071 		/*
   1072 		 * If we get a spurious completion notification, something
   1073 		 * is seriously hosed up. Very likely a DMA to some random
   1074 		 * memory location happened, so just bail out.
   1075 		 */
   1076 		if ((intptr_t)ccb->ccb_cookie == NVME_CCB_FREE) {
   1077 			panic("%s: invalid ccb detected",
   1078 			    device_xname(sc->sc_dev));
   1079 			/* NOTREACHED */
   1080 		}
   1081 #endif
   1082 
   1083 		rv++;
   1084 
   1085 		/*
   1086 		 * Unlock the mutex before calling the ccb_done callback
   1087 		 * and re-lock afterwards. The callback triggers lddone()
   1088 		 * which schedules another i/o, and also calls nvme_ccb_put().
   1089 		 * Unlock/relock avoids possibility of deadlock.
   1090 		 */
   1091 		mutex_exit(&q->q_cq_mtx);
   1092 		ccb->ccb_done(q, ccb, cqe);
   1093 		mutex_enter(&q->q_cq_mtx);
   1094 	}
   1095 	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
   1096 
   1097 	if (rv)
   1098 		nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
   1099 
   1100 	mutex_exit(&q->q_cq_mtx);
   1101 
   1102 	if (rv) {
   1103 		mutex_enter(&q->q_ccb_mtx);
   1104 		q->q_nccbs_avail += rv;
   1105 		mutex_exit(&q->q_ccb_mtx);
   1106 	}
   1107 
   1108 	return rv;
   1109 }
   1110 
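        /*
         * IDENTIFY CONTROLLER (CNS 1): fetch the controller data structure,
         * print the model/firmware/serial strings, clamp sc_mdts to the
         * advertised maximum data transfer size (given in units of the
         * minimum page size), and record the namespace count.
         */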
   1111 static int
   1112 nvme_identify(struct nvme_softc *sc, u_int mps)
   1113 {
   1114 	char sn[41], mn[81], fr[17];
   1115 	struct nvm_identify_controller *identify;
   1116 	struct nvme_dmamem *mem;
   1117 	struct nvme_ccb *ccb;
   1118 	u_int mdts;
   1119 	int rv = 1;
   1120 
   1121 	ccb = nvme_ccb_get(sc->sc_admin_q);
   1122 	KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
   1123 
   1124 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
   1125 	if (mem == NULL)
   1126 		return 1;
   1127 
   1128 	ccb->ccb_done = nvme_empty_done;
   1129 	ccb->ccb_cookie = mem;
   1130 
   1131 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
   1132 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
   1133 	    NVME_TIMO_IDENT);
   1134 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
   1135 
   1136 	nvme_ccb_put(sc->sc_admin_q, ccb);
   1137 
   1138 	if (rv != 0)
   1139 		goto done;
   1140 
   1141 	identify = NVME_DMA_KVA(mem);
   1142 
   1143 	strnvisx(sn, sizeof(sn), (const char *)identify->sn,
   1144 	    sizeof(identify->sn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
   1145 	strnvisx(mn, sizeof(mn), (const char *)identify->mn,
   1146 	    sizeof(identify->mn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
   1147 	strnvisx(fr, sizeof(fr), (const char *)identify->fr,
   1148 	    sizeof(identify->fr), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
   1149 	aprint_normal_dev(sc->sc_dev, "%s, firmware %s, serial %s\n", mn, fr,
   1150 	    sn);
   1151 
   1152 	if (identify->mdts > 0) {
   1153 		mdts = (1 << identify->mdts) * (1 << mps);
   1154 		if (mdts < sc->sc_mdts)
   1155 			sc->sc_mdts = mdts;
   1156 	}
   1157 
   1158 	sc->sc_nn = lemtoh32(&identify->nn);
   1159 
   1160 	memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));
   1161 
   1162 done:
   1163 	nvme_dmamem_free(sc, mem);
   1164 
   1165 	return rv;
   1166 }
   1167 
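        /*
         * Create an io queue pair on the controller. The completion queue
         * must exist before the submission queue that posts to it, so the
         * CQ is created first. With multiple queues the per-queue interrupt
         * vector is established beforehand (the qid doubles as the vector).
         */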
   1168 static int
   1169 nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
   1170 {
   1171 	struct nvme_sqe_q sqe;
   1172 	struct nvme_ccb *ccb;
   1173 	int rv;
   1174 
   1175 	if (sc->sc_use_mq && sc->sc_intr_establish(sc, q->q_id, q) != 0)
   1176 		return 1;
   1177 
   1178 	ccb = nvme_ccb_get(sc->sc_admin_q);
   1179 	KASSERT(ccb != NULL);
   1180 
   1181 	ccb->ccb_done = nvme_empty_done;
   1182 	ccb->ccb_cookie = &sqe;
   1183 
   1184 	memset(&sqe, 0, sizeof(sqe));
   1185 	sqe.opcode = NVM_ADMIN_ADD_IOCQ;
   1186 	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
   1187 	htolem16(&sqe.qsize, q->q_entries - 1);
   1188 	htolem16(&sqe.qid, q->q_id);
   1189 	sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
   1190 	if (sc->sc_use_mq)
   1191 		htolem16(&sqe.cqid, q->q_id);	/* qid == vector */
   1192 
   1193 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
   1194 	if (rv != 0)
   1195 		goto fail;
   1196 
   1197 	ccb->ccb_done = nvme_empty_done;
   1198 	ccb->ccb_cookie = &sqe;
   1199 
   1200 	memset(&sqe, 0, sizeof(sqe));
   1201 	sqe.opcode = NVM_ADMIN_ADD_IOSQ;
   1202 	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
   1203 	htolem16(&sqe.qsize, q->q_entries - 1);
   1204 	htolem16(&sqe.qid, q->q_id);
   1205 	htolem16(&sqe.cqid, q->q_id);
   1206 	sqe.qflags = NVM_SQE_Q_PC;
   1207 
   1208 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
   1209 	if (rv != 0)
   1210 		goto fail;
   1211 
   1212 fail:
   1213 	nvme_ccb_put(sc->sc_admin_q, ccb);
   1214 	return rv;
   1215 }
   1216 
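        /*
         * Tear down an io queue pair in the reverse order of creation:
         * the submission queue is deleted before its completion queue.
         * With multiple queues the per-queue interrupt is disestablished
         * afterwards.
         */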
   1217 static int
   1218 nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
   1219 {
   1220 	struct nvme_sqe_q sqe;
   1221 	struct nvme_ccb *ccb;
   1222 	int rv;
   1223 
   1224 	ccb = nvme_ccb_get(sc->sc_admin_q);
   1225 	KASSERT(ccb != NULL);
   1226 
   1227 	ccb->ccb_done = nvme_empty_done;
   1228 	ccb->ccb_cookie = &sqe;
   1229 
   1230 	memset(&sqe, 0, sizeof(sqe));
   1231 	sqe.opcode = NVM_ADMIN_DEL_IOSQ;
   1232 	htolem16(&sqe.qid, q->q_id);
   1233 
   1234 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
   1235 	if (rv != 0)
   1236 		goto fail;
   1237 
   1238 	ccb->ccb_done = nvme_empty_done;
   1239 	ccb->ccb_cookie = &sqe;
   1240 
   1241 	memset(&sqe, 0, sizeof(sqe));
   1242 	sqe.opcode = NVM_ADMIN_DEL_IOCQ;
   1244 	htolem16(&sqe.qid, q->q_id);
   1245 
   1246 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
   1247 	if (rv != 0)
   1248 		goto fail;
   1249 
   1250 fail:
   1251 	nvme_ccb_put(sc->sc_admin_q, ccb);
   1252 
   1253 	if (rv == 0 && sc->sc_use_mq) {
   1254 		if (sc->sc_intr_disestablish(sc, q->q_id))
   1255 			rv = 1;
   1256 	}
   1257 
   1258 	return rv;
   1259 }
   1260 
   1261 static void
   1262 nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
   1263 {
   1264 	struct nvme_sqe *sqe = slot;
   1265 	struct nvme_dmamem *mem = ccb->ccb_cookie;
   1266 
   1267 	sqe->opcode = NVM_ADMIN_IDENTIFY;
   1268 	htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
   1269 	htolem32(&sqe->cdw10, 1);
   1270 }
   1271 
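        /*
         * Allocate the per-queue ccbs, one DMA map each, and carve every
         * ccb's PRP list (sc_max_sgl 64-bit entries) out of a single
         * shared DMA area.
         */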
   1272 static int
   1273 nvme_ccbs_alloc(struct nvme_queue *q, uint16_t nccbs)
   1274 {
   1275 	struct nvme_softc *sc = q->q_sc;
   1276 	struct nvme_ccb *ccb;
   1277 	bus_addr_t off;
   1278 	uint64_t *prpl;
   1279 	u_int i;
   1280 
   1281 	mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
   1282 	SIMPLEQ_INIT(&q->q_ccb_list);
   1283 
   1284 	q->q_ccbs = kmem_alloc(sizeof(*ccb) * nccbs, KM_SLEEP);
   1285 	if (q->q_ccbs == NULL)
   1286 		return 1;
   1287 
   1288 	q->q_nccbs = nccbs;
   1289 	q->q_nccbs_avail = nccbs;
   1290 	q->q_ccb_prpls = nvme_dmamem_alloc(sc,
   1291 	    sizeof(*prpl) * sc->sc_max_sgl * nccbs);
   1292 
   1293 	prpl = NVME_DMA_KVA(q->q_ccb_prpls);
   1294 	off = 0;
   1295 
   1296 	for (i = 0; i < nccbs; i++) {
   1297 		ccb = &q->q_ccbs[i];
   1298 
   1299 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
   1300 		    sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
   1301 		    sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
   1302 		    &ccb->ccb_dmamap) != 0)
   1303 			goto free_maps;
   1304 
   1305 		ccb->ccb_id = i;
   1306 		ccb->ccb_prpl = prpl;
   1307 		ccb->ccb_prpl_off = off;
   1308 		ccb->ccb_prpl_dva = NVME_DMA_DVA(q->q_ccb_prpls) + off;
   1309 
   1310 		SIMPLEQ_INSERT_TAIL(&q->q_ccb_list, ccb, ccb_entry);
   1311 
   1312 		prpl += sc->sc_max_sgl;
   1313 		off += sizeof(*prpl) * sc->sc_max_sgl;
   1314 	}
   1315 
   1316 	return 0;
   1317 
   1318 free_maps:
   1319 	nvme_ccbs_free(q);
   1320 	return 1;
   1321 }
   1322 
   1323 static struct nvme_ccb *
   1324 nvme_ccb_get(struct nvme_queue *q)
   1325 {
   1326 	struct nvme_ccb *ccb = NULL;
   1327 
   1328 	mutex_enter(&q->q_ccb_mtx);
   1329 	if (q->q_nccbs_avail > 0) {
   1330 		ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
   1331 		KASSERT(ccb != NULL);
   1332 		q->q_nccbs_avail--;
   1333 
   1334 		SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
   1335 #ifdef DEBUG
   1336 		ccb->ccb_cookie = NULL;
   1337 #endif
   1338 	}
   1339 	mutex_exit(&q->q_ccb_mtx);
   1340 
   1341 	return ccb;
   1342 }
   1343 
   1344 static void
   1345 nvme_ccb_put(struct nvme_queue *q, struct nvme_ccb *ccb)
   1346 {
   1347 
   1348 	mutex_enter(&q->q_ccb_mtx);
   1349 #ifdef DEBUG
   1350 	ccb->ccb_cookie = (void *)NVME_CCB_FREE;
   1351 #endif
   1352 	SIMPLEQ_INSERT_HEAD(&q->q_ccb_list, ccb, ccb_entry);
   1353 	mutex_exit(&q->q_ccb_mtx);
   1354 }
   1355 
   1356 static void
   1357 nvme_ccbs_free(struct nvme_queue *q)
   1358 {
   1359 	struct nvme_softc *sc = q->q_sc;
   1360 	struct nvme_ccb *ccb;
   1361 
   1362 	mutex_enter(&q->q_ccb_mtx);
   1363 	while ((ccb = SIMPLEQ_FIRST(&q->q_ccb_list)) != NULL) {
   1364 		SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
   1365 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
   1366 	}
   1367 	mutex_exit(&q->q_ccb_mtx);
   1368 
   1369 	nvme_dmamem_free(sc, q->q_ccb_prpls);
   1370 	kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
   1371 	q->q_ccbs = NULL;
   1372 	mutex_destroy(&q->q_ccb_mtx);
   1373 }
   1374 
   1375 static struct nvme_queue *
   1376 nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
   1377 {
   1378 	struct nvme_queue *q;
   1379 
   1380 	q = kmem_alloc(sizeof(*q), KM_SLEEP);
   1381 	if (q == NULL)
   1382 		return NULL;
   1383 
   1384 	q->q_sc = sc;
   1385 	q->q_sq_dmamem = nvme_dmamem_alloc(sc,
   1386 	    sizeof(struct nvme_sqe) * entries);
   1387 	if (q->q_sq_dmamem == NULL)
   1388 		goto free;
   1389 
   1390 	q->q_cq_dmamem = nvme_dmamem_alloc(sc,
   1391 	    sizeof(struct nvme_cqe) * entries);
   1392 	if (q->q_cq_dmamem == NULL)
   1393 		goto free_sq;
   1394 
   1395 	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
   1396 	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
   1397 
   1398 	mutex_init(&q->q_sq_mtx, MUTEX_DEFAULT, IPL_BIO);
   1399 	mutex_init(&q->q_cq_mtx, MUTEX_DEFAULT, IPL_BIO);
   1400 	q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
   1401 	q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
   1402 	q->q_id = id;
   1403 	q->q_entries = entries;
   1404 	q->q_sq_tail = 0;
   1405 	q->q_cq_head = 0;
   1406 	q->q_cq_phase = NVME_CQE_PHASE;
   1407 
   1408 	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
   1409 	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
   1410 
   1411 	/*
   1412 	 * Due to the definition of full and empty queues (a queue is empty
   1413 	 * when head == tail, full when tail is one less than head),
   1414 	 * we can actually only have (entries - 1) in-flight commands.
   1415 	 */
   1416 	if (nvme_ccbs_alloc(q, entries - 1) != 0) {
   1417 		aprint_error_dev(sc->sc_dev, "unable to allocate ccbs\n");
   1418 		goto free_cq;
   1419 	}
   1420 
   1421 	return q;
   1422 
   1423 free_cq:
   1424 	nvme_dmamem_free(sc, q->q_cq_dmamem);
   1425 free_sq:
   1426 	nvme_dmamem_free(sc, q->q_sq_dmamem);
   1427 free:
   1428 	kmem_free(q, sizeof(*q));
   1429 
   1430 	return NULL;
   1431 }
   1432 
   1433 static void
   1434 nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
   1435 {
   1436 	nvme_ccbs_free(q);
   1437 	mutex_destroy(&q->q_sq_mtx);
   1438 	mutex_destroy(&q->q_cq_mtx);
   1439 	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
   1440 	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
   1441 	nvme_dmamem_free(sc, q->q_cq_dmamem);
   1442 	nvme_dmamem_free(sc, q->q_sq_dmamem);
   1443 	kmem_free(q, sizeof(*q));
   1444 }
   1445 
   1446 int
   1447 nvme_intr(void *xsc)
   1448 {
   1449 	struct nvme_softc *sc = xsc;
   1450 
   1451 	/*
   1452 	 * INTx is level triggered; the controller deasserts the interrupt
   1453 	 * only when we advance the completion queue head via a doorbell
   1454 	 * write. Tell the controller to mask the interrupt while we
   1455 	 * process the queue(s).
   1456 	 */
   1457 	nvme_write4(sc, NVME_INTMS, 1);
   1458 
   1459 	softint_schedule(sc->sc_softih[0]);
   1460 
   1461 	/* don't know, might not have been for us */
   1462 	return 1;
   1463 }
   1464 
   1465 void
   1466 nvme_softintr_intx(void *xq)
   1467 {
   1468 	struct nvme_queue *q = xq;
   1469 	struct nvme_softc *sc = q->q_sc;
   1470 
   1471 	nvme_q_complete(sc, sc->sc_admin_q);
   1472 	if (sc->sc_q != NULL)
   1473 	        nvme_q_complete(sc, sc->sc_q[0]);
   1474 
   1475 	/*
   1476 	 * Processing done; tell the controller to issue interrupts again.
   1477 	 * There is no race, as the NVMe spec requires the controller to
   1478 	 * maintain state and assert the interrupt whenever there are
   1479 	 * unacknowledged completion queue entries.
   1480 	 */
   1481 	nvme_write4(sc, NVME_INTMC, 1);
   1482 }
   1483 
   1484 int
   1485 nvme_intr_msi(void *xq)
   1486 {
   1487 	struct nvme_queue *q = xq;
   1488 
   1489 	KASSERT(q && q->q_sc && q->q_sc->sc_softih
   1490 	    && q->q_sc->sc_softih[q->q_id]);
   1491 
   1492 	/*
   1493 	 * MSI/MSI-X interrupts are edge triggered, so we can hand processing
   1494 	 * over to the softint without masking the interrupt.
   1495 	 */
   1496 	softint_schedule(q->q_sc->sc_softih[q->q_id]);
   1497 
   1498 	return 1;
   1499 }
   1500 
   1501 void
   1502 nvme_softintr_msi(void *xq)
   1503 {
   1504 	struct nvme_queue *q = xq;
   1505 	struct nvme_softc *sc = q->q_sc;
   1506 
   1507 	nvme_q_complete(sc, q);
   1508 }
   1509 
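        /*
         * Allocate a physically contiguous, zeroed DMA area and keep the
         * map, segment and kva together: create the map, allocate one
         * segment aligned to the controller page size, map it into kva,
         * and load it into the map.
         */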
   1510 static struct nvme_dmamem *
   1511 nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
   1512 {
   1513 	struct nvme_dmamem *ndm;
   1514 	int nsegs;
   1515 
   1516 	ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
   1517 	if (ndm == NULL)
   1518 		return NULL;
   1519 
   1520 	ndm->ndm_size = size;
   1521 
   1522 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
   1523 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
   1524 		goto ndmfree;
   1525 
   1526 	if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
   1527 	    1, &nsegs, BUS_DMA_WAITOK) != 0)
   1528 		goto destroy;
   1529 
   1530 	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
   1531 	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
   1532 		goto free;
   1533 	memset(ndm->ndm_kva, 0, size);
   1534 
   1535 	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
   1536 	    NULL, BUS_DMA_WAITOK) != 0)
   1537 		goto unmap;
   1538 
   1539 	return ndm;
   1540 
   1541 unmap:
   1542 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
   1543 free:
   1544 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
   1545 destroy:
   1546 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
   1547 ndmfree:
   1548 	kmem_free(ndm, sizeof(*ndm));
   1549 	return NULL;
   1550 }
   1551 
   1552 static void
   1553 nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
   1554 {
   1555 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
   1556 	    0, NVME_DMA_LEN(mem), ops);
   1557 }
   1558 
   1559 void
   1560 nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
   1561 {
   1562 	bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
   1563 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
   1564 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
   1565 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
   1566 	kmem_free(ndm, sizeof(*ndm));
   1567 }
   1568 
   1569 /*
   1570  * ioctl
   1571  */
   1572 
   1573 dev_type_open(nvmeopen);
   1574 dev_type_close(nvmeclose);
   1575 dev_type_ioctl(nvmeioctl);
   1576 
   1577 const struct cdevsw nvme_cdevsw = {
   1578 	.d_open = nvmeopen,
   1579 	.d_close = nvmeclose,
   1580 	.d_read = noread,
   1581 	.d_write = nowrite,
   1582 	.d_ioctl = nvmeioctl,
   1583 	.d_stop = nostop,
   1584 	.d_tty = notty,
   1585 	.d_poll = nopoll,
   1586 	.d_mmap = nommap,
   1587 	.d_kqfilter = nokqfilter,
   1588 	.d_discard = nodiscard,
   1589 	.d_flag = D_OTHER,
   1590 };
   1591 
   1592 extern struct cfdriver nvme_cd;
   1593 
   1594 /*
   1595  * Accept an open operation on the control device.
   1596  */
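        /*
         * The minor number encodes both the unit and the namespace:
         * unit = minor / 0x10000 and nsid = minor & 0xffff, with nsid 0
         * addressing the controller itself.
         */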
   1597 int
   1598 nvmeopen(dev_t dev, int flag, int mode, struct lwp *l)
   1599 {
   1600 	struct nvme_softc *sc;
   1601 	int unit = minor(dev) / 0x10000;
   1602 	int nsid = minor(dev) & 0xffff;
   1603 	int nsidx;
   1604 
   1605 	if ((sc = device_lookup_private(&nvme_cd, unit)) == NULL)
   1606 		return ENXIO;
   1607 	if ((sc->sc_flags & NVME_F_ATTACHED) == 0)
   1608 		return ENXIO;
   1609 
   1610 	if (nsid == 0) {
   1611 		/* controller */
   1612 		if (ISSET(sc->sc_flags, NVME_F_OPEN))
   1613 			return EBUSY;
   1614 		SET(sc->sc_flags, NVME_F_OPEN);
   1615 	} else {
   1616 		/* namespace */
   1617 		nsidx = nsid - 1;
   1618 		if (nsidx >= sc->sc_nn || sc->sc_namespaces[nsidx].dev == NULL)
   1619 			return ENXIO;
   1620 		if (ISSET(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN))
   1621 			return EBUSY;
   1622 		SET(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN);
   1623 	}
   1624 	return 0;
   1625 }
   1626 
   1627 /*
   1628  * Accept the last close on the control device.
   1629  */
   1630 int
   1631 nvmeclose(dev_t dev, int flag, int mode, struct lwp *l)
   1632 {
   1633 	struct nvme_softc *sc;
   1634 	int unit = minor(dev) / 0x10000;
   1635 	int nsid = minor(dev) & 0xffff;
   1636 	int nsidx;
   1637 
   1638 	sc = device_lookup_private(&nvme_cd, unit);
   1639 	if (sc == NULL)
   1640 		return ENXIO;
   1641 
   1642 	if (nsid == 0) {
   1643 		/* controller */
   1644 		CLR(sc->sc_flags, NVME_F_OPEN);
   1645 	} else {
   1646 		/* namespace */
   1647 		nsidx = nsid - 1;
   1648 		if (nsidx >= sc->sc_nn)
   1649 			return ENXIO;
   1650 		CLR(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN);
   1651 	}
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /*
   1657  * Handle control operations.
   1658  */
   1659 int
   1660 nvmeioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
   1661 {
   1662 	struct nvme_softc *sc;
   1663 	int unit = minor(dev) / 0x10000;
   1664 	int nsid = minor(dev) & 0xffff;
   1665 	struct nvme_pt_command *pt;
   1666 
   1667 	sc = device_lookup_private(&nvme_cd, unit);
   1668 	if (sc == NULL)
   1669 		return ENXIO;
   1670 
   1671 	switch (cmd) {
   1672 	case NVME_PASSTHROUGH_CMD:
   1673 		pt = data;
   1674 		return nvme_command_passthrough(sc, data,
   1675 		    nsid == 0 ? pt->cmd.nsid : nsid, l, nsid == 0);
   1676 	}
   1677 
   1678 	return ENOTTY;
   1679 }
   1680
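        /*
         * For illustration, a minimal userland sketch of driving the
         * NVME_PASSTHROUGH_CMD ioctl handled above, assuming the control
         * node is /dev/nvme0 and issuing IDENTIFY CONTROLLER (admin
         * opcode 0x06, CNS 1 in cdw10); the device path and the raw
         * opcode value are assumptions of the sketch, not something this
         * file defines, and error handling is omitted:
         *
         *	struct nvme_pt_command pt;
         *	char buf[4096];
         *	int fd;
         *
         *	memset(&pt, 0, sizeof(pt));
         *	pt.cmd.opcode = 0x06;		(NVM_ADMIN_IDENTIFY)
         *	pt.cmd.cdw10 = 1;		(CNS: identify controller)
         *	pt.buf = buf;
         *	pt.len = sizeof(buf);
         *	pt.is_read = 1;
         *
         *	fd = open("/dev/nvme0", O_RDWR);
         *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
         *
         * On success buf holds the identify data and pt.cpl carries the
         * completion status.
         */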