/*	$NetBSD: nvme.c,v 1.69 2024/03/11 21:10:46 riastradh Exp $	*/
/*	$OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.69 2024/03/11 21:10:46 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/once.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <uvm/uvm_extern.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>
#include <dev/ic/nvmeio.h>

#include "ioconf.h"
#include "locators.h"

#define	B4_CHK_RDY_DELAY_MS	2300	/* workaround controller bug */

int nvme_adminq_size = 32;
int nvme_ioq_size = 1024;

static int	nvme_print(void *, const char *);

static int	nvme_ready(struct nvme_softc *, uint32_t);
static int	nvme_enable(struct nvme_softc *, u_int);
static int	nvme_disable(struct nvme_softc *);
static int	nvme_shutdown(struct nvme_softc *);

uint32_t	nvme_op_sq_enter(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);
void		nvme_op_sq_leave(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);
uint32_t	nvme_op_sq_enter_locked(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);
void		nvme_op_sq_leave_locked(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);

void		nvme_op_cq_done(struct nvme_softc *,
		    struct nvme_queue *, struct nvme_ccb *);
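
/*
 * Default queue accessors, used unless the bus front-end installs its
 * own nvme_ops in sc->sc_ops before calling nvme_attach().
 */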
static const struct nvme_ops nvme_ops = {
	.op_sq_enter		= nvme_op_sq_enter,
	.op_sq_leave		= nvme_op_sq_leave,
	.op_sq_enter_locked	= nvme_op_sq_enter_locked,
	.op_sq_leave_locked	= nvme_op_sq_leave_locked,

	.op_cq_done		= nvme_op_cq_done,
};

#ifdef NVME_DEBUG
static void	nvme_dumpregs(struct nvme_softc *);
#endif
static int	nvme_identify(struct nvme_softc *, u_int);
static void	nvme_fill_identify(struct nvme_queue *, struct nvme_ccb *,
		    void *);

static int	nvme_ccbs_alloc(struct nvme_queue *, uint16_t);
static void	nvme_ccbs_free(struct nvme_queue *);

static struct nvme_ccb *
		nvme_ccb_get(struct nvme_queue *, bool);
static struct nvme_ccb *
		nvme_ccb_get_bio(struct nvme_softc *, struct buf *,
		    struct nvme_queue **);
static void	nvme_ccb_put(struct nvme_queue *, struct nvme_ccb *);

static int	nvme_poll(struct nvme_softc *, struct nvme_queue *,
		    struct nvme_ccb *, void (*)(struct nvme_queue *,
		    struct nvme_ccb *, void *), int);
static void	nvme_poll_fill(struct nvme_queue *, struct nvme_ccb *, void *);
static void	nvme_poll_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_sqe_fill(struct nvme_queue *, struct nvme_ccb *, void *);
static void	nvme_empty_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);

static struct nvme_queue *
		nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
static int	nvme_q_create(struct nvme_softc *, struct nvme_queue *);
static void	nvme_q_reset(struct nvme_softc *, struct nvme_queue *);
static int	nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
static void	nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
		    struct nvme_ccb *, void (*)(struct nvme_queue *,
		    struct nvme_ccb *, void *));
static int	nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
static void	nvme_q_free(struct nvme_softc *, struct nvme_queue *);
static void	nvme_q_wait_complete(struct nvme_softc *, struct nvme_queue *,
		    bool (*)(void *), void *);

static void	nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_ns_io_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_ns_sync_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_ns_sync_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static void	nvme_getcache_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_getcache_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);

static void	nvme_pt_fill(struct nvme_queue *, struct nvme_ccb *,
		    void *);
static void	nvme_pt_done(struct nvme_queue *, struct nvme_ccb *,
		    struct nvme_cqe *);
static int	nvme_command_passthrough(struct nvme_softc *,
		    struct nvme_pt_command *, uint32_t, struct lwp *, bool);

static int	nvme_set_number_of_queues(struct nvme_softc *, u_int, u_int *,
		    u_int *);
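
/*
 * Command timeouts, in seconds.  A negative value makes nvme_poll()
 * wait indefinitely for the command to complete.
 */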
#define NVME_TIMO_QOP		5	/* queue create and delete timeout */
#define NVME_TIMO_IDENT		10	/* probe identify timeout */
#define NVME_TIMO_PT		-1	/* passthrough cmd timeout */
#define NVME_TIMO_SY		60	/* sync cache timeout */

/*
 * Some controllers, at least Apple NVMe, always require split
 * transfers, so don't use bus_space_{read,write}_8() on LP64.
 */
uint64_t
nvme_read8(struct nvme_softc *sc, bus_size_t r)
{
	uint64_t v;
	uint32_t *a = (uint32_t *)&v;

#if _BYTE_ORDER == _LITTLE_ENDIAN
	a[0] = nvme_read4(sc, r);
	a[1] = nvme_read4(sc, r + 4);
#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
	a[1] = nvme_read4(sc, r);
	a[0] = nvme_read4(sc, r + 4);
#endif

	return v;
}

void
nvme_write8(struct nvme_softc *sc, bus_size_t r, uint64_t v)
{
	uint32_t *a = (uint32_t *)&v;

#if _BYTE_ORDER == _LITTLE_ENDIAN
	nvme_write4(sc, r, a[0]);
	nvme_write4(sc, r + 4, a[1]);
#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
	nvme_write4(sc, r, a[1]);
	nvme_write4(sc, r + 4, a[0]);
#endif
}

#ifdef NVME_DEBUG
static __used void
nvme_dumpregs(struct nvme_softc *sc)
{
	uint64_t r8;
	uint32_t r4;

#define	DEVNAME(_sc) device_xname((_sc)->sc_dev)
	r8 = nvme_read8(sc, NVME_CAP);
	printf("%s: cap 0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_CAP));
	printf("%s: mpsmax %u (%u)\n", DEVNAME(sc),
	    (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
	printf("%s: mpsmin %u (%u)\n", DEVNAME(sc),
	    (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
	printf("%s: css %"PRIu64"\n", DEVNAME(sc), NVME_CAP_CSS(r8));
	printf("%s: nssrs %"PRIu64"\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
	printf("%s: dstrd %"PRIu64"\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
	printf("%s: to %"PRIu64" msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
	printf("%s: ams %"PRIu64"\n", DEVNAME(sc), NVME_CAP_AMS(r8));
	printf("%s: cqr %"PRIu64"\n", DEVNAME(sc), NVME_CAP_CQR(r8));
	printf("%s: mqes %"PRIu64"\n", DEVNAME(sc), NVME_CAP_MQES(r8));

	printf("%s: vs 0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));

	r4 = nvme_read4(sc, NVME_CC);
	printf("%s: cc 0x%04x\n", DEVNAME(sc), r4);
	printf("%s: iocqes %u (%u)\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4),
	    (1 << NVME_CC_IOCQES_R(r4)));
	printf("%s: iosqes %u (%u)\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4),
	    (1 << NVME_CC_IOSQES_R(r4)));
	printf("%s: shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
	printf("%s: ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
	printf("%s: mps %u (%u)\n", DEVNAME(sc), NVME_CC_MPS_R(r4),
	    (1 << NVME_CC_MPS_R(r4)));
	printf("%s: css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
	printf("%s: en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN) ? 1 : 0);

	r4 = nvme_read4(sc, NVME_CSTS);
	printf("%s: csts 0x%08x\n", DEVNAME(sc), r4);
	printf("%s: rdy %u\n", DEVNAME(sc), r4 & NVME_CSTS_RDY);
	printf("%s: cfs %u\n", DEVNAME(sc), r4 & NVME_CSTS_CFS);
	printf("%s: shst %x\n", DEVNAME(sc), r4 & NVME_CSTS_SHST_MASK);

	r4 = nvme_read4(sc, NVME_AQA);
	printf("%s: aqa 0x%08x\n", DEVNAME(sc), r4);
	printf("%s: acqs %u\n", DEVNAME(sc), NVME_AQA_ACQS_R(r4));
	printf("%s: asqs %u\n", DEVNAME(sc), NVME_AQA_ASQS_R(r4));

	printf("%s: asq 0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
	printf("%s: acq 0x%016"PRIx64"\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
#undef DEVNAME
}
#endif	/* NVME_DEBUG */
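
/*
 * Wait for CSTS.RDY to match the expected value.  The retry budget
 * sc_rdy_to is derived from CAP.TO at attach time; the loop polls
 * the register once per millisecond until the budget is exhausted.
 */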
static int
nvme_ready(struct nvme_softc *sc, uint32_t rdy)
{
	u_int i = 0;

	while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
		if (i++ > sc->sc_rdy_to)
			return ENXIO;

		delay(1000);
		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
	}

	return 0;
}
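
/*
 * Program the admin queue registers (ASQ, ACQ, AQA) and the controller
 * configuration, then set CC.EN and wait for the controller to report
 * ready.
 */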
static int
nvme_enable(struct nvme_softc *sc, u_int mps)
{
	uint32_t cc, csts;
	int error;

	cc = nvme_read4(sc, NVME_CC);
	csts = nvme_read4(sc, NVME_CSTS);

	/*
	 * See note in nvme_disable. Short circuit if we're already enabled.
	 */
	if (ISSET(cc, NVME_CC_EN)) {
		if (ISSET(csts, NVME_CSTS_RDY))
			return 0;

		goto waitready;
	} else {
		/* EN == 0 already, wait for RDY == 0 or fail */
		error = nvme_ready(sc, 0);
		if (error)
			return error;
	}

	if (sc->sc_ops->op_enable != NULL)
		sc->sc_ops->op_enable(sc);

	nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
	delay(5000);
	nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
	delay(5000);

	nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
	    NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
	delay(5000);

	CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
	SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
	SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
	SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
	SET(cc, NVME_CC_MPS(mps));
	SET(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

waitready:
	return nvme_ready(sc, NVME_CSTS_RDY);
}

static int
nvme_disable(struct nvme_softc *sc)
{
	uint32_t cc, csts;
	int error;

	cc = nvme_read4(sc, NVME_CC);
	csts = nvme_read4(sc, NVME_CSTS);

	/*
	 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0 "has undefined results".  So make sure that CSTS.RDY
	 * isn't the desired value. Short circuit if we're already disabled.
	 */
	if (ISSET(cc, NVME_CC_EN)) {
		if (!ISSET(csts, NVME_CSTS_RDY)) {
			/* EN == 1, wait for RDY == 1 or fail */
			error = nvme_ready(sc, NVME_CSTS_RDY);
			if (error)
				return error;
		}
	} else {
		/* EN == 0 already, wait for RDY == 0 */
		if (!ISSET(csts, NVME_CSTS_RDY))
			return 0;

		goto waitready;
	}

	CLR(cc, NVME_CC_EN);
	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_READ);

	/*
	 * Some drives have issues with accessing the mmio after we disable,
	 * so delay for a bit after we write the bit to cope with these issues.
	 */
	if (ISSET(sc->sc_quirks, NVME_QUIRK_DELAY_B4_CHK_RDY))
		delay(B4_CHK_RDY_DELAY_MS);

waitready:
	return nvme_ready(sc, 0);
}
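
/*
 * Common attach code: size the transfer limits from CAP, reset the
 * controller, set up the admin queue, identify the controller, and
 * create one I/O queue pair per sc_nq before probing namespaces.
 */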
int
nvme_attach(struct nvme_softc *sc)
{
	uint64_t cap;
	uint32_t reg;
	u_int mps = PAGE_SHIFT;
	u_int ncq, nsq;
	uint16_t adminq_entries = nvme_adminq_size;
	uint16_t ioq_entries = nvme_ioq_size;
	int i;

	if (sc->sc_ops == NULL)
		sc->sc_ops = &nvme_ops;

	reg = nvme_read4(sc, NVME_VS);
	if (reg == 0xffffffff) {
		aprint_error_dev(sc->sc_dev, "invalid mapping\n");
		return 1;
	}

	if (NVME_VS_TER(reg) == 0)
		aprint_normal_dev(sc->sc_dev, "NVMe %d.%d\n", NVME_VS_MJR(reg),
		    NVME_VS_MNR(reg));
	else
		aprint_normal_dev(sc->sc_dev, "NVMe %d.%d.%d\n", NVME_VS_MJR(reg),
		    NVME_VS_MNR(reg), NVME_VS_TER(reg));

	cap = nvme_read8(sc, NVME_CAP);
	sc->sc_dstrd = NVME_CAP_DSTRD(cap);
	if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
		aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
		    "is greater than CPU page size %u\n",
		    1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
		return 1;
	}
	if (NVME_CAP_MPSMAX(cap) < mps)
		mps = NVME_CAP_MPSMAX(cap);
	if (ioq_entries > NVME_CAP_MQES(cap))
		ioq_entries = NVME_CAP_MQES(cap);

	/* set initial values to be used for admin queue during probe */
	sc->sc_rdy_to = NVME_CAP_TO(cap);
	sc->sc_mps = 1 << mps;
	sc->sc_mdts = MAXPHYS;
	sc->sc_max_sgl = btoc(round_page(sc->sc_mdts));

	if (nvme_disable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to disable controller\n");
		return 1;
	}

	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries,
	    sc->sc_dstrd);
	if (sc->sc_admin_q == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate admin queue\n");
		return 1;
	}
	if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q))
		goto free_admin_q;

	if (nvme_enable(sc, mps) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable controller\n");
		goto disestablish_admin_q;
	}

	if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to identify controller\n");
		goto disable;
	}
	if (sc->sc_nn == 0) {
		aprint_error_dev(sc->sc_dev, "namespace not found\n");
		goto disable;
	}

	/* we know how big things are now */
	sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;

	/* reallocate ccbs of admin queue with new max sgl. */
	nvme_ccbs_free(sc->sc_admin_q);
	nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);

	if (sc->sc_use_mq) {
		/* Limit the number of queues to the number allocated in HW */
		if (nvme_set_number_of_queues(sc, sc->sc_nq, &ncq, &nsq) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to get number of queues\n");
			goto disable;
		}
		if (sc->sc_nq > ncq)
			sc->sc_nq = ncq;
		if (sc->sc_nq > nsq)
			sc->sc_nq = nsq;
	}

	sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
	for (i = 0; i < sc->sc_nq; i++) {
		sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries,
		    sc->sc_dstrd);
		if (sc->sc_q[i] == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate io queue\n");
			goto free_q;
		}
		if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create io queue\n");
			nvme_q_free(sc, sc->sc_q[i]);
			goto free_q;
		}
	}

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMC, 1);

	/* probe subdevices */
	sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
	    KM_SLEEP);
	nvme_rescan(sc->sc_dev, NULL, NULL);

	return 0;

free_q:
	while (--i >= 0) {
		nvme_q_delete(sc, sc->sc_q[i]);
		nvme_q_free(sc, sc->sc_q[i]);
	}
disable:
	nvme_disable(sc);
disestablish_admin_q:
	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
free_admin_q:
	nvme_q_free(sc, sc->sc_admin_q);

	return 1;
}
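
/*
 * Attach a child device for every namespace that has a usable LBA
 * format and is not yet configured.
 */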
int
nvme_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct nvme_softc *sc = device_private(self);
	struct nvme_attach_args naa;
	struct nvm_namespace_format *f;
	struct nvme_namespace *ns;
	uint64_t cap;
	int ioq_entries = nvme_ioq_size;
	int i, mlocs[NVMECF_NLOCS];
	int error;

	cap = nvme_read8(sc, NVME_CAP);
	if (ioq_entries > NVME_CAP_MQES(cap))
		ioq_entries = NVME_CAP_MQES(cap);

	for (i = 1; i <= sc->sc_nn; i++) {
		if (sc->sc_namespaces[i - 1].dev)
			continue;

		/* identify to check for availability */
		error = nvme_ns_identify(sc, i);
		if (error) {
			aprint_error_dev(self, "couldn't identify namespace #%d\n", i);
			continue;
		}

		ns = nvme_ns_get(sc, i);
		KASSERT(ns);

		f = &ns->ident->lbaf[NVME_ID_NS_FLBAS(ns->ident->flbas)];

		/*
		 * NVME1.0e 6.11 Identify command
		 *
		 * LBADS values smaller than 9 are not supported, a value
		 * of zero means that the format is not used.
		 */
		if (f->lbads < 9) {
			if (f->lbads > 0)
				aprint_error_dev(self,
				    "unsupported logical data size %u\n", f->lbads);
			continue;
		}

		mlocs[NVMECF_NSID] = i;

		memset(&naa, 0, sizeof(naa));
		naa.naa_nsid = i;
		naa.naa_qentries = (ioq_entries - 1) * sc->sc_nq;
		naa.naa_maxphys = sc->sc_mdts;
		naa.naa_typename = sc->sc_modelname;
		sc->sc_namespaces[i - 1].dev =
		    config_found(sc->sc_dev, &naa, nvme_print,
			CFARGS(.submatch = config_stdsubmatch,
			       .locators = mlocs));
	}
	return 0;
}

static int
nvme_print(void *aux, const char *pnp)
{
	struct nvme_attach_args *naa = aux;

	if (pnp)
		aprint_normal("ld at %s", pnp);

	if (naa->naa_nsid > 0)
		aprint_normal(" nsid %d", naa->naa_nsid);

	return UNCONF;
}
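
/*
 * Detach all children, shut the controller down, and release the
 * queue resources.
 */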
int
nvme_detach(struct nvme_softc *sc, int flags)
{
	int i, error;

	error = config_detach_children(sc->sc_dev, flags);
	if (error)
		return error;

	error = nvme_shutdown(sc);
	if (error)
		return error;

	/* from now on we are committed to detach, following will never fail */
	for (i = 0; i < sc->sc_nq; i++)
		nvme_q_free(sc, sc->sc_q[i]);
	kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
	nvme_q_free(sc, sc->sc_admin_q);

	return 0;
}

int
nvme_suspend(struct nvme_softc *sc)
{

	return nvme_shutdown(sc);
}

int
nvme_resume(struct nvme_softc *sc)
{
	int i, error;

	error = nvme_disable(sc);
	if (error) {
		device_printf(sc->sc_dev, "unable to disable controller\n");
		return error;
	}

	nvme_q_reset(sc, sc->sc_admin_q);
	if (sc->sc_intr_establish(sc, NVME_ADMIN_Q, sc->sc_admin_q)) {
		error = EIO;
		device_printf(sc->sc_dev, "unable to establish admin q\n");
		goto disable;
	}

	error = nvme_enable(sc, ffs(sc->sc_mps) - 1);
	if (error) {
		device_printf(sc->sc_dev, "unable to enable controller\n");
		return error;
	}

	for (i = 0; i < sc->sc_nq; i++) {
		nvme_q_reset(sc, sc->sc_q[i]);
		if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
			error = EIO;
			device_printf(sc->sc_dev, "unable to create io q %d"
			    "\n", i);
			goto disable;
		}
	}

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMC, 1);

	return 0;

disable:
	(void)nvme_disable(sc);

	return error;
}
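
/*
 * Delete the I/O queues and request a normal shutdown, waiting up to
 * four seconds for CSTS.SHST to signal completion; fall back to a
 * plain disable if either step fails.
 */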
static int
nvme_shutdown(struct nvme_softc *sc)
{
	uint32_t cc, csts;
	bool disabled = false;
	int i;

	if (!sc->sc_use_mq)
		nvme_write4(sc, NVME_INTMS, 1);

	for (i = 0; i < sc->sc_nq; i++) {
		if (nvme_q_delete(sc, sc->sc_q[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to delete io queue %d, disabling\n", i + 1);
			disabled = true;
		}
	}
	if (disabled)
		goto disable;

	sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);

	cc = nvme_read4(sc, NVME_CC);
	CLR(cc, NVME_CC_SHN_MASK);
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
	nvme_write4(sc, NVME_CC, cc);

	for (i = 0; i < 4000; i++) {
		nvme_barrier(sc, 0, sc->sc_ios,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		csts = nvme_read4(sc, NVME_CSTS);
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
			return 0;

		delay(1000);
	}

	aprint_error_dev(sc->sc_dev, "unable to shut down, disabling\n");

disable:
	nvme_disable(sc);
	return 0;
}

void
nvme_childdet(device_t self, device_t child)
{
	struct nvme_softc *sc = device_private(self);
	int i;

	for (i = 0; i < sc->sc_nn; i++) {
		if (sc->sc_namespaces[i].dev == child) {
			/* Already freed ns->ident. */
			sc->sc_namespaces[i].dev = NULL;
			break;
		}
	}
}
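
/*
 * Fetch and cache the IDENTIFY NAMESPACE data for nsid; a no-op if
 * the namespace has already been identified.
 */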
int
nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
{
	struct nvme_sqe sqe;
	struct nvm_identify_namespace *identify;
	struct nvme_dmamem *mem;
	struct nvme_ccb *ccb;
	struct nvme_namespace *ns;
	int rv;

	KASSERT(nsid > 0);

	ns = nvme_ns_get(sc, nsid);
	KASSERT(ns);

	if (ns->ident != NULL)
		return 0;

	ccb = nvme_ccb_get(sc->sc_admin_q, false);
	KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */

	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
	if (mem == NULL) {
		nvme_ccb_put(sc->sc_admin_q, ccb);
		return ENOMEM;
	}

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_IDENTIFY;
	htolem32(&sqe.nsid, nsid);
	htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
	htolem32(&sqe.cdw10, 0);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);

	nvme_ccb_put(sc->sc_admin_q, ccb);

	if (rv != 0) {
		rv = EIO;
		goto done;
	}

	/* commit */

	identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
	*identify = *((volatile struct nvm_identify_namespace *)NVME_DMA_KVA(mem));

	/* Convert data to host endian */
	nvme_identify_namespace_swapbytes(identify);

	ns->ident = identify;

done:
	nvme_dmamem_free(sc, mem);

	return rv;
}
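
/*
 * Start a read or write on behalf of the namespace layer.  Transfers
 * that need more than two PRP entries use the ccb's preallocated PRP
 * list; with NVME_NS_CTX_F_POLL set the command is polled to
 * completion instead of completing from interrupt context.
 */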
int
nvme_ns_dobio(struct nvme_softc *sc, uint16_t nsid, void *cookie,
    struct buf *bp, void *data, size_t datasize,
    int secsize, daddr_t blkno, int flags, nvme_nnc_done nnc_done)
{
	struct nvme_queue *q;
	struct nvme_ccb *ccb;
	bus_dmamap_t dmap;
	int i, error;

	ccb = nvme_ccb_get_bio(sc, bp, &q);
	if (ccb == NULL)
		return EAGAIN;

	ccb->ccb_done = nvme_ns_io_done;
	ccb->ccb_cookie = cookie;

	/* namespace context */
	ccb->nnc_nsid = nsid;
	ccb->nnc_flags = flags;
	ccb->nnc_buf = bp;
	ccb->nnc_datasize = datasize;
	ccb->nnc_secsize = secsize;
	ccb->nnc_blkno = blkno;
	ccb->nnc_done = nnc_done;

	dmap = ccb->ccb_dmamap;
	error = bus_dmamap_load(sc->sc_dmat, dmap, data,
	    datasize, NULL,
	    (ISSET(flags, NVME_NS_CTX_F_POLL) ?
	      BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
	    (ISSET(flags, NVME_NS_CTX_F_READ) ?
	      BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		nvme_ccb_put(q, ccb);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(flags, NVME_NS_CTX_F_READ) ?
	      BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (dmap->dm_nsegs > 2) {
		for (i = 1; i < dmap->dm_nsegs; i++) {
			htolem64(&ccb->ccb_prpl[i - 1],
			    dmap->dm_segs[i].ds_addr);
		}
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(q->q_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_PREWRITE);
	}

	if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
		if (nvme_poll(sc, q, ccb, nvme_ns_io_fill, NVME_TIMO_PT) != 0)
			return EIO;
		return 0;
	}

	nvme_q_submit(sc, q, ccb, nvme_ns_io_fill);
	return 0;
}

static void
nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe_io *sqe = slot;
	bus_dmamap_t dmap = ccb->ccb_dmamap;

	sqe->opcode = ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
	    NVM_CMD_READ : NVM_CMD_WRITE;
	htolem32(&sqe->nsid, ccb->nnc_nsid);

	htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
	switch (dmap->dm_nsegs) {
	case 1:
		break;
	case 2:
		htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
		break;
	default:
		/* the prp list is already set up and synced */
		htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
		break;
	}

	htolem64(&sqe->slba, ccb->nnc_blkno);

	if (ISSET(ccb->nnc_flags, NVME_NS_CTX_F_FUA))
		htolem16(&sqe->ioflags, NVM_SQE_IO_FUA);

	/* guaranteed by upper layers, but check just in case */
	KASSERT((ccb->nnc_datasize % ccb->nnc_secsize) == 0);
	htolem16(&sqe->nlb, (ccb->nnc_datasize / ccb->nnc_secsize) - 1);
}
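
/*
 * I/O completion: tear down the DMA maps, release the ccb, and hand
 * the CQE status to the namespace-layer callback.
 */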
static void
nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_softc *sc = q->q_sc;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	void *nnc_cookie = ccb->ccb_cookie;
	nvme_nnc_done nnc_done = ccb->nnc_done;
	struct buf *bp = ccb->nnc_buf;

	if (dmap->dm_nsegs > 2) {
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(q->q_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_POSTWRITE);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
	      BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmap);
	nvme_ccb_put(q, ccb);

	nnc_done(nnc_cookie, bp, lemtoh16(&cqe->flags), lemtoh32(&cqe->cdw0));
}

/*
 * If there is no volatile write cache, it makes no sense to issue
 * flush commands or query for the status.
 */
static bool
nvme_has_volatile_write_cache(struct nvme_softc *sc)
{
	/* sc_identify is filled during attachment */
	return ((sc->sc_identify.vwc & NVME_ID_CTRLR_VWC_PRESENT) != 0);
}

static bool
nvme_ns_sync_finished(void *cookie)
{
	int *result = cookie;

	return (*result != 0);
}

int
nvme_ns_sync(struct nvme_softc *sc, uint16_t nsid, int flags)
{
	struct nvme_queue *q = nvme_get_q(sc);
	struct nvme_ccb *ccb;
	int result = 0;

	if (!nvme_has_volatile_write_cache(sc)) {
		/* cache not present, no value in trying to flush it */
		return 0;
	}

	ccb = nvme_ccb_get(q, true);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_ns_sync_done;
	ccb->ccb_cookie = &result;

	/* namespace context */
	ccb->nnc_nsid = nsid;
	ccb->nnc_flags = flags;
	ccb->nnc_done = NULL;

	if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
		if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill, NVME_TIMO_SY) != 0)
			return EIO;
		return 0;
	}

	nvme_q_submit(sc, q, ccb, nvme_ns_sync_fill);

	/* wait for completion */
	nvme_q_wait_complete(sc, q, nvme_ns_sync_finished, &result);
	KASSERT(result != 0);

	return (result > 0) ? 0 : EIO;
}
static void
nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;

	sqe->opcode = NVM_CMD_FLUSH;
	htolem32(&sqe->nsid, ccb->nnc_nsid);
}

static void
nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	int *result = ccb->ccb_cookie;
	uint16_t status = NVME_CQE_SC(lemtoh16(&cqe->flags));

	if (status == NVME_CQE_SC_SUCCESS)
		*result = 1;
	else
		*result = -1;

	nvme_ccb_put(q, ccb);
}

static bool
nvme_getcache_finished(void *xc)
{
	int *addr = xc;

	return (*addr != 0);
}

/*
 * Get status of volatile write cache. Always asynchronous.
 */
int
nvme_admin_getcache(struct nvme_softc *sc, int *addr)
{
	struct nvme_ccb *ccb;
	struct nvme_queue *q = sc->sc_admin_q;
	int result = 0, error;

	if (!nvme_has_volatile_write_cache(sc)) {
		/* cache simply not present */
		*addr = 0;
		return 0;
	}

	ccb = nvme_ccb_get(q, true);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_getcache_done;
	ccb->ccb_cookie = &result;

	/* namespace context */
	ccb->nnc_flags = 0;
	ccb->nnc_done = NULL;

	nvme_q_submit(sc, q, ccb, nvme_getcache_fill);

	/* wait for completion */
	nvme_q_wait_complete(sc, q, nvme_getcache_finished, &result);
	KASSERT(result != 0);

	if (result > 0) {
		*addr = result;
		error = 0;
	} else
		error = EINVAL;

	return error;
}

static void
nvme_getcache_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;

	sqe->opcode = NVM_ADMIN_GET_FEATURES;
	htolem32(&sqe->cdw10, NVM_FEATURE_VOLATILE_WRITE_CACHE);
	htolem32(&sqe->cdw11, NVM_VOLATILE_WRITE_CACHE_WCE);
}
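
/*
 * Translate the GET FEATURES result into dkio(4) DKCACHE_* flags.
 */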
static void
nvme_getcache_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	int *addr = ccb->ccb_cookie;
	uint16_t status = NVME_CQE_SC(lemtoh16(&cqe->flags));
	uint32_t cdw0 = lemtoh32(&cqe->cdw0);
	int result;

	if (status == NVME_CQE_SC_SUCCESS) {
		result = 0;

		/*
		 * DPO not supported, Dataset Management (DSM) field doesn't
		 * specify the same semantics. FUA is always supported.
		 */
		result = DKCACHE_FUA;

		if (cdw0 & NVM_VOLATILE_WRITE_CACHE_WCE)
			result |= DKCACHE_WRITE;

		/*
		 * If volatile write cache is present, the flag shall also be
		 * settable.
		 */
		result |= DKCACHE_WCHANGE;

		/*
		 * ONCS field indicates whether the optional SAVE is also
		 * supported for Set Features. According to spec v1.3,
		 * Volatile Write Cache however doesn't support persistency
		 * across power cycle/reset.
		 */

	} else {
		result = -1;
	}

	*addr = result;

	nvme_ccb_put(q, ccb);
}

struct nvme_setcache_state {
	int dkcache;
	int result;
};

static bool
nvme_setcache_finished(void *xc)
{
	struct nvme_setcache_state *st = xc;

	return (st->result != 0);
}

static void
nvme_setcache_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_setcache_state *st = ccb->ccb_cookie;

	sqe->opcode = NVM_ADMIN_SET_FEATURES;
	htolem32(&sqe->cdw10, NVM_FEATURE_VOLATILE_WRITE_CACHE);
	if (st->dkcache & DKCACHE_WRITE)
		htolem32(&sqe->cdw11, NVM_VOLATILE_WRITE_CACHE_WCE);
}

static void
nvme_setcache_done(struct nvme_queue *q, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_setcache_state *st = ccb->ccb_cookie;
	uint16_t status = NVME_CQE_SC(lemtoh16(&cqe->flags));

	if (status == NVME_CQE_SC_SUCCESS) {
		st->result = 1;
	} else {
		st->result = -1;
	}

	nvme_ccb_put(q, ccb);
}
/*
 * Set status of volatile write cache. Always asynchronous.
 */
int
nvme_admin_setcache(struct nvme_softc *sc, int dkcache)
{
	struct nvme_ccb *ccb;
	struct nvme_queue *q = sc->sc_admin_q;
	int error;
	struct nvme_setcache_state st;

	if (!nvme_has_volatile_write_cache(sc)) {
		/* cache simply not present */
		return EOPNOTSUPP;
	}

	if (dkcache & ~(DKCACHE_WRITE)) {
		/* unsupported parameters */
		return EOPNOTSUPP;
	}

	ccb = nvme_ccb_get(q, true);
	KASSERT(ccb != NULL);

	memset(&st, 0, sizeof(st));
	st.dkcache = dkcache;

	ccb->ccb_done = nvme_setcache_done;
	ccb->ccb_cookie = &st;

	/* namespace context */
	ccb->nnc_flags = 0;
	ccb->nnc_done = NULL;

	nvme_q_submit(sc, q, ccb, nvme_setcache_fill);

	/* wait for completion */
	nvme_q_wait_complete(sc, q, nvme_setcache_finished, &st);
	KASSERT(st.result != 0);

	if (st.result > 0)
		error = 0;
	else
		error = EINVAL;

	return error;
}

void
nvme_ns_free(struct nvme_softc *sc, uint16_t nsid)
{
	struct nvme_namespace *ns;
	struct nvm_identify_namespace *identify;

	ns = nvme_ns_get(sc, nsid);
	KASSERT(ns);

	identify = ns->ident;
	ns->ident = NULL;
	if (identify != NULL)
		kmem_free(identify, sizeof(*identify));
}

struct nvme_pt_state {
	struct nvme_pt_command *pt;
	bool finished;
};
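
/*
 * Copy a user-supplied passthrough command into the submission queue
 * entry, wiring up PRP entries when a data buffer is attached.
 */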
static void
nvme_pt_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_sqe *sqe = slot;
	struct nvme_pt_state *state = ccb->ccb_cookie;
	struct nvme_pt_command *pt = state->pt;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int i;

	sqe->opcode = pt->cmd.opcode;
	htolem32(&sqe->nsid, pt->cmd.nsid);

	if (pt->buf != NULL && pt->len > 0) {
		htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
		switch (dmap->dm_nsegs) {
		case 1:
			break;
		case 2:
			htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
			break;
		default:
			for (i = 1; i < dmap->dm_nsegs; i++) {
				htolem64(&ccb->ccb_prpl[i - 1],
				    dmap->dm_segs[i].ds_addr);
			}
			bus_dmamap_sync(sc->sc_dmat,
			    NVME_DMA_MAP(q->q_ccb_prpls),
			    ccb->ccb_prpl_off,
			    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
			    BUS_DMASYNC_PREWRITE);
			htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
			break;
		}
	}

	htolem32(&sqe->cdw10, pt->cmd.cdw10);
	htolem32(&sqe->cdw11, pt->cmd.cdw11);
	htolem32(&sqe->cdw12, pt->cmd.cdw12);
	htolem32(&sqe->cdw13, pt->cmd.cdw13);
	htolem32(&sqe->cdw14, pt->cmd.cdw14);
	htolem32(&sqe->cdw15, pt->cmd.cdw15);
}

static void
nvme_pt_done(struct nvme_queue *q, struct nvme_ccb *ccb, struct nvme_cqe *cqe)
{
	struct nvme_softc *sc = q->q_sc;
	struct nvme_pt_state *state = ccb->ccb_cookie;
	struct nvme_pt_command *pt = state->pt;
	bus_dmamap_t dmap = ccb->ccb_dmamap;

	if (pt->buf != NULL && pt->len > 0) {
		if (dmap->dm_nsegs > 2) {
			bus_dmamap_sync(sc->sc_dmat,
			    NVME_DMA_MAP(q->q_ccb_prpls),
			    ccb->ccb_prpl_off,
			    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
			    BUS_DMASYNC_POSTWRITE);
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    pt->is_read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	pt->cpl.cdw0 = lemtoh32(&cqe->cdw0);
	pt->cpl.flags = lemtoh16(&cqe->flags) & ~NVME_CQE_PHASE;

	state->finished = true;

	nvme_ccb_put(q, ccb);
}

static bool
nvme_pt_finished(void *cookie)
{
	struct nvme_pt_state *state = cookie;

	return state->finished;
}
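
/*
 * Execute a passthrough command from the ioctl path: stage the user
 * buffer in kernel memory, submit on the admin or an I/O queue, wait
 * for completion, and copy the result back out for reads.
 */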
static int
nvme_command_passthrough(struct nvme_softc *sc, struct nvme_pt_command *pt,
    uint32_t nsid, struct lwp *l, bool is_adminq)
{
	struct nvme_queue *q;
	struct nvme_ccb *ccb;
	void *buf = NULL;
	struct nvme_pt_state state;
	int error;

	/* limit command size to maximum data transfer size */
	if ((pt->buf == NULL && pt->len > 0) ||
	    (pt->buf != NULL && (pt->len == 0 || pt->len > sc->sc_mdts)))
		return EINVAL;

	q = is_adminq ? sc->sc_admin_q : nvme_get_q(sc);
	ccb = nvme_ccb_get(q, true);
	KASSERT(ccb != NULL);

	if (pt->buf != NULL) {
		KASSERT(pt->len > 0);
		buf = kmem_alloc(pt->len, KM_SLEEP);
		if (!pt->is_read) {
			error = copyin(pt->buf, buf, pt->len);
			if (error)
				goto kmem_free;
		}
		error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap, buf,
		    pt->len, NULL,
		    BUS_DMA_WAITOK |
		      (pt->is_read ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (error)
			goto kmem_free;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    pt->is_read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}

	memset(&state, 0, sizeof(state));
	state.pt = pt;
	state.finished = false;

	ccb->ccb_done = nvme_pt_done;
	ccb->ccb_cookie = &state;

	pt->cmd.nsid = nsid;

	nvme_q_submit(sc, q, ccb, nvme_pt_fill);

	/* wait for completion */
	nvme_q_wait_complete(sc, q, nvme_pt_finished, &state);
	KASSERT(state.finished);

	error = 0;

	if (buf != NULL) {
		if (error == 0 && pt->is_read)
			error = copyout(buf, pt->buf, pt->len);
kmem_free:
		kmem_free(buf, pt->len);
	}

	return error;
}

uint32_t
nvme_op_sq_enter(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	mutex_enter(&q->q_sq_mtx);

	return nvme_op_sq_enter_locked(sc, q, ccb);
}

uint32_t
nvme_op_sq_enter_locked(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	return q->q_sq_tail;
}

void
nvme_op_sq_leave_locked(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	uint32_t tail;

	tail = ++q->q_sq_tail;
	if (tail >= q->q_entries)
		tail = 0;
	q->q_sq_tail = tail;
	nvme_write4(sc, q->q_sqtdbl, tail);
}

void
nvme_op_sq_leave(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	nvme_op_sq_leave_locked(sc, q, ccb);

	mutex_exit(&q->q_sq_mtx);
}

static void
nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *))
{
	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	uint32_t tail;

	tail = sc->sc_ops->op_sq_enter(sc, q, ccb);

	sqe += tail;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
	memset(sqe, 0, sizeof(*sqe));
	(*fill)(q, ccb, sqe);
	htolem16(&sqe->cid, ccb->ccb_id);
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);

	sc->sc_ops->op_sq_leave(sc, q, ccb);
}
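
/*
 * Polled command execution: nvme_poll() saves the caller's completion
 * callback and cookie here, substitutes nvme_poll_done(), and spins on
 * nvme_q_complete() until the saved CQE carries the phase bit.
 */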
struct nvme_poll_state {
	struct nvme_sqe s;
	struct nvme_cqe c;
	void *cookie;
	void (*done)(struct nvme_queue *, struct nvme_ccb *, struct nvme_cqe *);
};

static int
nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_queue *, struct nvme_ccb *, void *), int timo_sec)
{
	struct nvme_poll_state state;
	uint16_t flags;
	int step = 10;
	int maxloop = timo_sec * 1000000 / step;
	int error = 0;

	memset(&state, 0, sizeof(state));
	(*fill)(q, ccb, &state.s);

	state.done = ccb->ccb_done;
	state.cookie = ccb->ccb_cookie;

	ccb->ccb_done = nvme_poll_done;
	ccb->ccb_cookie = &state;

	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
	while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
		if (nvme_q_complete(sc, q) == 0)
			delay(step);

		if (timo_sec >= 0 && --maxloop <= 0) {
			error = ETIMEDOUT;
			break;
		}
	}

	if (error == 0) {
		flags = lemtoh16(&state.c.flags);
		return flags & ~NVME_CQE_PHASE;
	} else {
		/*
		 * If it succeeds later, it would hit ccb which will have been
		 * already reused for something else. Not good. Cross
		 * fingers and hope for best. XXX do controller reset?
		 */
		aprint_error_dev(sc->sc_dev, "polled command timed out\n");

		/* Invoke the callback to clean state anyway */
		struct nvme_cqe cqe;
		memset(&cqe, 0, sizeof(cqe));
		ccb->ccb_done(q, ccb, &cqe);

		return 1;
	}
}
1429 1.34 jdolecek */ 1430 1.34 jdolecek aprint_error_dev(sc->sc_dev, "polled command timed out\n"); 1431 1.34 jdolecek 1432 1.34 jdolecek /* Invoke the callback to clean state anyway */ 1433 1.34 jdolecek struct nvme_cqe cqe; 1434 1.34 jdolecek memset(&cqe, 0, sizeof(cqe)); 1435 1.34 jdolecek ccb->ccb_done(q, ccb, &cqe); 1436 1.34 jdolecek 1437 1.7 jdolecek return 1; 1438 1.7 jdolecek } 1439 1.1 nonaka } 1440 1.1 nonaka 1441 1.1 nonaka static void 1442 1.1 nonaka nvme_poll_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot) 1443 1.1 nonaka { 1444 1.1 nonaka struct nvme_sqe *sqe = slot; 1445 1.1 nonaka struct nvme_poll_state *state = ccb->ccb_cookie; 1446 1.1 nonaka 1447 1.1 nonaka *sqe = state->s; 1448 1.1 nonaka } 1449 1.1 nonaka 1450 1.1 nonaka static void 1451 1.1 nonaka nvme_poll_done(struct nvme_queue *q, struct nvme_ccb *ccb, 1452 1.1 nonaka struct nvme_cqe *cqe) 1453 1.1 nonaka { 1454 1.1 nonaka struct nvme_poll_state *state = ccb->ccb_cookie; 1455 1.1 nonaka 1456 1.1 nonaka state->c = *cqe; 1457 1.45 nonaka SET(state->c.flags, htole16(NVME_CQE_PHASE)); 1458 1.34 jdolecek 1459 1.34 jdolecek ccb->ccb_cookie = state->cookie; 1460 1.34 jdolecek state->done(q, ccb, &state->c); 1461 1.1 nonaka } 1462 1.1 nonaka 1463 1.1 nonaka static void 1464 1.1 nonaka nvme_sqe_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot) 1465 1.1 nonaka { 1466 1.1 nonaka struct nvme_sqe *src = ccb->ccb_cookie; 1467 1.1 nonaka struct nvme_sqe *dst = slot; 1468 1.1 nonaka 1469 1.1 nonaka *dst = *src; 1470 1.1 nonaka } 1471 1.1 nonaka 1472 1.1 nonaka static void 1473 1.1 nonaka nvme_empty_done(struct nvme_queue *q, struct nvme_ccb *ccb, 1474 1.1 nonaka struct nvme_cqe *cqe) 1475 1.1 nonaka { 1476 1.1 nonaka } 1477 1.1 nonaka 1478 1.60 skrll void 1479 1.60 skrll nvme_op_cq_done(struct nvme_softc *sc, 1480 1.60 skrll struct nvme_queue *q, struct nvme_ccb *ccb) 1481 1.60 skrll { 1482 1.60 skrll /* nop */ 1483 1.60 skrll } 1484 1.60 skrll 1485 1.1 nonaka static int 1486 1.1 nonaka nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q) 1487 1.1 nonaka { 1488 1.1 nonaka struct nvme_ccb *ccb; 1489 1.1 nonaka struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe; 1490 1.1 nonaka uint16_t flags; 1491 1.1 nonaka int rv = 0; 1492 1.1 nonaka 1493 1.9 jdolecek mutex_enter(&q->q_cq_mtx); 1494 1.1 nonaka 1495 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD); 1496 1.1 nonaka for (;;) { 1497 1.9 jdolecek cqe = &ring[q->q_cq_head]; 1498 1.1 nonaka flags = lemtoh16(&cqe->flags); 1499 1.1 nonaka if ((flags & NVME_CQE_PHASE) != q->q_cq_phase) 1500 1.1 nonaka break; 1501 1.1 nonaka 1502 1.64 riastrad /* 1503 1.64 riastrad * Make sure we have read the flags _before_ we read 1504 1.64 riastrad * the cid. Otherwise the CPU might speculatively read 1505 1.64 riastrad * the cid before the entry has been assigned to our 1506 1.64 riastrad * phase. 1507 1.64 riastrad */ 1508 1.64 riastrad nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD); 1509 1.64 riastrad 1510 1.52 rin ccb = &q->q_ccbs[lemtoh16(&cqe->cid)]; 1511 1.1 nonaka 1512 1.9 jdolecek if (++q->q_cq_head >= q->q_entries) { 1513 1.9 jdolecek q->q_cq_head = 0; 1514 1.1 nonaka q->q_cq_phase ^= NVME_CQE_PHASE; 1515 1.1 nonaka } 1516 1.1 nonaka 1517 1.18 jdolecek #ifdef DEBUG 1518 1.18 jdolecek /* 1519 1.18 jdolecek * If we get spurious completion notification, something 1520 1.18 jdolecek * is seriously hosed up. Very likely DMA to some random 1521 1.18 jdolecek * memory place happened, so just bail out. 
1522 1.18 jdolecek */
1523 1.18 jdolecek if ((intptr_t)ccb->ccb_cookie == NVME_CCB_FREE) {
1524 1.18 jdolecek panic("%s: invalid ccb detected",
1525 1.18 jdolecek device_xname(sc->sc_dev));
1526 1.18 jdolecek /* NOTREACHED */
1527 1.18 jdolecek }
1528 1.18 jdolecek #endif
1529 1.20 jdolecek
1530 1.20 jdolecek rv++;
1531 1.9 jdolecek
1532 1.60 skrll sc->sc_ops->op_cq_done(sc, q, ccb);
1533 1.60 skrll
1534 1.9 jdolecek /*
1535 1.10 jdolecek * Unlock the mutex before calling the ccb_done callback
1536 1.9 jdolecek * and re-lock afterwards. The callback triggers lddone()
1537 1.9 jdolecek * which schedules another i/o, and also calls nvme_ccb_put().
1538 1.9 jdolecek * Unlock/relock avoids the possibility of deadlock.
1539 1.9 jdolecek */
1540 1.9 jdolecek mutex_exit(&q->q_cq_mtx);
1541 1.9 jdolecek ccb->ccb_done(q, ccb, cqe);
1542 1.9 jdolecek mutex_enter(&q->q_cq_mtx);
1543 1.1 nonaka }
1544 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1545 1.1 nonaka
1546 1.1 nonaka if (rv)
1547 1.9 jdolecek nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
1548 1.9 jdolecek
1549 1.1 nonaka mutex_exit(&q->q_cq_mtx);
1550 1.1 nonaka
1551 1.1 nonaka return rv;
1552 1.1 nonaka }
1553 1.1 nonaka
1554 1.34 jdolecek static void
1555 1.34 jdolecek nvme_q_wait_complete(struct nvme_softc *sc,
1556 1.34 jdolecek struct nvme_queue *q, bool (*finished)(void *), void *cookie)
1557 1.34 jdolecek {
1558 1.34 jdolecek mutex_enter(&q->q_ccb_mtx);
1559 1.34 jdolecek if (finished(cookie))
1560 1.34 jdolecek goto out;
1561 1.34 jdolecek
1562 1.34 jdolecek for (;;) {
1563 1.34 jdolecek q->q_ccb_waiting = true;
1564 1.34 jdolecek cv_wait(&q->q_ccb_wait, &q->q_ccb_mtx);
1565 1.34 jdolecek
1566 1.34 jdolecek if (finished(cookie))
1567 1.34 jdolecek break;
1568 1.34 jdolecek }
1569 1.34 jdolecek
1570 1.34 jdolecek out:
1571 1.34 jdolecek mutex_exit(&q->q_ccb_mtx);
1572 1.34 jdolecek }
1573 1.34 jdolecek
1574 1.1 nonaka static int
1575 1.1 nonaka nvme_identify(struct nvme_softc *sc, u_int mps)
1576 1.1 nonaka {
1577 1.1 nonaka char sn[41], mn[81], fr[17];
1578 1.1 nonaka struct nvm_identify_controller *identify;
1579 1.19 jdolecek struct nvme_dmamem *mem;
1580 1.1 nonaka struct nvme_ccb *ccb;
1581 1.1 nonaka u_int mdts;
1582 1.19 jdolecek int rv = 1;
1583 1.1 nonaka
1584 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1585 1.11 jdolecek KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
1586 1.1 nonaka
1587 1.19 jdolecek mem = nvme_dmamem_alloc(sc, sizeof(*identify));
1588 1.19 jdolecek if (mem == NULL)
1589 1.19 jdolecek return 1;
1590 1.1 nonaka
1591 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1592 1.19 jdolecek ccb->ccb_cookie = mem;
1593 1.1 nonaka
1594 1.1 nonaka nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
1595 1.19 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
1596 1.7 jdolecek NVME_TIMO_IDENT);
1597 1.1 nonaka nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
1598 1.1 nonaka
1599 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
1600 1.1 nonaka
1601 1.19 jdolecek if (rv != 0)
1602 1.1 nonaka goto done;
1603 1.1 nonaka
1604 1.1 nonaka identify = NVME_DMA_KVA(mem);
1605 1.39 nonaka sc->sc_identify = *identify;
1606 1.39 nonaka identify = NULL;
1607 1.39 nonaka
1608 1.39 nonaka /* Convert data to host endian */
1609 1.39 nonaka nvme_identify_controller_swapbytes(&sc->sc_identify);
1610 1.1 nonaka
1611 1.39 nonaka strnvisx(sn, sizeof(sn), (const char *)sc->sc_identify.sn,
1612 1.39 nonaka sizeof(sc->sc_identify.sn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1613 1.39 nonaka strnvisx(mn, sizeof(mn), (const char *)sc->sc_identify.mn,
1614 1.39 nonaka sizeof(sc->sc_identify.mn), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1615 1.39 nonaka strnvisx(fr, sizeof(fr), (const char *)sc->sc_identify.fr,
1616 1.39 nonaka sizeof(sc->sc_identify.fr), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1617 1.1 nonaka aprint_normal_dev(sc->sc_dev, "%s, firmware %s, serial %s\n", mn, fr,
1618 1.1 nonaka sn);
1619 1.1 nonaka
1620 1.42 mlelstv strlcpy(sc->sc_modelname, mn, sizeof(sc->sc_modelname));
1621 1.42 mlelstv
1622 1.39 nonaka if (sc->sc_identify.mdts > 0) {
1623 1.39 nonaka mdts = (1 << sc->sc_identify.mdts) * (1 << mps);
1624 1.1 nonaka if (mdts < sc->sc_mdts)
1625 1.1 nonaka sc->sc_mdts = mdts;
1626 1.1 nonaka }
1627 1.1 nonaka
1628 1.39 nonaka sc->sc_nn = sc->sc_identify.nn;
1629 1.1 nonaka
1630 1.1 nonaka done:
1631 1.19 jdolecek nvme_dmamem_free(sc, mem);
1632 1.1 nonaka
1633 1.19 jdolecek return rv;
1634 1.1 nonaka }
1635 1.1 nonaka
1636 1.1 nonaka static int
1637 1.1 nonaka nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
1638 1.1 nonaka {
1639 1.1 nonaka struct nvme_sqe_q sqe;
1640 1.1 nonaka struct nvme_ccb *ccb;
1641 1.1 nonaka int rv;
1642 1.1 nonaka
1643 1.9 jdolecek if (sc->sc_use_mq && sc->sc_intr_establish(sc, q->q_id, q) != 0)
1644 1.1 nonaka return 1;
1645 1.1 nonaka
1646 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1647 1.1 nonaka KASSERT(ccb != NULL);
1648 1.1 nonaka
1649 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1650 1.1 nonaka ccb->ccb_cookie = &sqe;
1651 1.1 nonaka
1652 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1653 1.1 nonaka sqe.opcode = NVM_ADMIN_ADD_IOCQ;
1654 1.1 nonaka htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
1655 1.1 nonaka htolem16(&sqe.qsize, q->q_entries - 1);
1656 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1657 1.1 nonaka sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
1658 1.1 nonaka if (sc->sc_use_mq)
1659 1.1 nonaka htolem16(&sqe.cqid, q->q_id); /* qid == vector */
1660 1.1 nonaka
1661 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1662 1.1 nonaka if (rv != 0)
1663 1.1 nonaka goto fail;
1664 1.1 nonaka
1665 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1666 1.1 nonaka ccb->ccb_cookie = &sqe;
1667 1.1 nonaka
1668 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1669 1.1 nonaka sqe.opcode = NVM_ADMIN_ADD_IOSQ;
1670 1.1 nonaka htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
1671 1.1 nonaka htolem16(&sqe.qsize, q->q_entries - 1);
1672 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1673 1.1 nonaka htolem16(&sqe.cqid, q->q_id);
1674 1.1 nonaka sqe.qflags = NVM_SQE_Q_PC;
1675 1.1 nonaka
1676 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1677 1.1 nonaka if (rv != 0)
1678 1.1 nonaka goto fail;
1679 1.1 nonaka
1680 1.40 jdolecek nvme_ccb_put(sc->sc_admin_q, ccb);
1681 1.40 jdolecek return 0;
1682 1.40 jdolecek
1683 1.1 nonaka fail:
1684 1.40 jdolecek if (sc->sc_use_mq)
1685 1.40 jdolecek sc->sc_intr_disestablish(sc, q->q_id);
1686 1.40 jdolecek
1687 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
1688 1.1 nonaka return rv;
1689 1.1 nonaka }
1690 1.1 nonaka
1691 1.1 nonaka static int
1692 1.1 nonaka nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
1693 1.1 nonaka {
1694 1.1 nonaka struct nvme_sqe_q sqe;
1695 1.1 nonaka struct nvme_ccb *ccb;
1696 1.1 nonaka int rv;
1697 1.1 nonaka
1698 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1699 1.1 nonaka KASSERT(ccb != NULL);
1700 1.1 nonaka
1701 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1702 1.1 nonaka ccb->ccb_cookie = &sqe;
1703 1.1 nonaka
1704 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1705 1.1 nonaka sqe.opcode = NVM_ADMIN_DEL_IOSQ;
1706 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1707 1.1 nonaka
1708 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1709 1.1 nonaka if (rv != 0)
1710 1.1 nonaka goto fail;
1711 1.1 nonaka
1712 1.1 nonaka ccb->ccb_done = nvme_empty_done;
1713 1.1 nonaka ccb->ccb_cookie = &sqe;
1714 1.1 nonaka
1715 1.1 nonaka memset(&sqe, 0, sizeof(sqe));
1716 1.1 nonaka sqe.opcode = NVM_ADMIN_DEL_IOCQ;
1717 1.1 nonaka htolem16(&sqe.qid, q->q_id);
1718 1.1 nonaka
1719 1.7 jdolecek rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1720 1.1 nonaka if (rv != 0)
1721 1.1 nonaka goto fail;
1722 1.1 nonaka
1723 1.1 nonaka fail:
1724 1.1 nonaka nvme_ccb_put(sc->sc_admin_q, ccb);
1725 1.1 nonaka
1726 1.1 nonaka if (rv == 0 && sc->sc_use_mq) {
1727 1.1 nonaka if (sc->sc_intr_disestablish(sc, q->q_id))
1728 1.1 nonaka rv = 1;
1729 1.1 nonaka }
1730 1.1 nonaka
1731 1.1 nonaka return rv;
1732 1.1 nonaka }
1733 1.1 nonaka
1734 1.1 nonaka static void
1735 1.1 nonaka nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
1736 1.1 nonaka {
1737 1.1 nonaka struct nvme_sqe *sqe = slot;
1738 1.1 nonaka struct nvme_dmamem *mem = ccb->ccb_cookie;
1739 1.1 nonaka
1740 1.1 nonaka sqe->opcode = NVM_ADMIN_IDENTIFY;
1741 1.19 jdolecek htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
1742 1.1 nonaka htolem32(&sqe->cdw10, 1);
1743 1.1 nonaka }
1744 1.1 nonaka
1745 1.1 nonaka static int
1746 1.47 nonaka nvme_set_number_of_queues(struct nvme_softc *sc, u_int nq, u_int *ncqa,
1747 1.47 nonaka u_int *nsqa)
1748 1.23 nonaka {
1749 1.36 jdolecek struct nvme_pt_state state;
1750 1.23 nonaka struct nvme_pt_command pt;
1751 1.23 nonaka struct nvme_ccb *ccb;
1752 1.23 nonaka int rv;
1753 1.23 nonaka
1754 1.34 jdolecek ccb = nvme_ccb_get(sc->sc_admin_q, false);
1755 1.23 nonaka KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
1756 1.23 nonaka
1757 1.23 nonaka memset(&pt, 0, sizeof(pt));
1758 1.47 nonaka pt.cmd.opcode = NVM_ADMIN_SET_FEATURES;
1759 1.51 ryo pt.cmd.cdw10 = NVM_FEATURE_NUMBER_OF_QUEUES;
1760 1.51 ryo pt.cmd.cdw11 = ((nq - 1) << 16) | (nq - 1);
1761 1.23 nonaka
1762 1.36 jdolecek memset(&state, 0, sizeof(state));
1763 1.36 jdolecek state.pt = &pt;
1764 1.36 jdolecek state.finished = false;
1765 1.36 jdolecek
1766 1.23 nonaka ccb->ccb_done = nvme_pt_done;
1767 1.36 jdolecek ccb->ccb_cookie = &state;
1768 1.23 nonaka
1769 1.23 nonaka rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_pt_fill, NVME_TIMO_QOP);
1770 1.23 nonaka
1771 1.23 nonaka if (rv != 0) {
1772 1.47 nonaka *ncqa = *nsqa = 0;
1773 1.23 nonaka return EIO;
1774 1.23 nonaka }
1775 1.23 nonaka
1776 1.47 nonaka *ncqa = (pt.cpl.cdw0 >> 16) + 1;
1777 1.47 nonaka *nsqa = (pt.cpl.cdw0 & 0xffff) + 1;
1778 1.23 nonaka
1779 1.23 nonaka return 0;
1780 1.23 nonaka }
1781 1.23 nonaka
1782 1.23 nonaka static int
1783 1.20 jdolecek nvme_ccbs_alloc(struct nvme_queue *q, uint16_t nccbs)
1784 1.1 nonaka {
1785 1.1 nonaka struct nvme_softc *sc = q->q_sc;
1786 1.1 nonaka struct nvme_ccb *ccb;
1787 1.1 nonaka bus_addr_t off;
1788 1.1 nonaka uint64_t *prpl;
1789 1.1 nonaka u_int i;
1790 1.1 nonaka
1791 1.1 nonaka mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
1792 1.34 jdolecek cv_init(&q->q_ccb_wait, "nvmeqw");
1793 1.34 jdolecek q->q_ccb_waiting = false;
1794 1.1 nonaka SIMPLEQ_INIT(&q->q_ccb_list);
1795 1.1 nonaka
1796 1.1 nonaka q->q_ccbs = kmem_alloc(sizeof(*ccb) * nccbs, KM_SLEEP);
1797 1.1 nonaka
1798 1.1 nonaka q->q_nccbs = nccbs;
1799 1.19 jdolecek q->q_ccb_prpls = nvme_dmamem_alloc(sc,
1800 1.19 jdolecek sizeof(*prpl) * sc->sc_max_sgl * nccbs);
1801 1.1 nonaka
1802 1.1 nonaka prpl = NVME_DMA_KVA(q->q_ccb_prpls);
1803 1.1 nonaka off = 0;
1804 1.1 nonaka
1805 1.1 nonaka for (i = 0; i < nccbs; i++) {
1806 1.1 nonaka ccb = &q->q_ccbs[i];
1807 1.1 nonaka
1808 1.1 nonaka if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
1809 1.1 nonaka sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
1810 1.1 nonaka sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1811 1.1 nonaka &ccb->ccb_dmamap) != 0)
1812 1.1 nonaka goto free_maps;
1813 1.1 nonaka
1814 1.1 nonaka ccb->ccb_id = i;
1815 1.1 nonaka ccb->ccb_prpl = prpl;
1816 1.1 nonaka ccb->ccb_prpl_off = off;
1817 1.1 nonaka ccb->ccb_prpl_dva = NVME_DMA_DVA(q->q_ccb_prpls) + off;
1818 1.1 nonaka
1819 1.1 nonaka SIMPLEQ_INSERT_TAIL(&q->q_ccb_list, ccb, ccb_entry);
1820 1.1 nonaka
1821 1.1 nonaka prpl += sc->sc_max_sgl;
1822 1.1 nonaka off += sizeof(*prpl) * sc->sc_max_sgl;
1823 1.1 nonaka }
1824 1.1 nonaka
1825 1.1 nonaka return 0;
1826 1.1 nonaka
1827 1.1 nonaka free_maps:
1828 1.1 nonaka nvme_ccbs_free(q);
1829 1.1 nonaka return 1;
1830 1.1 nonaka }
1831 1.1 nonaka
1832 1.1 nonaka static struct nvme_ccb *
1833 1.34 jdolecek nvme_ccb_get(struct nvme_queue *q, bool wait)
1834 1.1 nonaka {
1835 1.20 jdolecek struct nvme_ccb *ccb = NULL;
1836 1.1 nonaka
1837 1.1 nonaka mutex_enter(&q->q_ccb_mtx);
1838 1.34 jdolecek again:
1839 1.33 jdolecek ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
1840 1.33 jdolecek if (ccb != NULL) {
1841 1.1 nonaka SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1842 1.18 jdolecek #ifdef DEBUG
1843 1.18 jdolecek ccb->ccb_cookie = NULL;
1844 1.18 jdolecek #endif
1845 1.34 jdolecek } else {
1846 1.34 jdolecek if (__predict_false(wait)) {
1847 1.34 jdolecek q->q_ccb_waiting = true;
1848 1.34 jdolecek cv_wait(&q->q_ccb_wait, &q->q_ccb_mtx);
1849 1.34 jdolecek goto again;
1850 1.34 jdolecek }
1851 1.18 jdolecek }
1852 1.1 nonaka mutex_exit(&q->q_ccb_mtx);
1853 1.1 nonaka
1854 1.1 nonaka return ccb;
1855 1.1 nonaka }
1856 1.1 nonaka
1857 1.62 jmcneill static struct nvme_ccb *
1858 1.62 jmcneill nvme_ccb_get_bio(struct nvme_softc *sc, struct buf *bp,
1859 1.62 jmcneill struct nvme_queue **selq)
1860 1.62 jmcneill {
1861 1.66 riastrad u_int cpuindex = cpu_index((bp && bp->b_ci) ? bp->b_ci : curcpu());
1862 1.62 jmcneill
1863 1.62 jmcneill /*
1864 1.62 jmcneill * Find a queue with available ccbs, preferring the originating
1865 1.62 jmcneill * CPU's queue.
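* For example, with sc->sc_nq == 4 and a buf that originated on the
* CPU with index 2, the loop below probes sc_q[2], sc_q[3], sc_q[0]
* and sc_q[1] in that order and takes the first queue with a free ccb.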
1866 1.62 jmcneill */
1867 1.62 jmcneill
1868 1.62 jmcneill for (u_int qoff = 0; qoff < sc->sc_nq; qoff++) {
1869 1.62 jmcneill struct nvme_queue *q = sc->sc_q[(cpuindex + qoff) % sc->sc_nq];
1870 1.62 jmcneill struct nvme_ccb *ccb;
1871 1.62 jmcneill
1872 1.62 jmcneill mutex_enter(&q->q_ccb_mtx);
1873 1.62 jmcneill ccb = SIMPLEQ_FIRST(&q->q_ccb_list);
1874 1.62 jmcneill if (ccb != NULL) {
1875 1.62 jmcneill SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1876 1.62 jmcneill #ifdef DEBUG
1877 1.62 jmcneill ccb->ccb_cookie = NULL;
1878 1.62 jmcneill #endif
1879 1.62 jmcneill }
1880 1.62 jmcneill mutex_exit(&q->q_ccb_mtx);
1881 1.62 jmcneill
1882 1.62 jmcneill if (ccb != NULL) {
1883 1.62 jmcneill *selq = q;
1884 1.62 jmcneill return ccb;
1885 1.62 jmcneill }
1886 1.62 jmcneill }
1887 1.62 jmcneill
1888 1.62 jmcneill return NULL;
1889 1.62 jmcneill }
1890 1.62 jmcneill
1891 1.1 nonaka static void
1892 1.1 nonaka nvme_ccb_put(struct nvme_queue *q, struct nvme_ccb *ccb)
1893 1.1 nonaka {
1894 1.1 nonaka
1895 1.1 nonaka mutex_enter(&q->q_ccb_mtx);
1896 1.18 jdolecek #ifdef DEBUG
1897 1.18 jdolecek ccb->ccb_cookie = (void *)NVME_CCB_FREE;
1898 1.18 jdolecek #endif
1899 1.1 nonaka SIMPLEQ_INSERT_HEAD(&q->q_ccb_list, ccb, ccb_entry);
1900 1.34 jdolecek
1901 1.34 jdolecek /* It's unlikely there are any waiters; this path is not used for regular I/O */
1902 1.34 jdolecek if (__predict_false(q->q_ccb_waiting)) {
1903 1.34 jdolecek q->q_ccb_waiting = false;
1904 1.34 jdolecek cv_broadcast(&q->q_ccb_wait);
1905 1.34 jdolecek }
1906 1.34 jdolecek
1907 1.1 nonaka mutex_exit(&q->q_ccb_mtx);
1908 1.1 nonaka }
1909 1.1 nonaka
1910 1.1 nonaka static void
1911 1.1 nonaka nvme_ccbs_free(struct nvme_queue *q)
1912 1.1 nonaka {
1913 1.1 nonaka struct nvme_softc *sc = q->q_sc;
1914 1.1 nonaka struct nvme_ccb *ccb;
1915 1.1 nonaka
1916 1.1 nonaka mutex_enter(&q->q_ccb_mtx);
1917 1.1 nonaka while ((ccb = SIMPLEQ_FIRST(&q->q_ccb_list)) != NULL) {
1918 1.1 nonaka SIMPLEQ_REMOVE_HEAD(&q->q_ccb_list, ccb_entry);
1919 1.59 skrll /*
1920 1.48 ryo * bus_dmamap_destroy() may call vm_map_lock() and rw_enter()
1921 1.48 ryo * internally. Don't hold the spin mutex across the call.
1922 1.48 ryo */
1923 1.48 ryo mutex_exit(&q->q_ccb_mtx);
1924 1.1 nonaka bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1925 1.48 ryo mutex_enter(&q->q_ccb_mtx);
1926 1.1 nonaka }
1927 1.1 nonaka mutex_exit(&q->q_ccb_mtx);
1928 1.1 nonaka
1929 1.19 jdolecek nvme_dmamem_free(sc, q->q_ccb_prpls);
1930 1.1 nonaka kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
1931 1.1 nonaka q->q_ccbs = NULL;
1932 1.34 jdolecek cv_destroy(&q->q_ccb_wait);
1933 1.1 nonaka mutex_destroy(&q->q_ccb_mtx);
1934 1.1 nonaka }
1935 1.1 nonaka
1936 1.1 nonaka static struct nvme_queue *
1937 1.1 nonaka nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
1938 1.1 nonaka {
1939 1.1 nonaka struct nvme_queue *q;
1940 1.1 nonaka
1941 1.1 nonaka q = kmem_alloc(sizeof(*q), KM_SLEEP);
1942 1.1 nonaka q->q_sc = sc;
1943 1.19 jdolecek q->q_sq_dmamem = nvme_dmamem_alloc(sc,
1944 1.19 jdolecek sizeof(struct nvme_sqe) * entries);
1945 1.19 jdolecek if (q->q_sq_dmamem == NULL)
1946 1.1 nonaka goto free;
1947 1.1 nonaka
1948 1.19 jdolecek q->q_cq_dmamem = nvme_dmamem_alloc(sc,
1949 1.19 jdolecek sizeof(struct nvme_cqe) * entries);
1950 1.19 jdolecek if (q->q_cq_dmamem == NULL)
1951 1.1 nonaka goto free_sq;
1952 1.1 nonaka
1953 1.1 nonaka memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1954 1.1 nonaka memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1955 1.1 nonaka
1956 1.1 nonaka mutex_init(&q->q_sq_mtx, MUTEX_DEFAULT, IPL_BIO);
1957 1.1 nonaka mutex_init(&q->q_cq_mtx, MUTEX_DEFAULT, IPL_BIO);
1958 1.1 nonaka q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
1959 1.1 nonaka q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
1960 1.1 nonaka q->q_id = id;
1961 1.1 nonaka q->q_entries = entries;
1962 1.1 nonaka q->q_sq_tail = 0;
1963 1.1 nonaka q->q_cq_head = 0;
1964 1.1 nonaka q->q_cq_phase = NVME_CQE_PHASE;
1965 1.1 nonaka
1966 1.60 skrll if (sc->sc_ops->op_q_alloc != NULL) {
1967 1.60 skrll if (sc->sc_ops->op_q_alloc(sc, q) != 0)
1968 1.60 skrll goto free_cq;
1969 1.60 skrll }
1970 1.60 skrll
1971 1.1 nonaka nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1972 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1973 1.1 nonaka
1974 1.20 jdolecek /*
1975 1.20 jdolecek * Due to definition of full and empty queue (queue is empty
1976 1.20 jdolecek * when head == tail, full when tail is one less than head),
1977 1.20 jdolecek * we can actually only have (entries - 1) in-flight commands.
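* For example, with entries == 4 and head == 0, up to three commands
* may be submitted (tail becomes 3, one less than head modulo 4);
* accepting a fourth would wrap tail to 0 == head, which is
* indistinguishable from an empty ring.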
1978 1.20 jdolecek */
1979 1.20 jdolecek if (nvme_ccbs_alloc(q, entries - 1) != 0) {
1980 1.1 nonaka aprint_error_dev(sc->sc_dev, "unable to allocate ccbs\n");
1981 1.1 nonaka goto free_cq;
1982 1.1 nonaka }
1983 1.1 nonaka
1984 1.1 nonaka return q;
1985 1.1 nonaka
1986 1.1 nonaka free_cq:
1987 1.19 jdolecek nvme_dmamem_free(sc, q->q_cq_dmamem);
1988 1.1 nonaka free_sq:
1989 1.19 jdolecek nvme_dmamem_free(sc, q->q_sq_dmamem);
1990 1.1 nonaka free:
1991 1.1 nonaka kmem_free(q, sizeof(*q));
1992 1.1 nonaka
1993 1.1 nonaka return NULL;
1994 1.1 nonaka }
1995 1.1 nonaka
1996 1.1 nonaka static void
1997 1.56 riastrad nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q)
1998 1.56 riastrad {
1999 1.56 riastrad
2000 1.56 riastrad memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
2001 1.56 riastrad memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
2002 1.56 riastrad
2003 1.56 riastrad q->q_sq_tail = 0;
2004 1.56 riastrad q->q_cq_head = 0;
2005 1.56 riastrad q->q_cq_phase = NVME_CQE_PHASE;
2006 1.56 riastrad
2007 1.56 riastrad nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
2008 1.56 riastrad nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
2009 1.56 riastrad }
2010 1.56 riastrad
2011 1.56 riastrad static void
2012 1.1 nonaka nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
2013 1.1 nonaka {
2014 1.1 nonaka nvme_ccbs_free(q);
2015 1.9 jdolecek mutex_destroy(&q->q_sq_mtx);
2016 1.9 jdolecek mutex_destroy(&q->q_cq_mtx);
2017 1.1 nonaka nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
2018 1.1 nonaka nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
2019 1.60 skrll
2020 1.60 skrll if (sc->sc_ops->op_q_alloc != NULL)
2021 1.60 skrll sc->sc_ops->op_q_free(sc, q);
2022 1.60 skrll
2023 1.19 jdolecek nvme_dmamem_free(sc, q->q_cq_dmamem);
2024 1.19 jdolecek nvme_dmamem_free(sc, q->q_sq_dmamem);
2025 1.1 nonaka kmem_free(q, sizeof(*q));
2026 1.1 nonaka }
2027 1.1 nonaka
2028 1.1 nonaka int
2029 1.1 nonaka nvme_intr(void *xsc)
2030 1.1 nonaka {
2031 1.1 nonaka struct nvme_softc *sc = xsc;
2032 1.1 nonaka
2033 1.68 mrg KASSERT(!sc->sc_use_mq);
2034 1.68 mrg
2035 1.10 jdolecek /*
2036 1.10 jdolecek * INTx is level triggered, controller deasserts the interrupt only
2037 1.10 jdolecek * when we advance command queue head via write to the doorbell.
2038 1.17 jdolecek * Tell the controller to block the interrupts while we process
2039 1.17 jdolecek * the queue(s).
2040 1.10 jdolecek */
2041 1.17 jdolecek nvme_write4(sc, NVME_INTMS, 1);
2042 1.17 jdolecek
2043 1.17 jdolecek softint_schedule(sc->sc_softih[0]);
2044 1.17 jdolecek
2045 1.17 jdolecek /* don't know, might not have been for us */
2046 1.17 jdolecek return 1;
2047 1.17 jdolecek }
2048 1.17 jdolecek
2049 1.17 jdolecek void
2050 1.17 jdolecek nvme_softintr_intx(void *xq)
2051 1.17 jdolecek {
2052 1.17 jdolecek struct nvme_queue *q = xq;
2053 1.17 jdolecek struct nvme_softc *sc = q->q_sc;
2054 1.17 jdolecek
2055 1.68 mrg KASSERT(!sc->sc_use_mq);
2056 1.68 mrg
2057 1.17 jdolecek nvme_q_complete(sc, sc->sc_admin_q);
2058 1.1 nonaka if (sc->sc_q != NULL)
2059 1.17 jdolecek nvme_q_complete(sc, sc->sc_q[0]);
2060 1.1 nonaka
2061 1.17 jdolecek /*
2062 1.17 jdolecek * Processing done, tell controller to issue interrupts again. There
2063 1.17 jdolecek * is no race, as NVMe spec requires the controller to maintain state,
2064 1.17 jdolecek * and assert the interrupt whenever there are unacknowledged
2065 1.17 jdolecek * completion queue entries.
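* (Per the NVMe spec, INTMS and INTMC are write-1-to-set and
* write-1-to-clear views of the same interrupt mask register, so the
* write below undoes the masking of vector 0 done in nvme_intr().)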
2066 1.17 jdolecek */
2067 1.17 jdolecek nvme_write4(sc, NVME_INTMC, 1);
2068 1.1 nonaka }
2069 1.1 nonaka
2070 1.1 nonaka int
2071 1.9 jdolecek nvme_intr_msi(void *xq)
2072 1.1 nonaka {
2073 1.1 nonaka struct nvme_queue *q = xq;
2074 1.1 nonaka
2075 1.63 riastrad KASSERT(q);
2076 1.63 riastrad KASSERT(q->q_sc);
2077 1.63 riastrad KASSERT(q->q_sc->sc_softih);
2078 1.63 riastrad KASSERT(q->q_sc->sc_softih[q->q_id]);
2079 1.1 nonaka
2080 1.17 jdolecek /*
2081 1.17 jdolecek * MSI/MSI-X are edge triggered, so we can hand over processing to
2082 1.17 jdolecek * softint without masking the interrupt.
2083 1.17 jdolecek */
2084 1.9 jdolecek softint_schedule(q->q_sc->sc_softih[q->q_id]);
2085 1.1 nonaka
2086 1.9 jdolecek return 1;
2087 1.1 nonaka }
2088 1.1 nonaka
2089 1.9 jdolecek void
2090 1.9 jdolecek nvme_softintr_msi(void *xq)
2091 1.1 nonaka {
2092 1.1 nonaka struct nvme_queue *q = xq;
2093 1.9 jdolecek struct nvme_softc *sc = q->q_sc;
2094 1.1 nonaka
2095 1.9 jdolecek nvme_q_complete(sc, q);
2096 1.1 nonaka }
2097 1.1 nonaka
2098 1.60 skrll struct nvme_dmamem *
2099 1.19 jdolecek nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
2100 1.1 nonaka {
2101 1.19 jdolecek struct nvme_dmamem *ndm;
2102 1.1 nonaka int nsegs;
2103 1.1 nonaka
2104 1.19 jdolecek ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
2105 1.19 jdolecek if (ndm == NULL)
2106 1.19 jdolecek return NULL;
2107 1.19 jdolecek
2108 1.1 nonaka ndm->ndm_size = size;
2109 1.1 nonaka
2110 1.43 mrg if (bus_dmamap_create(sc->sc_dmat, size, btoc(round_page(size)), size, 0,
2111 1.1 nonaka BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
2112 1.1 nonaka goto ndmfree;
2113 1.1 nonaka
2114 1.1 nonaka if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
2115 1.1 nonaka 1, &nsegs, BUS_DMA_WAITOK) != 0)
2116 1.1 nonaka goto destroy;
2117 1.1 nonaka
2118 1.1 nonaka if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
2119 1.1 nonaka &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
2120 1.1 nonaka goto free;
2121 1.1 nonaka
2122 1.1 nonaka if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
2123 1.1 nonaka NULL, BUS_DMA_WAITOK) != 0)
2124 1.1 nonaka goto unmap;
2125 1.1 nonaka
2126 1.54 jmcneill memset(ndm->ndm_kva, 0, size);
2127 1.54 jmcneill bus_dmamap_sync(sc->sc_dmat, ndm->ndm_map, 0, size, BUS_DMASYNC_PREREAD);
2128 1.54 jmcneill
2129 1.19 jdolecek return ndm;
2130 1.1 nonaka
2131 1.1 nonaka unmap:
2132 1.1 nonaka bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
2133 1.1 nonaka free:
2134 1.1 nonaka bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2135 1.1 nonaka destroy:
2136 1.1 nonaka bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2137 1.1 nonaka ndmfree:
2138 1.19 jdolecek kmem_free(ndm, sizeof(*ndm));
2139 1.19 jdolecek return NULL;
2140 1.19 jdolecek }
2141 1.19 jdolecek
2142 1.60 skrll void
2143 1.19 jdolecek nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
2144 1.19 jdolecek {
2145 1.19 jdolecek bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
2146 1.19 jdolecek 0, NVME_DMA_LEN(mem), ops);
2147 1.1 nonaka }
2148 1.1 nonaka
2149 1.1 nonaka void
2150 1.1 nonaka nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
2151 1.1 nonaka {
2152 1.1 nonaka bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
2153 1.1 nonaka bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
2154 1.1 nonaka bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
2155 1.1 nonaka bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
2156 1.19 jdolecek kmem_free(ndm, sizeof(*ndm));
2157 1.1 nonaka }
2158 1.3 nonaka
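/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * nvme_dmamem_* helpers above compose in an alloc/sync/use/sync/free
 * pattern, as nvme_identify() does.  A hypothetical caller reading a
 * 4096-byte structure from the controller would look roughly like:
 *
 *	struct nvme_dmamem *mem = nvme_dmamem_alloc(sc, 4096);
 *	if (mem == NULL)
 *		return 1;
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
 *	... point the command's PRP at NVME_DMA_DVA(mem) and poll it ...
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
 *	... consume the data through NVME_DMA_KVA(mem) ...
 *	nvme_dmamem_free(sc, mem);
 */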
2159 1.3 nonaka /*
2160 1.3 nonaka * ioctl
2161 1.3 nonaka */
2162 1.3 nonaka
2163 1.3 nonaka dev_type_open(nvmeopen);
2164 1.3 nonaka dev_type_close(nvmeclose);
2165 1.3 nonaka dev_type_ioctl(nvmeioctl);
2166 1.3 nonaka
2167 1.3 nonaka const struct cdevsw nvme_cdevsw = {
2168 1.3 nonaka .d_open = nvmeopen,
2169 1.3 nonaka .d_close = nvmeclose,
2170 1.3 nonaka .d_read = noread,
2171 1.3 nonaka .d_write = nowrite,
2172 1.3 nonaka .d_ioctl = nvmeioctl,
2173 1.3 nonaka .d_stop = nostop,
2174 1.3 nonaka .d_tty = notty,
2175 1.3 nonaka .d_poll = nopoll,
2176 1.3 nonaka .d_mmap = nommap,
2177 1.3 nonaka .d_kqfilter = nokqfilter,
2178 1.3 nonaka .d_discard = nodiscard,
2179 1.3 nonaka .d_flag = D_OTHER,
2180 1.3 nonaka };
2181 1.3 nonaka
2182 1.3 nonaka /*
2183 1.3 nonaka * Accept an open operation on the control device.
2184 1.3 nonaka */
2185 1.3 nonaka int
2186 1.3 nonaka nvmeopen(dev_t dev, int flag, int mode, struct lwp *l)
2187 1.3 nonaka {
2188 1.3 nonaka struct nvme_softc *sc;
2189 1.3 nonaka int unit = minor(dev) / 0x10000;
2190 1.3 nonaka int nsid = minor(dev) & 0xffff;
2191 1.3 nonaka int nsidx;
2192 1.3 nonaka
2193 1.3 nonaka if ((sc = device_lookup_private(&nvme_cd, unit)) == NULL)
2194 1.3 nonaka return ENXIO;
2195 1.3 nonaka if ((sc->sc_flags & NVME_F_ATTACHED) == 0)
2196 1.3 nonaka return ENXIO;
2197 1.3 nonaka
2198 1.5 nonaka if (nsid == 0) {
2199 1.5 nonaka /* controller */
2200 1.5 nonaka if (ISSET(sc->sc_flags, NVME_F_OPEN))
2201 1.5 nonaka return EBUSY;
2202 1.5 nonaka SET(sc->sc_flags, NVME_F_OPEN);
2203 1.5 nonaka } else {
2204 1.5 nonaka /* namespace */
2205 1.5 nonaka nsidx = nsid - 1;
2206 1.5 nonaka if (nsidx >= sc->sc_nn || sc->sc_namespaces[nsidx].dev == NULL)
2207 1.5 nonaka return ENXIO;
2208 1.5 nonaka if (ISSET(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN))
2209 1.5 nonaka return EBUSY;
2210 1.5 nonaka SET(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN);
2211 1.5 nonaka }
2212 1.3 nonaka return 0;
2213 1.3 nonaka }
2214 1.3 nonaka
2215 1.3 nonaka /*
2216 1.3 nonaka * Accept the last close on the control device.
2217 1.3 nonaka */
2218 1.3 nonaka int
2219 1.5 nonaka nvmeclose(dev_t dev, int flag, int mode, struct lwp *l)
2220 1.3 nonaka {
2221 1.3 nonaka struct nvme_softc *sc;
2222 1.3 nonaka int unit = minor(dev) / 0x10000;
2223 1.3 nonaka int nsid = minor(dev) & 0xffff;
2224 1.3 nonaka int nsidx;
2225 1.3 nonaka
2226 1.3 nonaka sc = device_lookup_private(&nvme_cd, unit);
2227 1.3 nonaka if (sc == NULL)
2228 1.3 nonaka return ENXIO;
2229 1.3 nonaka
2230 1.5 nonaka if (nsid == 0) {
2231 1.5 nonaka /* controller */
2232 1.5 nonaka CLR(sc->sc_flags, NVME_F_OPEN);
2233 1.5 nonaka } else {
2234 1.5 nonaka /* namespace */
2235 1.5 nonaka nsidx = nsid - 1;
2236 1.5 nonaka if (nsidx >= sc->sc_nn)
2237 1.5 nonaka return ENXIO;
2238 1.5 nonaka CLR(sc->sc_namespaces[nsidx].flags, NVME_NS_F_OPEN);
2239 1.5 nonaka }
2240 1.3 nonaka
2241 1.3 nonaka return 0;
2242 1.3 nonaka }
2243 1.3 nonaka
2244 1.3 nonaka /*
2245 1.3 nonaka * Handle control operations.
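* The minor number encodes both the unit and the namespace ID:
* unit = minor / 0x10000 and nsid = minor & 0xffff, so nsid 0 names
* the controller node itself (cf. nvmeopen() above).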
2246 1.3 nonaka */
2247 1.3 nonaka int
2248 1.5 nonaka nvmeioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2249 1.3 nonaka {
2250 1.3 nonaka struct nvme_softc *sc;
2251 1.3 nonaka int unit = minor(dev) / 0x10000;
2252 1.3 nonaka int nsid = minor(dev) & 0xffff;
2253 1.5 nonaka struct nvme_pt_command *pt;
2254 1.3 nonaka
2255 1.3 nonaka sc = device_lookup_private(&nvme_cd, unit);
2256 1.3 nonaka if (sc == NULL)
2257 1.3 nonaka return ENXIO;
2258 1.3 nonaka
2259 1.3 nonaka switch (cmd) {
2260 1.3 nonaka case NVME_PASSTHROUGH_CMD:
2261 1.5 nonaka pt = data;
2262 1.5 nonaka return nvme_command_passthrough(sc, data,
2263 1.61 mlelstv nsid == 0 ? pt->cmd.nsid : (uint32_t)nsid, l, nsid == 0);
2264 1.3 nonaka }
2265 1.3 nonaka
2266 1.3 nonaka return ENOTTY;
2267 1.3 nonaka }
2268
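/*
 * Illustrative userland sketch (editor's note, not part of the driver):
 * NVME_PASSTHROUGH_CMD can be used to issue an IDENTIFY CONTROLLER
 * command through the node handled by nvmeioctl() above.  The device
 * path is an assumption here, and <dev/ic/nvmeio.h> is authoritative
 * for the struct nvme_pt_command layout; the fields used below (cmd,
 * buf, len, is_read) are the ones the passthrough path consumes.
 */
#if 0
#include <sys/ioctl.h>
#include <dev/ic/nvmeio.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct nvme_pt_command pt;
	unsigned char *buf;
	int fd;

	/* assumed control node for unit 0, nsid 0 (the controller) */
	if ((fd = open("/dev/nvme0", O_RDWR)) == -1)
		err(1, "open");
	if ((buf = calloc(1, 4096)) == NULL)
		err(1, "calloc");

	memset(&pt, 0, sizeof(pt));
	pt.cmd.opcode = 0x06;	/* NVM_ADMIN_IDENTIFY */
	pt.cmd.cdw10 = 1;	/* CNS 1: identify controller */
	pt.buf = buf;
	pt.len = 4096;
	pt.is_read = 1;		/* data flows from the device to us */

	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) == -1)
		err(1, "NVME_PASSTHROUGH_CMD");

	/* the model number occupies bytes 24..63 of the identify data */
	printf("model: %.40s\n", (char *)&buf[24]);

	free(buf);
	close(fd);
	return 0;
}
#endif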