/*	$NetBSD: nvmevar.h,v 1.16 2018/04/18 10:05:59 nonaka Exp $	*/
/*	$OpenBSD: nvmevar.h,v 1.8 2016/04/14 11:18:32 dlg Exp $	*/
3
4 /*
5 * Copyright (c) 2014 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/bus.h>
21 #include <sys/cpu.h>
22 #include <sys/device.h>
23 #include <sys/mutex.h>
24 #include <sys/pool.h>
25 #include <sys/queue.h>
26
/*
 * A bus_dma(9) allocation used for queues and PRP lists: one backing
 * segment (ndm_seg), mapped for device DMA (ndm_map) and into kernel
 * virtual address space (ndm_kva).
 */
struct nvme_dmamem {
	bus_dmamap_t		ndm_map;	/* DMA map handle */
	bus_dma_segment_t	ndm_seg;	/* single backing segment */
	size_t			ndm_size;	/* allocation size in bytes */
	void			*ndm_kva;	/* kernel virtual address */
};
/* Accessors; these assume the memory is mapped as one segment (dm_segs[0]). */
#define NVME_DMA_MAP(_ndm)	((_ndm)->ndm_map)
#define NVME_DMA_LEN(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_len)
#define NVME_DMA_DVA(_ndm)	((uint64_t)(_ndm)->ndm_map->dm_segs[0].ds_addr)
#define NVME_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva)
37
38 struct nvme_softc;
39 struct nvme_queue;
40
41 typedef void (*nvme_nnc_done)(void *, struct buf *, uint16_t, uint32_t);
42
/*
 * Per-command control block.  Each queue owns an array of these
 * (nvme_queue::q_ccbs); free ccbs are linked on q_ccb_list.
 */
struct nvme_ccb {
	SIMPLEQ_ENTRY(nvme_ccb) ccb_entry;	/* linkage on q_ccb_list */

	/* DMA handles */
	bus_dmamap_t	ccb_dmamap;	/* map for this command's data buffer */

	bus_addr_t	ccb_prpl_off;	/* offset of this ccb's PRP list
					 * within q_ccb_prpls */
	uint64_t	ccb_prpl_dva;	/* device (bus) address of that list */
	uint64_t	*ccb_prpl;	/* kva of that list */

	/* command context */
	uint16_t	ccb_id;		/* command identifier */
	void		*ccb_cookie;	/* caller-private state */
/*
 * NOTE(review): presumably stored in ccb_cookie to mark a ccb that is on
 * the free list — confirm against nvme.c.
 */
#define NVME_CCB_FREE	0xbeefdeed
	void		(*ccb_done)(struct nvme_queue *,
			    struct nvme_ccb *, struct nvme_cqe *);
					/* completion callback */

	/* namespace context (block I/O issued via nvme_ns_dobio()) */
	void		*nnc_cookie;	/* passed back through nnc_done */
	nvme_nnc_done	nnc_done;	/* I/O completion callback */
	uint16_t	nnc_nsid;	/* namespace id of the request */
	uint16_t	nnc_flags;	/* NVME_NS_CTX_F_* below */
#define NVME_NS_CTX_F_READ	__BIT(0)	/* transfer is a read */
#define NVME_NS_CTX_F_POLL	__BIT(1)	/* poll for completion */
#define NVME_NS_CTX_F_FUA	__BIT(2)	/* force unit access */

	struct buf	*nnc_buf;	/* buffer being transferred */
	daddr_t		nnc_blkno;	/* starting block number */
	size_t		nnc_datasize;	/* transfer length in bytes */
	int		nnc_secsize;	/* namespace sector size */
};
74
/*
 * A submission/completion queue pair together with its pool of ccbs.
 * The submission and completion sides are locked independently.
 */
struct nvme_queue {
	struct nvme_softc	*q_sc;		/* back pointer to controller */
	kmutex_t		q_sq_mtx;	/* serializes submission side */
	kmutex_t		q_cq_mtx;	/* serializes completion side */
	struct nvme_dmamem	*q_sq_dmamem;	/* submission queue entries */
	struct nvme_dmamem	*q_cq_dmamem;	/* completion queue entries */
	bus_size_t		q_sqtdbl;	/* submission queue tail doorbell */
	bus_size_t		q_cqhdbl;	/* completion queue head doorbell */
	uint16_t		q_id;		/* queue identifier */
	uint32_t		q_entries;	/* slots per queue */
	uint32_t		q_sq_tail;	/* next free SQ slot */
	uint32_t		q_cq_head;	/* next CQ slot to consume */
	uint16_t		q_cq_phase;	/* expected completion phase tag */

	kmutex_t		q_ccb_mtx;	/* protects the ccb free list */
	kcondvar_t		q_ccb_wait;	/* wait for ccb avail/finish */
	bool			q_ccb_waiting;	/* whether there are waiters */
	uint16_t		q_nccbs;	/* total number of ccbs */
	struct nvme_ccb		*q_ccbs;	/* ccb array (q_nccbs entries) */
	SIMPLEQ_HEAD(, nvme_ccb) q_ccb_list;	/* free ccbs */
	struct nvme_dmamem	*q_ccb_prpls;	/* PRP lists for all ccbs */
};
97
/*
 * Per-namespace state; one entry per slot in nvme_softc::sc_namespaces.
 */
struct nvme_namespace {
	struct nvm_identify_namespace *ident;	/* IDENTIFY NAMESPACE data */
	device_t	dev;			/* attached child device */
	uint32_t	flags;			/* NVME_NS_F_* below */
#define NVME_NS_F_OPEN	__BIT(0)		/* namespace is open */
};
104
/*
 * Per-controller softc.  Holds register mapping, DMA tag, interrupt
 * hooks, controller identify data, and the admin plus I/O queues.
 */
struct nvme_softc {
	device_t	sc_dev;

	bus_space_tag_t		sc_iot;		/* register space tag */
	bus_space_handle_t	sc_ioh;		/* register space handle */
	bus_size_t		sc_ios;		/* size of register mapping */
	bus_dma_tag_t		sc_dmat;	/* DMA tag for all allocations */

	/* bus-specific interrupt (dis)establishment, set by the attachment */
	int		(*sc_intr_establish)(struct nvme_softc *,
			    uint16_t qid, struct nvme_queue *);
	int		(*sc_intr_disestablish)(struct nvme_softc *,
			    uint16_t qid);
	void		**sc_ih;	/* interrupt handlers */
	void		**sc_softih;	/* softintr handlers */

	u_int		sc_rdy_to;	/* RDY timeout */
	size_t		sc_mps;		/* memory page size */
	size_t		sc_mdts;	/* max data transfer size */
	u_int		sc_max_sgl;	/* max S/G segments */

	struct nvm_identify_controller
			sc_identify;	/* IDENTIFY CONTROLLER data */

	u_int		sc_nn;		/* namespace count */
	struct nvme_namespace	*sc_namespaces;	/* array of sc_nn entries */

	bool		sc_use_mq;	/* using per-CPU (MSI-X) queues */
	u_int		sc_nq;		/* # of io queue (sc_q) */
	struct nvme_queue	*sc_admin_q;	/* admin queue */
	struct nvme_queue	**sc_q;		/* io queues, sc_nq entries */

	uint32_t	sc_flags;	/* NVME_F_* below */
#define NVME_F_ATTACHED	__BIT(0)	/* attach has completed */
#define NVME_F_OPEN	__BIT(1)	/* control device is open */

	uint32_t	sc_quirks;	/* NVME_QUIRK_* below */
/* NOTE(review): presumably a delay before polling RDY — confirm in nvme.c. */
#define NVME_QUIRK_DELAY_B4_CHK_RDY	__BIT(0)
};
143
/*
 * Little-endian load/store helpers: read or write a little-endian
 * integer through pointer p, converting to/from host byte order.
 * NOTE(review): these cast p to a wider integer pointer, so p must be
 * suitably aligned for uintN_t on strict-alignment platforms.
 */
#define lemtoh16(p)	le16toh(*((uint16_t *)(p)))
#define lemtoh32(p)	le32toh(*((uint32_t *)(p)))
#define lemtoh64(p)	le64toh(*((uint64_t *)(p)))
#define htolem16(p, x)	(*((uint16_t *)(p)) = htole16(x))
#define htolem32(p, x)	(*((uint32_t *)(p)) = htole32(x))
#define htolem64(p, x)	(*((uint64_t *)(p)) = htole64(x))
150
/*
 * Attach arguments passed to each namespace child device (autoconf).
 */
struct nvme_attach_args {
	uint16_t	naa_nsid;	/* namespace id to attach */
	uint32_t	naa_qentries;	/* total number of queue slots */
	uint32_t	naa_maxphys;	/* maximum device transfer size */
};
156
/* Controller attach/detach and autoconf glue. */
int	nvme_attach(struct nvme_softc *);
int	nvme_detach(struct nvme_softc *, int flags);
int	nvme_rescan(device_t, const char *, const int *);
void	nvme_childdet(device_t, device_t);
/* Interrupt entry points: INTx and MSI/MSI-X hard handlers + softintrs. */
int	nvme_intr(void *);
void	nvme_softintr_intx(void *);
int	nvme_intr_msi(void *);
void	nvme_softintr_msi(void *);
165
166 static inline struct nvme_queue *
167 nvme_get_q(struct nvme_softc *sc)
168 {
169 return sc->sc_q[cpu_index(curcpu()) % sc->sc_nq];
170 }
171
172 /*
173 * namespace
174 */
175 static inline struct nvme_namespace *
176 nvme_ns_get(struct nvme_softc *sc, uint16_t nsid)
177 {
178 if (nsid == 0 || nsid - 1 >= sc->sc_nn)
179 return NULL;
180 return &sc->sc_namespaces[nsid - 1];
181 }
182
/* Namespace identify/teardown. */
int	nvme_ns_identify(struct nvme_softc *, uint16_t);
void	nvme_ns_free(struct nvme_softc *, uint16_t);
/* Block I/O and cache management on a namespace. */
int	nvme_ns_dobio(struct nvme_softc *, uint16_t, void *,
    struct buf *, void *, size_t, int, daddr_t, int, nvme_nnc_done);
int	nvme_ns_sync(struct nvme_softc *, uint16_t, int);
int	nvme_admin_getcache(struct nvme_softc *, int *);
189