/*	$NetBSD: nvmevar.h,v 1.25 2022/08/01 07:34:28 mlelstv Exp $	*/
/*	$OpenBSD: nvmevar.h,v 1.8 2016/04/14 11:18:32 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/queue.h>

struct nvme_dmamem {
	bus_dmamap_t		ndm_map;
	bus_dma_segment_t	ndm_seg;
	size_t			ndm_size;
	void			*ndm_kva;
};
#define NVME_DMA_MAP(_ndm)	((_ndm)->ndm_map)
#define NVME_DMA_LEN(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_len)
#define NVME_DMA_DVA(_ndm)	((uint64_t)(_ndm)->ndm_map->dm_segs[0].ds_addr)
#define NVME_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva)

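/*
 * Accessor sketch (illustrative, not part of the driver): the KVA is
 * what the kernel reads and writes, the DVA is what gets programmed
 * into the controller, e.g. as a queue base address.  "mem" is
 * assumed to be a successfully allocated struct nvme_dmamem, and
 * struct nvme_sqe comes from nvmereg.h.
 *
 *	struct nvme_sqe *ring = NVME_DMA_KVA(mem);
 *	uint64_t base = NVME_DMA_DVA(mem);	// give this to the device
 */
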
struct nvme_softc;
struct nvme_queue;

typedef void (*nvme_nnc_done)(void *, struct buf *, uint16_t, uint32_t);

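/*
 * Sketch of a namespace completion callback matching nvme_nnc_done.
 * The driver invokes it with (cookie, buf, cqe flags, cqe cdw0); the
 * status test below assumes the NVME_CQE_SC* macros from nvmereg.h,
 * and the function name is purely illustrative.
 *
 *	static void
 *	example_nnc_done(void *cookie, struct buf *bp, uint16_t flags,
 *	    uint32_t cdw0)
 *	{
 *		if (NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) {
 *			// map the status to an errno, fail the buf, ...
 *		}
 *	}
 */
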
struct nvme_ccb {
	SIMPLEQ_ENTRY(nvme_ccb)	ccb_entry;

	/* DMA handles */
	bus_dmamap_t		ccb_dmamap;

	bus_addr_t		ccb_prpl_off;
	uint64_t		ccb_prpl_dva;
	uint64_t		*ccb_prpl;

	/* command context */
	uint16_t		ccb_id;
	void			*ccb_cookie;
#define NVME_CCB_FREE	0xbeefdeed
	void			(*ccb_done)(struct nvme_queue *,
				    struct nvme_ccb *, struct nvme_cqe *);

	/* namespace context */
	void			*nnc_cookie;
	nvme_nnc_done		nnc_done;
	uint16_t		nnc_nsid;
	uint16_t		nnc_flags;
#define	NVME_NS_CTX_F_READ	__BIT(0)
#define	NVME_NS_CTX_F_POLL	__BIT(1)
#define	NVME_NS_CTX_F_FUA	__BIT(2)

	struct buf		*nnc_buf;
	daddr_t			nnc_blkno;
	size_t			nnc_datasize;
	int			nnc_secsize;
};

struct nvme_queue {
	struct nvme_softc	*q_sc;
	kmutex_t		q_sq_mtx;
	kmutex_t		q_cq_mtx;
	struct nvme_dmamem	*q_sq_dmamem;
	struct nvme_dmamem	*q_cq_dmamem;
	struct nvme_dmamem	*q_nvmmu_dmamem; /* for apple m1 nvme */

	bus_size_t		q_sqtdbl; /* submission queue tail doorbell */
	bus_size_t		q_cqhdbl; /* completion queue head doorbell */
	uint16_t		q_id;
	uint32_t		q_entries;
	uint32_t		q_sq_tail;
	uint32_t		q_cq_head;
	uint16_t		q_cq_phase;

	kmutex_t		q_ccb_mtx;
	kcondvar_t		q_ccb_wait;	/* wait for ccb avail/finish */
	bool			q_ccb_waiting;	/* whether there are waiters */
	uint16_t		q_nccbs;	/* total number of ccbs */
	struct nvme_ccb		*q_ccbs;
	SIMPLEQ_HEAD(, nvme_ccb) q_ccb_list;
	struct nvme_dmamem	*q_ccb_prpls;
};

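/*
 * Sketch of how q_cq_head and q_cq_phase track the completion ring
 * (roughly what the driver's completion path does; "ring" is assumed
 * to point at the struct nvme_cqe array in q_cq_dmamem).  An entry
 * belongs to the host while its phase bit matches q_cq_phase;
 * wrapping the head flips the expected phase.
 *
 *	struct nvme_cqe *cqe = &ring[q->q_cq_head];
 *	uint16_t flags = lemtoh16(&cqe->flags);
 *
 *	if ((flags & NVME_CQE_PHASE) == q->q_cq_phase) {
 *		// entry is valid: process it, then advance
 *		if (++q->q_cq_head >= q->q_entries) {
 *			q->q_cq_head = 0;
 *			q->q_cq_phase ^= NVME_CQE_PHASE;
 *		}
 *	}
 */
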
struct nvme_namespace {
	struct nvm_identify_namespace *ident;
	device_t		dev;
	uint32_t		flags;
#define	NVME_NS_F_OPEN	__BIT(0)
};

struct nvme_ops {
	void		(*op_enable)(struct nvme_softc *);

	int		(*op_q_alloc)(struct nvme_softc *,
			    struct nvme_queue *);
	void		(*op_q_free)(struct nvme_softc *,
			    struct nvme_queue *);

	uint32_t	(*op_sq_enter)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
	void		(*op_sq_leave)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
	uint32_t	(*op_sq_enter_locked)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
	void		(*op_sq_leave_locked)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);

	void		(*op_cq_done)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
};

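/*
 * Sketch: a bus front-end for a controller with non-standard queue
 * handling (e.g. the Apple M1 NVMe, cf. q_nvmmu_dmamem above) points
 * sc_ops at its own hook table before calling nvme_attach().  The
 * function names here are hypothetical.
 *
 *	static const struct nvme_ops myplat_nvme_ops = {
 *		.op_enable = myplat_nvme_enable,
 *		.op_q_alloc = myplat_nvme_q_alloc,
 *		.op_q_free = myplat_nvme_q_free,
 *		.op_sq_enter = myplat_nvme_sq_enter,
 *		.op_sq_leave = myplat_nvme_sq_leave,
 *		...
 *	};
 */
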
struct nvme_softc {
	device_t		sc_dev;

	const struct nvme_ops	*sc_ops;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	int			(*sc_intr_establish)(struct nvme_softc *,
				    uint16_t qid, struct nvme_queue *);
	int			(*sc_intr_disestablish)(struct nvme_softc *,
				    uint16_t qid);
	void			**sc_ih;	/* interrupt handlers */
	void			**sc_softih;	/* softintr handlers */

	u_int			sc_rdy_to;	/* RDY timeout */
	size_t			sc_mps;		/* memory page size */
	size_t			sc_mdts;	/* max data transfer size */
	u_int			sc_max_sgl;	/* max S/G segments */
	u_int			sc_dstrd;	/* doorbell stride */

	struct nvm_identify_controller
				sc_identify;

	u_int			sc_nn;		/* namespace count */
	struct nvme_namespace	*sc_namespaces;

	bool			sc_use_mq;
	u_int			sc_nq;		/* # of I/O queues (sc_q) */
	struct nvme_queue	*sc_admin_q;
	struct nvme_queue	**sc_q;

	uint32_t		sc_flags;
#define	NVME_F_ATTACHED	__BIT(0)
#define	NVME_F_OPEN	__BIT(1)

	uint32_t		sc_quirks;
#define	NVME_QUIRK_DELAY_B4_CHK_RDY	__BIT(0)
#define	NVME_QUIRK_NOMSI		__BIT(1)

	char			sc_modelname[81];
};

#define lemtoh16(p)	le16toh(*((uint16_t *)(p)))
#define lemtoh32(p)	le32toh(*((uint32_t *)(p)))
#define lemtoh64(p)	le64toh(*((uint64_t *)(p)))
#define htolem16(p, x)	(*((uint16_t *)(p)) = htole16(x))
#define htolem32(p, x)	(*((uint32_t *)(p)) = htole32(x))
#define htolem64(p, x)	(*((uint64_t *)(p)) = htole64(x))

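/*
 * These load and store the little-endian fields of NVMe command and
 * completion structures in host byte order.  For example (a sketch;
 * "sqe" and "cqe" are assumed to point at a struct nvme_sqe and
 * struct nvme_cqe from nvmereg.h):
 *
 *	htolem32(&sqe->nsid, nsid);		// store LE dword
 *	uint16_t flags = lemtoh16(&cqe->flags);	// load LE word
 */
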
struct nvme_attach_args {
	uint16_t	naa_nsid;
	uint32_t	naa_qentries;	/* total number of queue slots */
	uint32_t	naa_maxphys;	/* maximum device transfer size */
	const char	*naa_typename;	/* identifier */
};

int	nvme_attach(struct nvme_softc *);
int	nvme_detach(struct nvme_softc *, int flags);
int	nvme_rescan(device_t, const char *, const int *);
void	nvme_childdet(device_t, device_t);
int	nvme_suspend(struct nvme_softc *);
int	nvme_resume(struct nvme_softc *);
int	nvme_intr(void *);
void	nvme_softintr_intx(void *);
int	nvme_intr_msi(void *);
void	nvme_softintr_msi(void *);

static __inline struct nvme_queue *
nvme_get_q(struct nvme_softc *sc, struct buf *bp, bool waitok)
{
	struct nvme_queue *q;
	u_int cpunum;

	cpunum = cpu_index(curcpu());

	/* try own queue */
	q = sc->sc_q[cpunum % sc->sc_nq];
	if (waitok)
		return q;

	/*
	 * If our queue has no free ccbs (q_ccb_list is the free list,
	 * so empty means busy), search the other queues for an idle
	 * one, i.e. one with no commands in flight.
	 */
	if (SIMPLEQ_EMPTY(&q->q_ccb_list)) {
		for (u_int qoff = 1; qoff < sc->sc_nq; qoff++) {
			struct nvme_queue *t;
			t = sc->sc_q[(cpunum + qoff) % sc->sc_nq];
			if (t->q_sq_tail == t->q_cq_head) {
				q = t;
				break;
			}
		}
	}

	return q;
}

/*
 * namespace
 */
static __inline struct nvme_namespace *
nvme_ns_get(struct nvme_softc *sc, uint16_t nsid)
{
	if (nsid == 0 || nsid - 1 >= sc->sc_nn)
		return NULL;
	return &sc->sc_namespaces[nsid - 1];
}

#define nvme_read4(_s, _r) \
	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write4(_s, _r, _v) \
	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
uint64_t
	nvme_read8(struct nvme_softc *, bus_size_t);
void	nvme_write8(struct nvme_softc *, bus_size_t, uint64_t);

#define nvme_barrier(_s, _r, _l, _f) \
	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))

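/*
 * Example (sketch): ringing a submission queue's tail doorbell after
 * posting a command, roughly what the driver's queue code does.
 *
 *	if (++q->q_sq_tail >= q->q_entries)
 *		q->q_sq_tail = 0;
 *	nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
 */
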
struct nvme_dmamem *
	nvme_dmamem_alloc(struct nvme_softc *, size_t);
void	nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
void	nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *, int);

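/*
 * Typical lifecycle (sketch): allocate a DMA-safe region, sync it
 * around device access with BUS_DMASYNC_* ops, free it on detach.
 * Error handling is elided.
 *
 *	struct nvme_dmamem *mem = nvme_dmamem_alloc(sc, sc->sc_mps);
 *
 *	memset(NVME_DMA_KVA(mem), 0, NVME_DMA_LEN(mem));
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREWRITE);
 *	// ... device reads from NVME_DMA_DVA(mem) ...
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTWRITE);
 *	nvme_dmamem_free(sc, mem);
 */
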
int	nvme_ns_identify(struct nvme_softc *, uint16_t);
void	nvme_ns_free(struct nvme_softc *, uint16_t);
int	nvme_ns_dobio(struct nvme_softc *, uint16_t, void *,
	    struct buf *, void *, size_t, int, daddr_t, int, nvme_nnc_done);
int	nvme_ns_sync(struct nvme_softc *, uint16_t, int);
int	nvme_admin_getcache(struct nvme_softc *, int *);
int	nvme_admin_setcache(struct nvme_softc *, int);

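/*
 * Example (sketch): issuing namespace I/O from a front-end such as
 * ld(4).  The arguments are (sc, nsid, cookie, buf, data, datasize,
 * secsize, blkno, flags, done); "cookie" is handed back to the
 * nvme_nnc_done callback, whose name here is illustrative.
 * NVME_NS_CTX_F_READ selects a read, NVME_NS_CTX_F_POLL polls for
 * completion instead of sleeping.
 *
 *	error = nvme_ns_dobio(sc, nsid, cookie, bp, bp->b_data,
 *	    bp->b_bcount, secsize, bp->b_rawblkno,
 *	    BUF_ISREAD(bp) ? NVME_NS_CTX_F_READ : 0, example_nnc_done);
 *
 * A polled cache flush looks similar:
 *
 *	error = nvme_ns_sync(sc, nsid, NVME_NS_CTX_F_POLL);
 */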