/*	$NetBSD: nvmereg.h,v 1.3.2.1 2016/11/04 14:49:09 pgoyette Exp $	*/
/*	$OpenBSD: nvmereg.h,v 1.10 2016/04/14 11:18:32 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __NVMEREG_H__
#define __NVMEREG_H__

#define NVME_CAP	0x0000	/* Controller Capabilities */
#define NVME_CAP_MPSMAX(_r)	(12 + (((_r) >> 52) & 0xf)) /* shift */
#define NVME_CAP_MPSMIN(_r)	(12 + (((_r) >> 48) & 0xf)) /* shift */
#define NVME_CAP_CSS(_r)	(((_r) >> 37) & 0x7f)
#define NVME_CAP_CSS_NVM	__BIT(0)
#define NVME_CAP_NSSRS(_r)	ISSET((_r), __BIT(36))
#define NVME_CAP_DSTRD(_r)	__BIT(2 + (((_r) >> 32) & 0xf)) /* bytes */
#define NVME_CAP_TO(_r)		(500 * (((_r) >> 24) & 0xff)) /* ms */
#define NVME_CAP_AMS(_r)	(((_r) >> 17) & 0x3)
#define NVME_CAP_AMS_WRR	__BIT(0)
#define NVME_CAP_AMS_VENDOR	__BIT(1)
#define NVME_CAP_CQR(_r)	ISSET((_r), __BIT(16))
#define NVME_CAP_MQES(_r)	(((_r) & 0xffff) + 1)
#define NVME_CAP_LO	0x0000
#define NVME_CAP_HI	0x0004
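
/*
 * Example: decoding CAP.  Illustrative sketch only; "nvme_read4" stands in
 * for whatever 32-bit register accessor the driver provides and is not
 * part of this header.
 *
 *	uint64_t cap = nvme_read4(sc, NVME_CAP_LO) |
 *	    ((uint64_t)nvme_read4(sc, NVME_CAP_HI) << 32);
 *	u_int dstrd = NVME_CAP_DSTRD(cap);	doorbell stride, in bytes
 *	u_int mps_min = NVME_CAP_MPSMIN(cap);	min page size, as a shift
 *	u_int to_ms = NVME_CAP_TO(cap);		worst-case CSTS.RDY wait
 *	u_int mqes = NVME_CAP_MQES(cap);	max entries per I/O queue
 */
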
#define NVME_VS		0x0008	/* Version */
#define NVME_VS_MJR(_r)	(((_r) >> 16) & 0xffff)
#define NVME_VS_MNR(_r)	((_r) & 0xffff)
#define NVME_VS_1_0	0x00010000
#define NVME_VS_1_1	0x00010100
#define NVME_VS_1_2	0x00010200
#define NVME_INTMS	0x000c	/* Interrupt Mask Set */
#define NVME_INTMC	0x0010	/* Interrupt Mask Clear */
#define NVME_CC		0x0014	/* Controller Configuration */
#define NVME_CC_IOCQES(_v)	(((_v) & 0xf) << 20)
#define NVME_CC_IOCQES_MASK	NVME_CC_IOCQES(0xf)
#define NVME_CC_IOCQES_R(_v)	(((_v) >> 20) & 0xf)
#define NVME_CC_IOSQES(_v)	(((_v) & 0xf) << 16)
#define NVME_CC_IOSQES_MASK	NVME_CC_IOSQES(0xf)
#define NVME_CC_IOSQES_R(_v)	(((_v) >> 16) & 0xf)
#define NVME_CC_SHN(_v)		(((_v) & 0x3) << 14)
#define NVME_CC_SHN_MASK	NVME_CC_SHN(0x3)
#define NVME_CC_SHN_R(_v)	(((_v) >> 14) & 0x3)
#define NVME_CC_SHN_NONE	0
#define NVME_CC_SHN_NORMAL	1
#define NVME_CC_SHN_ABRUPT	2
#define NVME_CC_AMS(_v)		(((_v) & 0x7) << 11)
#define NVME_CC_AMS_MASK	NVME_CC_AMS(0x7)
#define NVME_CC_AMS_R(_v)	(((_v) >> 11) & 0x7)
#define NVME_CC_AMS_RR		0 /* round-robin */
#define NVME_CC_AMS_WRR_U	1 /* weighted round-robin w/ urgent */
#define NVME_CC_AMS_VENDOR	7 /* vendor */
#define NVME_CC_MPS(_v)		((((_v) - 12) & 0xf) << 7)
#define NVME_CC_MPS_MASK	(0xf << 7)
#define NVME_CC_MPS_R(_v)	(12 + (((_v) >> 7) & 0xf))
#define NVME_CC_CSS(_v)		(((_v) & 0x7) << 4)
#define NVME_CC_CSS_MASK	NVME_CC_CSS(0x7)
#define NVME_CC_CSS_R(_v)	(((_v) >> 4) & 0x7)
#define NVME_CC_CSS_NVM		0
#define NVME_CC_EN		__BIT(0)
#define NVME_CSTS	0x001c	/* Controller Status */
#define NVME_CSTS_SHST_MASK	(0x3 << 2)
#define NVME_CSTS_SHST_NONE	(0x0 << 2) /* normal operation */
#define NVME_CSTS_SHST_WAIT	(0x1 << 2) /* shutdown processing occurring */
#define NVME_CSTS_SHST_DONE	(0x2 << 2) /* shutdown processing complete */
#define NVME_CSTS_CFS		(1 << 1)
#define NVME_CSTS_RDY		(1 << 0)
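
/*
 * Example: a minimal enable sequence built from the CC/CSTS definitions
 * above.  Illustrative sketch only; "nvme_read4"/"nvme_write4" are
 * hypothetical register accessors, "mps" is the chosen page shift (e.g.
 * NVME_CAP_MPSMIN(cap)), and timeout handling (bounded by NVME_CAP_TO())
 * is omitted.
 *
 *	uint32_t cc = nvme_read4(sc, NVME_CC);
 *	cc &= ~(NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
 *	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
 *	cc |= NVME_CC_IOSQES(6) | NVME_CC_IOCQES(4);	64/16 byte entries
 *	cc |= NVME_CC_AMS(NVME_CC_AMS_RR) | NVME_CC_CSS(NVME_CC_CSS_NVM);
 *	cc |= NVME_CC_MPS(mps) | NVME_CC_EN;
 *	nvme_write4(sc, NVME_CC, cc);
 *	while (!ISSET(nvme_read4(sc, NVME_CSTS), NVME_CSTS_RDY))
 *		delay(1000);
 */
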
#define NVME_NSSR	0x0020	/* NVM Subsystem Reset (Optional) */
#define NVME_AQA	0x0024	/* Admin Queue Attributes */
				/* Admin Completion Queue Size */
#define NVME_AQA_ACQS(_v)	(((_v) - 1) << 16)
#define NVME_AQA_ACQS_R(_v)	(((_v) >> 16) & ((1 << 12) - 1))
				/* Admin Submission Queue Size */
#define NVME_AQA_ASQS(_v)	(((_v) - 1) << 0)
#define NVME_AQA_ASQS_R(_v)	((_v) & ((1 << 12) - 1))
#define NVME_ASQ	0x0028	/* Admin Submission Queue Base Address */
#define NVME_ACQ	0x0030	/* Admin Completion Queue Base Address */
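
/*
 * Example: programming the admin queue registers before setting CC.EN.
 * Illustrative sketch only; the accessors and the DMA addresses/entry
 * counts are placeholders for whatever bookkeeping the driver keeps.  The
 * ACQS/ASQS macros take the number of entries and convert to the 0-based
 * encoding the register expects.
 *
 *	nvme_write4(sc, NVME_AQA,
 *	    NVME_AQA_ACQS(acq_entries) | NVME_AQA_ASQS(asq_entries));
 *	nvme_write8(sc, NVME_ASQ, asq_paddr);
 *	nvme_write8(sc, NVME_ACQ, acq_paddr);
 */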

#define NVME_ADMIN_Q		0
/* Submission Queue Tail Doorbell */
#define NVME_SQTDBL(_q, _s)	(0x1000 + (2 * (_q) + 0) * (_s))
/* Completion Queue Head Doorbell */
#define NVME_CQHDBL(_q, _s)	(0x1000 + (2 * (_q) + 1) * (_s))
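
/*
 * Example: ringing the doorbells for queue "qid" (NVME_ADMIN_Q for the
 * admin queue).  The stride argument is the byte value computed by
 * NVME_CAP_DSTRD(cap).  Sketch only; "nvme_write4" is a placeholder
 * accessor and the tail/head indices are the driver's own state.
 *
 *	nvme_write4(sc, NVME_SQTDBL(qid, NVME_CAP_DSTRD(cap)), sq_tail);
 *	nvme_write4(sc, NVME_CQHDBL(qid, NVME_CAP_DSTRD(cap)), cq_head);
 */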

struct nvme_sge {
	uint8_t		id;
	uint8_t		_reserved[15];
} __packed __aligned(8);

struct nvme_sge_data {
	uint8_t		id;
	uint8_t		_reserved[3];

	uint32_t	length;

	uint64_t	address;
} __packed __aligned(8);

struct nvme_sge_bit_bucket {
	uint8_t		id;
	uint8_t		_reserved[3];

	uint32_t	length;

	uint64_t	address;
} __packed __aligned(8);

struct nvme_sqe {
	uint8_t		opcode;
	uint8_t		flags;
	uint16_t	cid;

	uint32_t	nsid;

	uint8_t		_reserved[8];

	uint64_t	mptr;

	union {
		uint64_t	prp[2];
		struct nvme_sge	sge;
	} __packed	entry;

	uint32_t	cdw10;
	uint32_t	cdw11;
	uint32_t	cdw12;
	uint32_t	cdw13;
	uint32_t	cdw14;
	uint32_t	cdw15;
} __packed __aligned(8);

struct nvme_sqe_q {
	uint8_t		opcode;
	uint8_t		flags;
	uint16_t	cid;

	uint8_t		_reserved1[20];

	uint64_t	prp1;

	uint8_t		_reserved2[8];

	uint16_t	qid;
	uint16_t	qsize;

	uint8_t		qflags;
#define NVM_SQE_SQ_QPRIO_URG	(0x0 << 1)
#define NVM_SQE_SQ_QPRIO_HI	(0x1 << 1)
#define NVM_SQE_SQ_QPRIO_MED	(0x2 << 1)
#define NVM_SQE_SQ_QPRIO_LOW	(0x3 << 1)
#define NVM_SQE_CQ_IEN		(1 << 1)
#define NVM_SQE_Q_PC		(1 << 0)
	uint8_t		_reserved3;
	uint16_t	cqid;	/* XXX interrupt vector for cq */

	uint8_t		_reserved4[16];
} __packed __aligned(8);

struct nvme_sqe_io {
	uint8_t		opcode;
	uint8_t		flags;
	uint16_t	cid;

	uint32_t	nsid;

	uint8_t		_reserved[8];

	uint64_t	mptr;

	union {
		uint64_t	prp[2];
		struct nvme_sge	sge;
	} __packed	entry;

	uint64_t	slba;	/* Starting LBA */

	uint16_t	nlb;	/* Number of Logical Blocks */
	uint16_t	ioflags;
#define NVM_SQE_IO_LR		__BIT(15)	/* Limited Retry */
#define NVM_SQE_IO_FUA		__BIT(14)	/* Force Unit Access (bypass cache) */

	uint8_t		dsm;	/* Dataset Management */
#define NVM_SQE_IO_INCOMP	__BIT(7)	/* Incompressible */
#define NVM_SQE_IO_SEQ		__BIT(6)	/* Sequential request */
#define NVM_SQE_IO_LAT_MASK	__BITS(4, 5)	/* Access Latency */
#define NVM_SQE_IO_LAT_NONE	0		/* Latency: none */
#define NVM_SQE_IO_LAT_IDLE	__BIT(4)	/* Latency: idle */
#define NVM_SQE_IO_LAT_NORMAL	__BIT(5)	/* Latency: normal */
#define NVM_SQE_IO_LAT_LOW	__BITS(4, 5)	/* Latency: low */
#define NVM_SQE_IO_FREQ_MASK	__BITS(0, 3)	/* Access Frequency */
#define NVM_SQE_IO_FREQ_TYPICAL	0x1		/* Typical */
#define NVM_SQE_IO_FREQ_INFR_INFW 0x2		/* Infrequent reads and writes */
#define NVM_SQE_IO_FREQ_FRR_INFW 0x3		/* Frequent reads, inf. writes */
#define NVM_SQE_IO_FREQ_INFR_FRW 0x4		/* Inf. reads, freq. writes */
#define NVM_SQE_IO_FREQ_FRR_FRW	0x5		/* Frequent reads and writes */
#define NVM_SQE_IO_FREQ_ONCE	0x6		/* One time i/o operation */
/* Extra Access Frequency bits for read operations */
#define NVM_SQE_IO_FREQ_SPEC	0x7		/* Speculative read - prefetch */
#define NVM_SQE_IO_FREQ_OVERWRITE 0x8		/* Will be overwritten soon */
	uint8_t		_reserved2[3];

	uint32_t	eilbrt;	/* Expected Initial Logical Block
				   Reference Tag */

	uint16_t	elbat;	/* Expected Logical Block
				   Application Tag */
	uint16_t	elbatm;	/* Expected Logical Block
				   Application Tag Mask */
} __packed __aligned(8);

struct nvme_cqe {
	uint32_t	cdw0;

	uint32_t	_reserved;

	uint16_t	sqhd;	/* SQ Head Pointer */
	uint16_t	sqid;	/* SQ Identifier */

	uint16_t	cid;	/* Command Identifier */
	uint16_t	flags;
#define NVME_CQE_DNR		__BIT(15)
#define NVME_CQE_M		__BIT(14)
#define NVME_CQE_SCT_MASK	__BITS(8, 10)
#define NVME_CQE_SCT(_f)	((_f) & (0x07 << 8))
#define NVME_CQE_SCT_GENERIC		(0x00 << 8)
#define NVME_CQE_SCT_COMMAND		(0x01 << 8)
#define NVME_CQE_SCT_MEDIAERR		(0x02 << 8)
#define NVME_CQE_SCT_VENDOR		(0x07 << 8)
#define NVME_CQE_SC_MASK	__BITS(1, 7)
#define NVME_CQE_SC(_f)		((_f) & (0x7f << 1))
/* generic command status codes */
#define NVME_CQE_SC_SUCCESS		(0x00 << 1)
#define NVME_CQE_SC_INVALID_OPCODE	(0x01 << 1)
#define NVME_CQE_SC_INVALID_FIELD	(0x02 << 1)
#define NVME_CQE_SC_CID_CONFLICT	(0x03 << 1)
#define NVME_CQE_SC_DATA_XFER_ERR	(0x04 << 1)
#define NVME_CQE_SC_ABRT_BY_NO_PWR	(0x05 << 1)
#define NVME_CQE_SC_INTERNAL_DEV_ERR	(0x06 << 1)
#define NVME_CQE_SC_CMD_ABRT_REQD	(0x07 << 1)
#define NVME_CQE_SC_CMD_ABDR_SQ_DEL	(0x08 << 1)
#define NVME_CQE_SC_CMD_ABDR_FUSE_ERR	(0x09 << 1)
#define NVME_CQE_SC_CMD_ABDR_FUSE_MISS	(0x0a << 1)
#define NVME_CQE_SC_INVALID_NS		(0x0b << 1)
#define NVME_CQE_SC_CMD_SEQ_ERR		(0x0c << 1)
#define NVME_CQE_SC_INVALID_LAST_SGL	(0x0d << 1)
#define NVME_CQE_SC_INVALID_NUM_SGL	(0x0e << 1)
#define NVME_CQE_SC_DATA_SGL_LEN	(0x0f << 1)
#define NVME_CQE_SC_MDATA_SGL_LEN	(0x10 << 1)
#define NVME_CQE_SC_SGL_TYPE_INVALID	(0x11 << 1)
#define NVME_CQE_SC_LBA_RANGE		(0x80 << 1)
#define NVME_CQE_SC_CAP_EXCEEDED	(0x81 << 1)
#define NVME_CQE_SC_NS_NOT_RDY		(0x82 << 1)
#define NVME_CQE_SC_RSV_CONFLICT	(0x83 << 1)
/* command specific status codes */
#define NVME_CQE_SC_CQE_INVALID		(0x00 << 1)
#define NVME_CQE_SC_INVALID_QID		(0x01 << 1)
#define NVME_CQE_SC_MAX_Q_SIZE		(0x02 << 1)
#define NVME_CQE_SC_ABORT_LIMIT		(0x03 << 1)
#define NVME_CQE_SC_ASYNC_EV_REQ_LIMIT	(0x05 << 1)
#define NVME_CQE_SC_INVALID_FW_SLOT	(0x06 << 1)
#define NVME_CQE_SC_INVALID_FW_IMAGE	(0x07 << 1)
#define NVME_CQE_SC_INVALID_INT_VEC	(0x08 << 1)
#define NVME_CQE_SC_INVALID_LOG_PAGE	(0x09 << 1)
#define NVME_CQE_SC_INVALID_FORMAT	(0x0a << 1)
#define NVME_CQE_SC_FW_REQ_CNV_RESET	(0x0b << 1)
#define NVME_CQE_SC_FW_REQ_NVM_RESET	(0x10 << 1)
#define NVME_CQE_SC_FW_REQ_RESET	(0x11 << 1)
#define NVME_CQE_SC_FW_MAX_TIME_VIO	(0x12 << 1)
#define NVME_CQE_SC_FW_PROHIBIT		(0x13 << 1)
#define NVME_CQE_SC_OVERLAP_RANGE	(0x14 << 1)
#define NVME_CQE_SC_CONFLICT_ATTRS	(0x80 << 1)
#define NVME_CQE_SC_INVALID_PROT_INFO	(0x81 << 1)
#define NVME_CQE_SC_ATT_WR_TO_RO_PAGE	(0x82 << 1)
/* media error status codes */
#define NVME_CQE_SC_WRITE_FAULTS	(0x80 << 1)
#define NVME_CQE_SC_UNRECV_READ_ERR	(0x81 << 1)
#define NVME_CQE_SC_GUARD_CHECK_ERR	(0x82 << 1)
#define NVME_CQE_SC_APPL_TAG_CHECK_ERR	(0x83 << 1)
#define NVME_CQE_SC_REF_TAG_CHECK_ERR	(0x84 << 1)
#define NVME_CQE_SC_CMP_FAIL		(0x85 << 1)
#define NVME_CQE_SC_ACCESS_DENIED	(0x86 << 1)
#define NVME_CQE_PHASE		__BIT(0)
} __packed __aligned(8);
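
/*
 * Example: consuming a completion.  A new entry is detected by comparing
 * the phase bit with the phase expected for the current pass over the
 * ring.  Illustrative sketch only; the ring, head index and expected
 * phase are the caller's bookkeeping.
 *
 *	struct nvme_cqe *cqe = &ring[head];
 *	uint16_t flags = le16toh(cqe->flags);
 *
 *	if ((flags & NVME_CQE_PHASE) != phase)
 *		return;					nothing new yet
 *	if (NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS)
 *		printf("sct %#x sc %#x\n",
 *		    NVME_CQE_SCT(flags), NVME_CQE_SC(flags));
 */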

#define NVM_ADMIN_DEL_IOSQ	0x00	/* Delete I/O Submission Queue */
#define NVM_ADMIN_ADD_IOSQ	0x01	/* Create I/O Submission Queue */
#define NVM_ADMIN_GET_LOG_PG	0x02	/* Get Log Page */
#define NVM_ADMIN_DEL_IOCQ	0x04	/* Delete I/O Completion Queue */
#define NVM_ADMIN_ADD_IOCQ	0x05	/* Create I/O Completion Queue */
#define NVM_ADMIN_IDENTIFY	0x06	/* Identify */
#define NVM_ADMIN_ABORT		0x08	/* Abort */
#define NVM_ADMIN_SET_FEATURES	0x09	/* Set Features */
#define NVM_ADMIN_GET_FEATURES	0x0a	/* Get Features */
#define NVM_ADMIN_ASYNC_EV_REQ	0x0c	/* Asynchronous Event Request */
#define NVM_ADMIN_FW_COMMIT	0x10	/* Firmware Commit */
#define NVM_ADMIN_FW_DOWNLOAD	0x11	/* Firmware Image Download */
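
/*
 * Example: a Create I/O Completion Queue command pairs NVM_ADMIN_ADD_IOCQ
 * with struct nvme_sqe_q above.  Illustrative sketch only; the DMA
 * address, entry count and interrupt vector come from the driver.
 *
 *	struct nvme_sqe_q sqe;
 *
 *	memset(&sqe, 0, sizeof(sqe));
 *	sqe.opcode = NVM_ADMIN_ADD_IOCQ;
 *	sqe.prp1 = htole64(cq_paddr);		physically contiguous ring
 *	sqe.qid = htole16(qid);
 *	sqe.qsize = htole16(nentries - 1);	queue size is 0-based
 *	sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
 *	sqe.cqid = htole16(iv);			interrupt vector, see above
 */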

#define NVM_CMD_FLUSH		0x00	/* Flush */
#define NVM_CMD_WRITE		0x01	/* Write */
#define NVM_CMD_READ		0x02	/* Read */
#define NVM_CMD_WR_UNCOR	0x04	/* Write Uncorrectable */
#define NVM_CMD_COMPARE		0x05	/* Compare */
#define NVM_CMD_DSM		0x09	/* Dataset Management */
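
/*
 * Example: a Read command uses struct nvme_sqe_io above with NVM_CMD_READ.
 * Illustrative sketch only; nsid/lba/nblks come from the caller and the
 * PRP/SGL data pointer setup is elided.
 *
 *	struct nvme_sqe_io sqe;
 *
 *	memset(&sqe, 0, sizeof(sqe));
 *	sqe.opcode = NVM_CMD_READ;
 *	sqe.nsid = htole32(nsid);
 *	sqe.slba = htole64(lba);
 *	sqe.nlb = htole16(nblks - 1);	nlb is 0-based: 0 means one block
 */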

/* Power State Descriptor Data */
struct nvm_identify_psd {
	uint16_t	mp;	/* Max Power */
	uint8_t		_reserved1;
	uint8_t		flags;
#define NVME_PSD_NOPS		__BIT(1)
#define NVME_PSD_MPS		__BIT(0)

	uint32_t	enlat;	/* Entry Latency */

	uint32_t	exlat;	/* Exit Latency */

	uint8_t		rrt;	/* Relative Read Throughput */
#define NVME_PSD_RRT_MASK	__BITS(0, 4)
	uint8_t		rrl;	/* Relative Read Latency */
#define NVME_PSD_RRL_MASK	__BITS(0, 4)
	uint8_t		rwt;	/* Relative Write Throughput */
#define NVME_PSD_RWT_MASK	__BITS(0, 4)
	uint8_t		rwl;	/* Relative Write Latency */
#define NVME_PSD_RWL_MASK	__BITS(0, 4)

	uint16_t	idlp;	/* Idle Power */
	uint8_t		ips;	/* Idle Power Scale */
#define NVME_PSD_IPS_MASK	__BITS(0, 1)
	uint8_t		_reserved2;
	uint16_t	actp;	/* Active Power */
	uint16_t	ap;	/* Active Power Workload/Scale */
#define NVME_PSD_APW_MASK	__BITS(0, 2)
#define NVME_PSD_APS_MASK	__BITS(6, 7)

	uint8_t		_reserved[8];
} __packed __aligned(8);

struct nvm_identify_controller {
	/* Controller Capabilities and Features */

	uint16_t	vid;	/* PCI Vendor ID */
	uint16_t	ssvid;	/* PCI Subsystem Vendor ID */

	uint8_t		sn[20];	/* Serial Number */
	uint8_t		mn[40];	/* Model Number */
	uint8_t		fr[8];	/* Firmware Revision */

	uint8_t		rab;	/* Recommended Arbitration Burst */
	uint8_t		ieee[3]; /* IEEE OUI Identifier */

	uint8_t		cmic;	/* Controller Multi-Path I/O and
				   Namespace Sharing Capabilities */
	uint8_t		mdts;	/* Maximum Data Transfer Size */
	uint16_t	cntlid;	/* Controller ID */

	uint8_t		_reserved1[176];

	/* Admin Command Set Attributes & Optional Controller Capabilities */

	uint16_t	oacs;	/* Optional Admin Command Support */
#define NVME_ID_CTRLR_OACS_NS		__BIT(3)
#define NVME_ID_CTRLR_OACS_FW		__BIT(2)
#define NVME_ID_CTRLR_OACS_FORMAT	__BIT(1)
#define NVME_ID_CTRLR_OACS_SECURITY	__BIT(0)
	uint8_t		acl;	/* Abort Command Limit */
	uint8_t		aerl;	/* Asynchronous Event Request Limit */

	uint8_t		frmw;	/* Firmware Updates */
#define NVME_ID_CTRLR_FRMW_NOREQ_RESET	__BIT(4)
#define NVME_ID_CTRLR_FRMW_NSLOT	__BITS(1, 3)
#define NVME_ID_CTRLR_FRMW_SLOT1_RO	__BIT(0)
	uint8_t		lpa;	/* Log Page Attributes */
#define NVME_ID_CTRLR_LPA_CMD_EFFECT	__BIT(1)
#define NVME_ID_CTRLR_LPA_NS_SMART	__BIT(0)
	uint8_t		elpe;	/* Error Log Page Entries */
	uint8_t		npss;	/* Number of Power States Support */

	uint8_t		avscc;	/* Admin Vendor Specific Command
				   Configuration */
	uint8_t		apsta;	/* Autonomous Power State Transition
				   Attributes */

	uint8_t		_reserved2[246];

	/* NVM Command Set Attributes */

	uint8_t		sqes;	/* Submission Queue Entry Size */
#define NVME_ID_CTRLR_SQES_MAX	__BITS(4, 7)
#define NVME_ID_CTRLR_SQES_MIN	__BITS(0, 3)
	uint8_t		cqes;	/* Completion Queue Entry Size */
#define NVME_ID_CTRLR_CQES_MAX	__BITS(4, 7)
#define NVME_ID_CTRLR_CQES_MIN	__BITS(0, 3)
	uint8_t		_reserved3[2];

	uint32_t	nn;	/* Number of Namespaces */

	uint16_t	oncs;	/* Optional NVM Command Support */
#define NVME_ID_CTRLR_ONCS_RESERVATION	__BIT(5)
#define NVME_ID_CTRLR_ONCS_SET_FEATURES	__BIT(4)
#define NVME_ID_CTRLR_ONCS_WRITE_ZERO	__BIT(3)
#define NVME_ID_CTRLR_ONCS_DSM		__BIT(2)
#define NVME_ID_CTRLR_ONCS_WRITE_UNC	__BIT(1)
#define NVME_ID_CTRLR_ONCS_COMPARE	__BIT(0)
	uint16_t	fuses;	/* Fused Operation Support */

	uint8_t		fna;	/* Format NVM Attributes */
	uint8_t		vwc;	/* Volatile Write Cache */
#define NVME_ID_CTRLR_VWC_PRESENT	__BIT(0)
	uint16_t	awun;	/* Atomic Write Unit Normal */

	uint16_t	awupf;	/* Atomic Write Unit Power Fail */
	uint8_t		nvscc;	/* NVM Vendor Specific Command */
	uint8_t		_reserved4[1];

	uint16_t	acwu;	/* Atomic Compare & Write Unit */
	uint8_t		_reserved5[2];

	uint32_t	sgls;	/* SGL Support */

	uint8_t		_reserved6[164];

	/* I/O Command Set Attributes */

	uint8_t		_reserved7[1344];

	/* Power State Descriptors */

	struct nvm_identify_psd psd[32];	/* Power State Descriptors */

	/* Vendor Specific */

	uint8_t		_reserved8[1024];
} __packed __aligned(8);
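
/*
 * Example: fields of struct nvm_identify_controller a driver typically
 * consumes.  Illustrative sketch only; "id" is assumed to point at a
 * buffer returned by an NVM_ADMIN_IDENTIFY command for the controller,
 * and "cap" is the CAP register value read earlier.
 *
 *	log2 of the largest SQ/CQ entry sizes the controller supports:
 *	u_int sqes_max = __SHIFTOUT(id->sqes, NVME_ID_CTRLR_SQES_MAX);
 *	u_int cqes_max = __SHIFTOUT(id->cqes, NVME_ID_CTRLR_CQES_MAX);
 *
 *	MDTS is a power of two in units of the minimum page size;
 *	zero means no transfer size limit:
 *	if (id->mdts != 0)
 *		max_xfer = 1UL << (id->mdts + NVME_CAP_MPSMIN(cap));
 */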

struct nvm_namespace_format {
	uint16_t	ms;	/* Metadata Size */
	uint8_t		lbads;	/* LBA Data Size */
	uint8_t		rp;	/* Relative Performance */
} __packed __aligned(4);

struct nvm_identify_namespace {
	uint64_t	nsze;	/* Namespace Size */

	uint64_t	ncap;	/* Namespace Capacity */

	uint64_t	nuse;	/* Namespace Utilization */

	uint8_t		nsfeat;	/* Namespace Features */
#define NVME_ID_NS_NSFEAT_LOGICAL_BLK_ERR	__BIT(2)
#define NVME_ID_NS_NSFEAT_NS			__BIT(1)
#define NVME_ID_NS_NSFEAT_THIN_PROV		__BIT(0)
	uint8_t		nlbaf;	/* Number of LBA Formats */
	uint8_t		flbas;	/* Formatted LBA Size */
#define NVME_ID_NS_FLBAS(_f)	((_f) & 0x0f)
#define NVME_ID_NS_FLBAS_MD	0x10
	uint8_t		mc;	/* Metadata Capabilities */
	uint8_t		dpc;	/* End-to-end Data Protection
				   Capabilities */
	uint8_t		dps;	/* End-to-end Data Protection Type Settings */

	uint8_t		_reserved1[98];

	struct nvm_namespace_format
			lbaf[16];	/* LBA Format Support */

	uint8_t		_reserved2[192];

	uint8_t		vs[3712];
} __packed __aligned(8);
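
/*
 * Example: finding the in-use LBA format and the logical block size.
 * Illustrative sketch only; "idns" is assumed to point at a buffer
 * returned by an NVM_ADMIN_IDENTIFY command for the namespace.
 *
 *	struct nvm_namespace_format *f =
 *	    &idns->lbaf[NVME_ID_NS_FLBAS(idns->flbas)];
 *	uint64_t nblocks = le64toh(idns->nsze);		in logical blocks
 *	uint32_t secsize = 1 << f->lbads;		LBADS is a shift
 */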

#endif /* __NVMEREG_H__ */