/*	$NetBSD: nvmevar.h,v 1.28 2022/08/14 12:08:57 jmcneill Exp $	*/
/*	$OpenBSD: nvmevar.h,v 1.8 2016/04/14 11:18:32 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/buf.h>

struct nvme_dmamem {
	bus_dmamap_t		ndm_map;
	bus_dma_segment_t	ndm_seg;
	size_t			ndm_size;
	void			*ndm_kva;
};
#define NVME_DMA_MAP(_ndm)	((_ndm)->ndm_map)
#define NVME_DMA_LEN(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_len)
#define NVME_DMA_DVA(_ndm)	((uint64_t)(_ndm)->ndm_map->dm_segs[0].ds_addr)
#define NVME_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva)

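/*
 * Illustrative sketch (not part of this header's API surface): a typical
 * way a chunk of controller-visible memory is obtained and addressed with
 * the NVME_DMA_* accessors, using nvme_dmamem_alloc()/nvme_dmamem_sync()/
 * nvme_dmamem_free() declared near the end of this file.  Error handling
 * is elided and the allocation size is an arbitrary example.
 *
 *	struct nvme_dmamem *mem;
 *
 *	mem = nvme_dmamem_alloc(sc, sc->sc_mps);
 *	if (mem == NULL)
 *		return ENOMEM;
 *	memset(NVME_DMA_KVA(mem), 0, NVME_DMA_LEN(mem));
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREWRITE);
 *	(program NVME_DMA_DVA(mem), the bus address, into the controller)
 *	nvme_dmamem_free(sc, mem);
 */
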
struct nvme_softc;
struct nvme_queue;

typedef void (*nvme_nnc_done)(void *, struct buf *, uint16_t, uint32_t);

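/*
 * Sketch of a completion handler matching nvme_nnc_done, as a caller of
 * nvme_ns_dobio() (declared at the end of this header) might supply.
 * Treating the trailing arguments as the completion queue entry's status
 * flags and command-specific dword, and reducing any non-zero status to
 * EIO, are simplifying assumptions of this example only.
 *
 *	static void
 *	example_bio_done(void *cookie, struct buf *bp, uint16_t status,
 *	    uint32_t cdw0)
 *	{
 *
 *		if (status != 0)
 *			bp->b_error = EIO;
 *		biodone(bp);
 *	}
 */
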
struct nvme_ccb {
	SIMPLEQ_ENTRY(nvme_ccb)	ccb_entry;

	/* DMA handles */
	bus_dmamap_t		ccb_dmamap;

	bus_addr_t		ccb_prpl_off;
	uint64_t		ccb_prpl_dva;
	uint64_t		*ccb_prpl;

	/* command context */
	uint16_t		ccb_id;
	void			*ccb_cookie;
#define NVME_CCB_FREE	0xbeefdeed
	void			(*ccb_done)(struct nvme_queue *,
				    struct nvme_ccb *, struct nvme_cqe *);

	/* namespace context */
	void		*nnc_cookie;
	nvme_nnc_done	nnc_done;
	uint16_t	nnc_nsid;
	uint16_t	nnc_flags;
#define	NVME_NS_CTX_F_READ	__BIT(0)
#define	NVME_NS_CTX_F_POLL	__BIT(1)
#define	NVME_NS_CTX_F_FUA	__BIT(2)

	struct buf	*nnc_buf;
	daddr_t		nnc_blkno;
	size_t		nnc_datasize;
	int		nnc_secsize;
};

struct nvme_queue {
	struct nvme_softc	*q_sc;
	kmutex_t		q_sq_mtx;
	kmutex_t		q_cq_mtx;
	struct nvme_dmamem	*q_sq_dmamem;
	struct nvme_dmamem	*q_cq_dmamem;
	struct nvme_dmamem	*q_nvmmu_dmamem; /* for Apple M1 NVMe */

	bus_size_t 		q_sqtdbl; /* submission queue tail doorbell */
	bus_size_t 		q_cqhdbl; /* completion queue head doorbell */
	uint16_t		q_id;
	uint32_t		q_entries;
	uint32_t		q_sq_tail;
	uint32_t		q_cq_head;
	uint16_t		q_cq_phase;

	kmutex_t		q_ccb_mtx;
	kcondvar_t		q_ccb_wait;	/* wait for ccb avail/finish */
	bool			q_ccb_waiting;	/* whether there are waiters */
	uint16_t		q_nccbs;	/* total number of ccbs */
	struct nvme_ccb		*q_ccbs;
	SIMPLEQ_HEAD(, nvme_ccb) q_ccb_list;
	struct nvme_dmamem	*q_ccb_prpls;
};

struct nvme_namespace {
	struct nvm_identify_namespace *ident;
	device_t dev;
	uint32_t flags;
#define	NVME_NS_F_OPEN	__BIT(0)
};

struct nvme_ops {
	void		(*op_enable)(struct nvme_softc *);

	int		(*op_q_alloc)(struct nvme_softc *,
			      struct nvme_queue *);
	void		(*op_q_free)(struct nvme_softc *,
			      struct nvme_queue *);

	uint32_t	(*op_sq_enter)(struct nvme_softc *,
			      struct nvme_queue *, struct nvme_ccb *);
	void		(*op_sq_leave)(struct nvme_softc *,
			      struct nvme_queue *, struct nvme_ccb *);
	uint32_t	(*op_sq_enter_locked)(struct nvme_softc *,
			      struct nvme_queue *, struct nvme_ccb *);
	void		(*op_sq_leave_locked)(struct nvme_softc *,
			      struct nvme_queue *, struct nvme_ccb *);

	void		(*op_cq_done)(struct nvme_softc *,
			      struct nvme_queue *, struct nvme_ccb *);
};

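/*
 * Hypothetical sketch of a front end installing an nvme_ops table via
 * sc_ops before attach; every name below is invented for illustration.
 * Controllers that need non-standard queue handling (the q_nvmmu_dmamem
 * member above exists for the Apple M1 controller) are the intended users.
 *
 *	static const struct nvme_ops example_nvme_ops = {
 *		.op_enable		= example_enable,
 *		.op_q_alloc		= example_q_alloc,
 *		.op_q_free		= example_q_free,
 *		.op_sq_enter		= example_sq_enter,
 *		.op_sq_leave		= example_sq_leave,
 *		.op_sq_enter_locked	= example_sq_enter_locked,
 *		.op_sq_leave_locked	= example_sq_leave_locked,
 *		.op_cq_done		= example_cq_done,
 *	};
 *
 *	sc->sc_ops = &example_nvme_ops;
 */
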
struct nvme_softc {
	device_t		sc_dev;

	const struct nvme_ops	*sc_ops;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	int			(*sc_intr_establish)(struct nvme_softc *,
				    uint16_t qid, struct nvme_queue *);
	int			(*sc_intr_disestablish)(struct nvme_softc *,
				    uint16_t qid);
	void			**sc_ih;	/* interrupt handlers */
	void			**sc_softih;	/* softintr handlers */

	u_int			sc_rdy_to;	/* RDY timeout */
	size_t			sc_mps;		/* memory page size */
	size_t			sc_mdts;	/* max data transfer size */
	u_int			sc_max_sgl;	/* max S/G segments */
	u_int			sc_dstrd;

	struct nvm_identify_controller
				sc_identify;

	u_int			sc_nn;		/* namespace count */
	struct nvme_namespace	*sc_namespaces;

	bool			sc_use_mq;
	u_int			sc_nq;		/* # of I/O queues (sc_q) */
	struct nvme_queue	*sc_admin_q;
	struct nvme_queue	**sc_q;

	uint32_t		sc_flags;
#define	NVME_F_ATTACHED	__BIT(0)
#define	NVME_F_OPEN	__BIT(1)

	uint32_t		sc_quirks;
#define	NVME_QUIRK_DELAY_B4_CHK_RDY	__BIT(0)
#define	NVME_QUIRK_NOMSI		__BIT(1)

	char			sc_modelname[81];
};

#define	lemtoh16(p)	le16toh(*((uint16_t *)(p)))
#define	lemtoh32(p)	le32toh(*((uint32_t *)(p)))
#define	lemtoh64(p)	le64toh(*((uint64_t *)(p)))
#define	htolem16(p, x)	(*((uint16_t *)(p)) = htole16(x))
#define	htolem32(p, x)	(*((uint32_t *)(p)) = htole32(x))
#define	htolem64(p, x)	(*((uint64_t *)(p)) = htole64(x))

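/*
 * Example use of the little-endian helpers above on fields of the shared,
 * wire-format structures; the field names are only illustrative and the
 * pointers are assumed to be suitably aligned.
 *
 *	uint64_t nsze = lemtoh64(&ns->ident->nsze);
 *	htolem32(&sqe->nsid, nsid);
 */
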
struct nvme_attach_args {
	uint16_t	naa_nsid;
	uint32_t	naa_qentries;	/* total number of queue slots */
	uint32_t	naa_maxphys;	/* maximum device transfer size */
	const char	*naa_typename;	/* identifier */
};

int	nvme_attach(struct nvme_softc *);
int	nvme_detach(struct nvme_softc *, int flags);
int	nvme_rescan(device_t, const char *, const int *);
void	nvme_childdet(device_t, device_t);
int	nvme_suspend(struct nvme_softc *);
int	nvme_resume(struct nvme_softc *);
int	nvme_intr(void *);
void	nvme_softintr_intx(void *);
int	nvme_intr_msi(void *);
void	nvme_softintr_msi(void *);

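/*
 * Condensed, hypothetical sketch of what a bus front end (e.g. PCI or FDT
 * attachment glue) is expected to set up before handing the controller to
 * nvme_attach(); the local variable names and the error message are this
 * example's, not code from an actual front end.  The sc_intr_establish
 * hook is expected to register nvme_intr()/nvme_intr_msi() (and matching
 * softintr handlers) declared above for the given queue.
 *
 *	sc->sc_dev = self;
 *	sc->sc_iot = memt;
 *	sc->sc_ioh = memh;
 *	sc->sc_ios = mems;
 *	sc->sc_dmat = dmat;
 *	sc->sc_intr_establish = example_intr_establish;
 *	sc->sc_intr_disestablish = example_intr_disestablish;
 *	if (nvme_attach(sc) != 0) {
 *		aprint_error_dev(self, "unable to attach controller\n");
 *		return;
 *	}
 *	sc->sc_flags |= NVME_F_ATTACHED;
 */
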
static __inline struct nvme_queue *
nvme_get_q(struct nvme_softc *sc)
{
	return sc->sc_q[cpu_index(curcpu()) % sc->sc_nq];
}

/*
 * namespace
 */
static __inline struct nvme_namespace *
nvme_ns_get(struct nvme_softc *sc, uint16_t nsid)
{
	if (nsid == 0 || nsid - 1 >= sc->sc_nn)
		return NULL;
	return &sc->sc_namespaces[nsid - 1];
}

#define nvme_read4(_s, _r) \
	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write4(_s, _r, _v) \
	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
uint64_t
	nvme_read8(struct nvme_softc *, bus_size_t);
void	nvme_write8(struct nvme_softc *, bus_size_t, uint64_t);

#define nvme_barrier(_s, _r, _l, _f) \
	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))

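/*
 * Example register access pattern using the accessors above; NVME_CC and
 * NVME_CSTS are assumed to be the register offsets from nvmereg.h, and the
 * surrounding enable/reset logic is omitted.
 *
 *	nvme_write4(sc, NVME_CC, cc);
 *	nvme_barrier(sc, 0, sc->sc_ios,
 *	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 *	csts = nvme_read4(sc, NVME_CSTS);
 */
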
struct nvme_dmamem *
	nvme_dmamem_alloc(struct nvme_softc *, size_t);
void	nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
void	nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *, int);

int	nvme_ns_identify(struct nvme_softc *, uint16_t);
void	nvme_ns_free(struct nvme_softc *, uint16_t);
int	nvme_ns_dobio(struct nvme_softc *, uint16_t, void *,
    struct buf *, void *, size_t, int, daddr_t, int, nvme_nnc_done);
int	nvme_ns_sync(struct nvme_softc *, uint16_t, int);
int	nvme_admin_getcache(struct nvme_softc *, int *);
int	nvme_admin_setcache(struct nvme_softc *, int);
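
/*
 * Sketch of a namespace I/O submission as a consumer (for example a disk
 * attachment) might issue it, pairing nvme_ns_dobio() with an nvme_nnc_done
 * callback such as the one sketched near the top of this header.  The
 * argument mapping shown here is an assumption to be checked against
 * nvme.c, not a contract stated by the prototype alone.
 *
 *	error = nvme_ns_dobio(sc, nsid, cookie, bp,
 *	    bp->b_data, bp->b_bcount, secsize, bp->b_rawblkno,
 *	    BUF_ISREAD(bp) ? NVME_NS_CTX_F_READ : 0, example_bio_done);
 *
 *	error = nvme_ns_sync(sc, nsid, NVME_NS_CTX_F_POLL);
 */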
    239