pvscsi.c revision 1.4
      1  1.1     skrll /*-
      2  1.1     skrll  * Copyright (c) 2018 VMware, Inc.
      3  1.1     skrll  *
      4  1.1     skrll  * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
      5  1.1     skrll  */
      6  1.1     skrll 
      7  1.1     skrll /*
      8  1.1     skrll 
      9  1.1     skrll These files are provided under a dual BSD-2 Clause/GPLv2 license. When
     10  1.1     skrll using or redistributing this file, you may do so under either license.
     11  1.1     skrll 
     12  1.1     skrll BSD-2 Clause License
     13  1.1     skrll 
     14  1.1     skrll Copyright (c) 2018 VMware, Inc.
     15  1.1     skrll 
     16  1.1     skrll Redistribution and use in source and binary forms, with or without
     17  1.1     skrll modification, are permitted provided that the following conditions
     18  1.1     skrll are met:
     19  1.1     skrll 
     20  1.1     skrll   * Redistributions of source code must retain the above copyright
     21  1.1     skrll     notice, this list of conditions and the following disclaimer.
     22  1.1     skrll 
     23  1.1     skrll   * Redistributions in binary form must reproduce the above copyright
     24  1.1     skrll     notice, this list of conditions and the following disclaimer in
     25  1.1     skrll     the documentation and/or other materials provided with the
     26  1.1     skrll     distribution.
     27  1.1     skrll 
     28  1.1     skrll THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     29  1.1     skrll "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     30  1.1     skrll LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     31  1.1     skrll A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     32  1.1     skrll OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     33  1.1     skrll SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     34  1.1     skrll LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     35  1.1     skrll DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     36  1.1     skrll THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     37  1.1     skrll (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     38  1.1     skrll OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     39  1.1     skrll 
     40  1.1     skrll GPL License Summary
     41  1.1     skrll 
     42  1.1     skrll Copyright (c) 2018 VMware, Inc.
     43  1.1     skrll 
     44  1.1     skrll This program is free software; you can redistribute it and/or modify
     45  1.1     skrll it under the terms of version 2 of the GNU General Public License as
     46  1.1     skrll published by the Free Software Foundation.
     47  1.1     skrll 
     48  1.1     skrll This program is distributed in the hope that it will be useful, but
     49  1.1     skrll WITHOUT ANY WARRANTY; without even the implied warranty of
     50  1.1     skrll MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     51  1.1     skrll General Public License for more details.
     52  1.1     skrll 
     53  1.1     skrll You should have received a copy of the GNU General Public License
     54  1.1     skrll along with this program; if not, write to the Free Software
     55  1.1     skrll Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
     56  1.1     skrll The full GNU General Public License is included in this distribution
     57  1.1     skrll in the file called LICENSE.GPL.
     58  1.1     skrll 
     59  1.1     skrll */
     60  1.1     skrll 
     61  1.1     skrll #include <sys/cdefs.h>
     62  1.4  riastrad __KERNEL_RCSID(0, "$NetBSD: pvscsi.c,v 1.4 2025/09/06 02:56:40 riastradh Exp $");
     63  1.1     skrll 
     64  1.1     skrll #include <sys/param.h>
     65  1.1     skrll 
     66  1.1     skrll #include <sys/buf.h>
     67  1.1     skrll #include <sys/bus.h>
     68  1.1     skrll #include <sys/cpu.h>
     69  1.1     skrll #include <sys/device.h>
     70  1.1     skrll #include <sys/kernel.h>
     71  1.1     skrll #include <sys/kmem.h>
     72  1.3  riastrad #include <sys/paravirt_membar.h>
     73  1.1     skrll #include <sys/queue.h>
     74  1.1     skrll #include <sys/sysctl.h>
     75  1.1     skrll #include <sys/systm.h>
     76  1.1     skrll 
     77  1.1     skrll #include <dev/pci/pcireg.h>
     78  1.1     skrll #include <dev/pci/pcivar.h>
     79  1.1     skrll #include <dev/pci/pcidevs.h>
     80  1.1     skrll 
     81  1.1     skrll #include <dev/scsipi/scsi_all.h>
     82  1.1     skrll #include <dev/scsipi/scsi_message.h>
     83  1.1     skrll #include <dev/scsipi/scsiconf.h>
     84  1.1     skrll #include <dev/scsipi/scsipi_disk.h>
     85  1.1     skrll #include <dev/scsipi/scsi_disk.h>
     86  1.1     skrll 
     87  1.1     skrll #include "pvscsi.h"
     88  1.1     skrll 
     89  1.1     skrll #define	PVSCSI_DEFAULT_NUM_PAGES_REQ_RING	8
     90  1.1     skrll #define	PVSCSI_SENSE_LENGTH			256
     91  1.1     skrll 
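                        /*
                         * A transfer of up to MAXPHYS bytes that is not page-aligned can
                         * touch one more page than MAXPHYS / PAGE_SIZE, hence the "+ 1"
                         * in the segment count below.
                         */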
     92  1.1     skrll #define PVSCSI_MAXPHYS				MAXPHYS
     93  1.1     skrll #define PVSCSI_MAXPHYS_SEGS			((PVSCSI_MAXPHYS / PAGE_SIZE) + 1)
     94  1.1     skrll 
     95  1.1     skrll #define PVSCSI_CMD_PER_LUN 64
     96  1.1     skrll #define PVSCSI_MAX_LUN 8
     97  1.1     skrll #define PVSCSI_MAX_TARGET 16
     98  1.1     skrll 
     99  1.1     skrll //#define PVSCSI_DEBUG_LOGGING
    100  1.1     skrll 
    101  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    102  1.1     skrll #define	DEBUG_PRINTF(level, dev, fmt, ...)				\
    103  1.1     skrll 	do {								\
    104  1.1     skrll 		if (pvscsi_log_level >= (level)) {			\
    105  1.1     skrll 			aprint_normal_dev((dev), (fmt), ##__VA_ARGS__);	\
    106  1.1     skrll 		}							\
    107  1.1     skrll 	} while(0)
    108  1.1     skrll #else
    109  1.1     skrll #define DEBUG_PRINTF(level, dev, fmt, ...)
    110  1.1     skrll #endif /* PVSCSI_DEBUG_LOGGING */
    111  1.1     skrll 
    112  1.1     skrll struct pvscsi_softc;
    113  1.1     skrll struct pvscsi_hcb;
    114  1.1     skrll struct pvscsi_dma;
    115  1.1     skrll 
    116  1.1     skrll #define VMWARE_PVSCSI_DEVSTR	"VMware Paravirtual SCSI Controller"
    117  1.1     skrll 
    118  1.1     skrll static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc,
    119  1.1     skrll     uint32_t offset);
    120  1.1     skrll static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset,
    121  1.1     skrll     uint32_t val);
    122  1.1     skrll static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc);
    123  1.1     skrll static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc,
    124  1.1     skrll     uint32_t val);
    125  1.1     skrll static inline void pvscsi_intr_enable(struct pvscsi_softc *sc);
    126  1.1     skrll static inline void pvscsi_intr_disable(struct pvscsi_softc *sc);
    127  1.1     skrll static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0);
    128  1.1     skrll static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    129  1.1     skrll     uint32_t len);
    130  1.1     skrll static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc);
    131  1.1     skrll static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable);
    132  1.1     skrll static void pvscsi_setup_rings(struct pvscsi_softc *sc);
    133  1.1     skrll static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc);
    134  1.1     skrll static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc);
    135  1.1     skrll 
    136  1.1     skrll static void pvscsi_timeout(void *arg);
    137  1.1     skrll static void pvscsi_adapter_reset(struct pvscsi_softc *sc);
    138  1.1     skrll static void pvscsi_bus_reset(struct pvscsi_softc *sc);
    139  1.1     skrll static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target);
    140  1.1     skrll static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target,
    141  1.1     skrll     struct pvscsi_hcb *hcb);
    142  1.1     skrll 
    143  1.1     skrll static void pvscsi_process_completion(struct pvscsi_softc *sc,
    144  1.1     skrll     struct pvscsi_ring_cmp_desc *e);
    145  1.1     skrll static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc);
    146  1.1     skrll static void pvscsi_process_msg(struct pvscsi_softc *sc,
    147  1.1     skrll     struct pvscsi_ring_msg_desc *e);
    148  1.1     skrll static void pvscsi_process_msg_ring(struct pvscsi_softc *sc);
    149  1.1     skrll 
    150  1.1     skrll static void pvscsi_intr_locked(struct pvscsi_softc *sc);
    151  1.1     skrll static int pvscsi_intr(void *xsc);
    152  1.1     skrll 
    153  1.1     skrll static void pvscsi_scsipi_request(struct scsipi_channel *,
    154  1.1     skrll     scsipi_adapter_req_t, void *);
    155  1.1     skrll 
    156  1.1     skrll static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    157  1.1     skrll     struct pvscsi_hcb *hcb);
    158  1.1     skrll static inline struct pvscsi_hcb *pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    159  1.1     skrll     uint64_t context);
    160  1.1     skrll static struct pvscsi_hcb * pvscsi_hcb_get(struct pvscsi_softc *sc);
    161  1.1     skrll static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb);
    162  1.1     skrll 
    163  1.1     skrll static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma);
    164  1.1     skrll static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    165  1.1     skrll     bus_size_t size, bus_size_t alignment);
    166  1.1     skrll static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc,
    167  1.1     skrll     struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages);
    168  1.1     skrll static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc,
    169  1.1     skrll     uint32_t hcbs_allocated);
    170  1.1     skrll static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc);
    171  1.1     skrll static void pvscsi_free_rings(struct pvscsi_softc *sc);
    172  1.1     skrll static int pvscsi_allocate_rings(struct pvscsi_softc *sc);
    173  1.1     skrll static void pvscsi_free_interrupts(struct pvscsi_softc *sc);
    174  1.1     skrll static int pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *);
    175  1.1     skrll static void pvscsi_free_all(struct pvscsi_softc *sc);
    176  1.1     skrll 
    177  1.1     skrll static void pvscsi_attach(device_t, device_t, void *);
    178  1.1     skrll static int pvscsi_detach(device_t, int);
    179  1.1     skrll static int pvscsi_probe(device_t, cfdata_t, void *);
    180  1.1     skrll 
    181  1.1     skrll #define pvscsi_get_tunable(_sc, _name, _value)	(_value)
    182  1.1     skrll 
    183  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    184  1.1     skrll static int pvscsi_log_level = 1;
    185  1.1     skrll #endif
    186  1.1     skrll 
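                        /*
                         * Create a read/write hw.pvscsi.<name> sysctl node backed by the
                         * like-named pvscsi_<name> variable below.
                         */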
    187  1.1     skrll #define TUNABLE_INT(__x, __d)					\
    188  1.1     skrll 	err = sysctl_createv(clog, 0, &rnode, &cnode,		\
    189  1.1     skrll 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,	\
    190  1.1     skrll 	    #__x, SYSCTL_DESCR(__d),				\
    191  1.1     skrll 	    NULL, 0, &(pvscsi_ ## __x), sizeof(pvscsi_ ## __x), \
    192  1.1     skrll 	    CTL_CREATE,	CTL_EOL);				\
    193  1.1     skrll 	if (err)						\
    194  1.1     skrll 		goto fail;
    195  1.1     skrll 
    196  1.1     skrll static int pvscsi_request_ring_pages = 0;
    197  1.1     skrll static int pvscsi_use_msg = 1;
    198  1.1     skrll static int pvscsi_use_msi = 1;
    199  1.1     skrll static int pvscsi_use_msix = 1;
    200  1.1     skrll static int pvscsi_use_req_call_threshold = 0;
    201  1.1     skrll static int pvscsi_max_queue_depth = 0;
    202  1.1     skrll 
    203  1.1     skrll SYSCTL_SETUP(sysctl_hw_pvscsi_setup, "sysctl hw.pvscsi setup")
    204  1.1     skrll {
    205  1.1     skrll 	int err;
    206  1.1     skrll 	const struct sysctlnode *rnode;
    207  1.1     skrll 	const struct sysctlnode *cnode;
    208  1.1     skrll 
    209  1.1     skrll 	err = sysctl_createv(clog, 0, NULL, &rnode,
    210  1.1     skrll 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "pvscsi",
    211  1.1     skrll 	    SYSCTL_DESCR("pvscsi global controls"),
    212  1.1     skrll 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
    213  1.1     skrll 
    214  1.1     skrll 	if (err)
    215  1.1     skrll 		goto fail;
    216  1.1     skrll 
    217  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    218  1.1     skrll 	TUNABLE_INT(log_level, "Enable debugging output");
    219  1.1     skrll #endif
    220  1.1     skrll 
    221  1.1     skrll 	TUNABLE_INT(request_ring_pages, "No. of pages for the request ring");
    222  1.1     skrll 	TUNABLE_INT(use_msg, "Use message passing");
    223  1.1     skrll 	TUNABLE_INT(use_msi, "Use MSI interrupt");
     224  1.1     skrll 	TUNABLE_INT(use_msix, "Use MSI-X interrupt");
    225  1.1     skrll 	TUNABLE_INT(use_req_call_threshold, "Use request limit");
    226  1.1     skrll 	TUNABLE_INT(max_queue_depth, "Maximum size of request queue");
    227  1.1     skrll 
    228  1.1     skrll 	return;
    229  1.1     skrll fail:
    230  1.1     skrll 	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
    231  1.1     skrll }
    232  1.1     skrll 
    233  1.1     skrll struct pvscsi_sg_list {
    234  1.1     skrll 	struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
    235  1.1     skrll };
    236  1.1     skrll 
    237  1.1     skrll #define	PVSCSI_ABORT_TIMEOUT	2
    238  1.1     skrll #define	PVSCSI_RESET_TIMEOUT	10
    239  1.1     skrll 
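                        /*
                         * hcb->recovery states.  pvscsi_timeout() escalates through these
                         * in order: abort, then device reset, then bus reset, then full
                         * adapter reset.
                         */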
    240  1.1     skrll #define	PVSCSI_HCB_NONE		0
    241  1.1     skrll #define	PVSCSI_HCB_ABORT	1
    242  1.1     skrll #define	PVSCSI_HCB_DEVICE_RESET	2
    243  1.1     skrll #define	PVSCSI_HCB_BUS_RESET	3
    244  1.1     skrll 
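                        /*
                         * Per-command state (host command block): the scsipi xfer, its
                         * request ring entry, the current timeout-recovery step, and the
                         * per-command DMA map plus sense buffer and sg list slices.
                         */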
    245  1.1     skrll struct pvscsi_hcb {
    246  1.1     skrll 	struct scsipi_xfer 		*xs;
    247  1.1     skrll 	struct pvscsi_softc		*sc;
    248  1.1     skrll 
    249  1.1     skrll 	struct pvscsi_ring_req_desc	*e;
    250  1.1     skrll 	int				 recovery;
    251  1.1     skrll 	SLIST_ENTRY(pvscsi_hcb)		 links;
    252  1.1     skrll 
    253  1.1     skrll 	bus_dmamap_t			 dma_map;
    254  1.1     skrll 	bus_addr_t			 dma_map_offset;
    255  1.1     skrll 	bus_size_t			 dma_map_size;
    256  1.1     skrll 	void				*sense_buffer;
    257  1.1     skrll 	bus_addr_t			 sense_buffer_paddr;
    258  1.1     skrll 	struct pvscsi_sg_list		*sg_list;
    259  1.1     skrll 	bus_addr_t			 sg_list_paddr;
    260  1.1     skrll 	bus_addr_t			 sg_list_offset;
    261  1.1     skrll };
    262  1.1     skrll 
    263  1.1     skrll struct pvscsi_dma {
    264  1.1     skrll 	bus_dmamap_t		 map;
    265  1.1     skrll 	void		        *vaddr;
    266  1.1     skrll 	bus_addr_t	 	 paddr;
    267  1.1     skrll 	bus_size_t	 	 size;
    268  1.1     skrll 	bus_dma_segment_t	 seg[1];
    269  1.1     skrll };
    270  1.1     skrll 
    271  1.1     skrll struct pvscsi_softc {
    272  1.1     skrll 	device_t		 dev;
    273  1.1     skrll 	kmutex_t		 lock;
    274  1.1     skrll 
    275  1.1     skrll 	device_t		 sc_scsibus_dv;
    276  1.1     skrll 	struct scsipi_adapter	 sc_adapter;
    277  1.1     skrll 	struct scsipi_channel 	 sc_channel;
    278  1.1     skrll 
    279  1.1     skrll 	struct pvscsi_rings_state	*rings_state;
    280  1.1     skrll 	struct pvscsi_ring_req_desc	*req_ring;
    281  1.1     skrll 	struct pvscsi_ring_cmp_desc	*cmp_ring;
    282  1.1     skrll 	struct pvscsi_ring_msg_desc	*msg_ring;
    283  1.1     skrll 	uint32_t		 hcb_cnt;
    284  1.1     skrll 	struct pvscsi_hcb	*hcbs;
    285  1.1     skrll 	SLIST_HEAD(, pvscsi_hcb) free_list;
    286  1.1     skrll 
    287  1.1     skrll 	bus_dma_tag_t		sc_dmat;
    288  1.1     skrll 	bus_space_tag_t		sc_memt;
    289  1.1     skrll 	bus_space_handle_t	sc_memh;
    290  1.1     skrll 	bus_size_t		sc_mems;
    291  1.1     skrll 
    292  1.1     skrll 	bool		 use_msg;
    293  1.1     skrll 	uint32_t	 max_targets;
    294  1.1     skrll 	int		 mm_rid;
    295  1.1     skrll 	int		 irq_id;
    296  1.1     skrll 	int		 use_req_call_threshold;
    297  1.1     skrll 
    298  1.1     skrll 	pci_chipset_tag_t	 sc_pc;
    299  1.1     skrll 	pci_intr_handle_t *	 sc_pihp;
    300  1.1     skrll 	void			*sc_ih;
    301  1.1     skrll 
    302  1.1     skrll 	uint64_t	rings_state_ppn;
    303  1.1     skrll 	uint32_t	req_ring_num_pages;
    304  1.1     skrll 	uint64_t	req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING];
    305  1.1     skrll 	uint32_t	cmp_ring_num_pages;
    306  1.1     skrll 	uint64_t	cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING];
    307  1.1     skrll 	uint32_t	msg_ring_num_pages;
    308  1.1     skrll 	uint64_t	msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING];
    309  1.1     skrll 
    310  1.1     skrll 	struct	pvscsi_dma rings_state_dma;
    311  1.1     skrll 	struct	pvscsi_dma req_ring_dma;
    312  1.1     skrll 	struct	pvscsi_dma cmp_ring_dma;
    313  1.1     skrll 	struct	pvscsi_dma msg_ring_dma;
    314  1.1     skrll 
    315  1.1     skrll 	struct	pvscsi_dma sg_list_dma;
    316  1.1     skrll 	struct	pvscsi_dma sense_buffer_dma;
    317  1.1     skrll };
    318  1.1     skrll 
    319  1.1     skrll CFATTACH_DECL3_NEW(pvscsi, sizeof(struct pvscsi_softc),
    320  1.1     skrll     pvscsi_probe, pvscsi_attach, pvscsi_detach, NULL, NULL, NULL,
    321  1.1     skrll     DVF_DETACH_SHUTDOWN);
    322  1.1     skrll 
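                        /*
                         * Sync helpers for memory shared with the device: sync only the
                         * named member of *structptr (PVSCSI_DMA_SYNC_STATE) or only the
                         * ring entry at idx (PVSCSI_DMA_SYNC_RING) instead of the whole
                         * mapping.
                         */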
    323  1.4  riastrad #define	PVSCSI_DMA_SYNC_STATE(sc, dma, structptr, member, ops)		      \
    324  1.4  riastrad 	bus_dmamap_sync((sc)->sc_dmat, (dma)->map,			      \
    325  1.4  riastrad 	    /*offset*/offsetof(__typeof__(*(structptr)), member),	      \
    326  1.4  riastrad 	    /*length*/sizeof((structptr)->member),			      \
    327  1.4  riastrad 	    (ops))
    328  1.4  riastrad 
    329  1.4  riastrad #define	PVSCSI_DMA_SYNC_RING(sc, dma, ring, idx, ops)			      \
    330  1.4  riastrad 	bus_dmamap_sync((sc)->sc_dmat, (dma)->map,			      \
    331  1.4  riastrad 	    /*offset*/sizeof(*(ring)) * (idx),				      \
    332  1.4  riastrad 	    /*length*/sizeof(*(ring)),					      \
    333  1.4  riastrad 	    (ops))
    334  1.4  riastrad 
    335  1.1     skrll static inline uint32_t
    336  1.1     skrll pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset)
    337  1.1     skrll {
    338  1.1     skrll 
    339  1.1     skrll 	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, offset));
    340  1.1     skrll }
    341  1.1     skrll 
    342  1.1     skrll static inline void
    343  1.1     skrll pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val)
    344  1.1     skrll {
    345  1.1     skrll 
    346  1.1     skrll 	bus_space_write_4(sc->sc_memt, sc->sc_memh, offset, val);
    347  1.1     skrll }
    348  1.1     skrll 
    349  1.1     skrll static inline uint32_t
    350  1.1     skrll pvscsi_read_intr_status(struct pvscsi_softc *sc)
    351  1.1     skrll {
    352  1.1     skrll 
    353  1.1     skrll 	return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS));
    354  1.1     skrll }
    355  1.1     skrll 
    356  1.1     skrll static inline void
    357  1.1     skrll pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val)
    358  1.1     skrll {
    359  1.1     skrll 
    360  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val);
    361  1.1     skrll }
    362  1.1     skrll 
    363  1.1     skrll static inline void
    364  1.1     skrll pvscsi_intr_enable(struct pvscsi_softc *sc)
    365  1.1     skrll {
    366  1.1     skrll 	uint32_t mask;
    367  1.1     skrll 
    368  1.1     skrll 	mask = PVSCSI_INTR_CMPL_MASK;
    369  1.1     skrll 	if (sc->use_msg) {
    370  1.1     skrll 		mask |= PVSCSI_INTR_MSG_MASK;
    371  1.1     skrll 	}
    372  1.1     skrll 
    373  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask);
    374  1.1     skrll }
    375  1.1     skrll 
    376  1.1     skrll static inline void
    377  1.1     skrll pvscsi_intr_disable(struct pvscsi_softc *sc)
    378  1.1     skrll {
    379  1.1     skrll 
    380  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0);
    381  1.1     skrll }
    382  1.1     skrll 
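                        /*
                         * Notify the device that new requests have been posted on the
                         * request ring.  For read/write CDBs the doorbell write is skipped
                         * while the outstanding request count is below req_call_threshold
                         * (when that feature is enabled), since each kick is presumed to
                         * trap to the hypervisor.  All other commands kick the non-R/W
                         * doorbell unconditionally.
                         */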
    383  1.1     skrll static void
    384  1.1     skrll pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0)
    385  1.1     skrll {
    386  1.4  riastrad 	struct pvscsi_dma *s_dma;
    387  1.1     skrll 	struct pvscsi_rings_state *s;
    388  1.1     skrll 
    389  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "%s: cdb0 %#x\n", __func__, cdb0);
    390  1.1     skrll 	if (cdb0 == SCSI_READ_6_COMMAND  || cdb0 == READ_10  ||
    391  1.1     skrll 	    cdb0 == READ_12  || cdb0 == READ_16  ||
    392  1.1     skrll 	    cdb0 == SCSI_WRITE_6_COMMAND || cdb0 == WRITE_10 ||
    393  1.1     skrll 	    cdb0 == WRITE_12 || cdb0 == WRITE_16) {
    394  1.4  riastrad 		s_dma = &sc->rings_state_dma;
    395  1.1     skrll 		s = sc->rings_state;
    396  1.1     skrll 
    397  1.3  riastrad 		/*
    398  1.4  riastrad 		 * Ensure the command has been published before we read
    399  1.4  riastrad 		 * req_cons_idx to test whether we need to kick the
    400  1.4  riastrad 		 * host.
    401  1.3  riastrad 		 */
    402  1.3  riastrad 		paravirt_membar_sync();
    403  1.3  riastrad 
    404  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_cons_idx,
    405  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
    406  1.1     skrll 		DEBUG_PRINTF(2, sc->dev, "%s req prod %d cons %d\n", __func__,
    407  1.1     skrll 		    s->req_prod_idx, s->req_cons_idx);
    408  1.1     skrll 		if (!sc->use_req_call_threshold ||
    409  1.1     skrll 		    (s->req_prod_idx - s->req_cons_idx) >=
    410  1.1     skrll 		     s->req_call_threshold) {
    411  1.1     skrll 			pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
    412  1.1     skrll 			DEBUG_PRINTF(2, sc->dev, "kicked\n");
    413  1.1     skrll 		} else {
     414  1.1     skrll 			DEBUG_PRINTF(2, sc->dev, "kick skipped (below threshold)\n");
    415  1.1     skrll 		}
    416  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_cons_idx,
    417  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
    418  1.1     skrll 	} else {
    419  1.1     skrll 		s = sc->rings_state;
    420  1.4  riastrad 		/*
    421  1.4  riastrad 		 * XXX req_cons_idx in debug log might be stale, but no
    422  1.4  riastrad 		 * need for DMA sync otherwise in this branch
    423  1.4  riastrad 		 */
    424  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "%s req prod %d cons %d not checked\n", __func__,
    425  1.1     skrll 		    s->req_prod_idx, s->req_cons_idx);
    426  1.1     skrll 
    427  1.1     skrll 		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
    428  1.1     skrll 	}
    429  1.1     skrll }
    430  1.1     skrll 
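                        /*
                         * Issue a device command: write the command code to the COMMAND
                         * register, then feed the payload to COMMAND_DATA one 32-bit word
                         * at a time.  The payload length must be a multiple of 4.
                         */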
    431  1.1     skrll static void
    432  1.1     skrll pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    433  1.1     skrll 		 uint32_t len)
    434  1.1     skrll {
    435  1.1     skrll 	uint32_t *data_ptr;
    436  1.1     skrll 	int i;
    437  1.1     skrll 
    438  1.1     skrll 	KASSERTMSG(len % sizeof(uint32_t) == 0,
    439  1.1     skrll 		"command size not a multiple of 4");
    440  1.1     skrll 
    441  1.1     skrll 	data_ptr = data;
    442  1.1     skrll 	len /= sizeof(uint32_t);
    443  1.1     skrll 
    444  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd);
    445  1.1     skrll 	for (i = 0; i < len; ++i) {
    446  1.1     skrll 		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA,
    447  1.1     skrll 		   data_ptr[i]);
    448  1.1     skrll 	}
    449  1.1     skrll }
    450  1.1     skrll 
    451  1.1     skrll static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    452  1.1     skrll     struct pvscsi_hcb *hcb)
    453  1.1     skrll {
    454  1.1     skrll 
    455  1.1     skrll 	/* Offset by 1 because context must not be 0 */
    456  1.1     skrll 	return (hcb - sc->hcbs + 1);
    457  1.1     skrll }
    458  1.1     skrll 
    459  1.1     skrll static inline struct pvscsi_hcb* pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    460  1.1     skrll     uint64_t context)
    461  1.1     skrll {
    462  1.1     skrll 
    463  1.1     skrll 	return (sc->hcbs + (context - 1));
    464  1.1     skrll }
    465  1.1     skrll 
    466  1.1     skrll static struct pvscsi_hcb *
    467  1.1     skrll pvscsi_hcb_get(struct pvscsi_softc *sc)
    468  1.1     skrll {
    469  1.1     skrll 	struct pvscsi_hcb *hcb;
    470  1.1     skrll 
    471  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
    472  1.1     skrll 
    473  1.1     skrll 	hcb = SLIST_FIRST(&sc->free_list);
    474  1.1     skrll 	if (hcb) {
    475  1.1     skrll 		SLIST_REMOVE_HEAD(&sc->free_list, links);
    476  1.1     skrll 	}
    477  1.1     skrll 
    478  1.1     skrll 	return (hcb);
    479  1.1     skrll }
    480  1.1     skrll 
    481  1.1     skrll static void
    482  1.1     skrll pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
    483  1.1     skrll {
    484  1.1     skrll 
    485  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
    486  1.1     skrll 	hcb->xs = NULL;
    487  1.1     skrll 	hcb->e = NULL;
    488  1.1     skrll 	hcb->recovery = PVSCSI_HCB_NONE;
    489  1.1     skrll 	SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
    490  1.1     skrll }
    491  1.1     skrll 
    492  1.1     skrll static uint32_t
    493  1.1     skrll pvscsi_get_max_targets(struct pvscsi_softc *sc)
    494  1.1     skrll {
    495  1.1     skrll 	uint32_t max_targets;
    496  1.1     skrll 
    497  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0);
    498  1.1     skrll 
    499  1.1     skrll 	max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    500  1.1     skrll 
    501  1.1     skrll 	if (max_targets == ~0) {
    502  1.1     skrll 		max_targets = 16;
    503  1.1     skrll 	}
    504  1.1     skrll 
    505  1.1     skrll 	return (max_targets);
    506  1.1     skrll }
    507  1.1     skrll 
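                        /*
                         * Probe for and optionally enable the request-call-threshold
                         * feature.  Returns nonzero when the threshold is active, in which
                         * case pvscsi_kick_io() consults rings_state->req_call_threshold
                         * to coalesce doorbell writes.
                         */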
    508  1.1     skrll static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable)
    509  1.1     skrll {
    510  1.1     skrll 	uint32_t status;
    511  1.1     skrll 	struct pvscsi_cmd_desc_setup_req_call cmd;
    512  1.1     skrll 
    513  1.1     skrll 	if (!pvscsi_get_tunable(sc, "pvscsi_use_req_call_threshold",
    514  1.1     skrll 	    pvscsi_use_req_call_threshold)) {
    515  1.1     skrll 		return (0);
    516  1.1     skrll 	}
    517  1.1     skrll 
    518  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
    519  1.1     skrll 	    PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
    520  1.1     skrll 	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    521  1.1     skrll 
    522  1.1     skrll 	if (status != -1) {
    523  1.1     skrll 		memset(&cmd, 0, sizeof(cmd));
    524  1.1     skrll 		cmd.enable = enable;
    525  1.1     skrll 		pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
    526  1.1     skrll 		    &cmd, sizeof(cmd));
    527  1.1     skrll 		status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    528  1.1     skrll 
    529  1.4  riastrad 		/*
    530  1.4  riastrad 		 * After setup, sync req_call_threshold before use.
    531  1.4  riastrad 		 * After this point it should be stable, so no need to
    532  1.4  riastrad 		 * sync again during use.
    533  1.4  riastrad 		 */
    534  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    535  1.4  riastrad 		    sc->rings_state, req_call_threshold,
    536  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
    537  1.4  riastrad 
    538  1.1     skrll 		return (status != 0);
    539  1.1     skrll 	} else {
    540  1.1     skrll 		return (0);
    541  1.1     skrll 	}
    542  1.1     skrll }
    543  1.1     skrll 
    544  1.1     skrll static void
    545  1.1     skrll pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma)
    546  1.1     skrll {
    547  1.1     skrll 
    548  1.1     skrll 	bus_dmamap_unload(sc->sc_dmat, dma->map);
    549  1.1     skrll 	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
    550  1.1     skrll 	bus_dmamap_destroy(sc->sc_dmat, dma->map);
    551  1.1     skrll 	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
    552  1.1     skrll 
    553  1.1     skrll 	memset(dma, 0, sizeof(*dma));
    554  1.1     skrll }
    555  1.1     skrll 
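                        /*
                         * Allocate, map and load a single physically contiguous DMA area
                         * using the usual bus_dma(9) sequence: bus_dmamem_alloc,
                         * bus_dmamem_map, bus_dmamap_create, bus_dmamap_load, with
                         * matching unwind on failure.
                         */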
    556  1.1     skrll static int
    557  1.1     skrll pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    558  1.1     skrll     bus_size_t size, bus_size_t alignment)
    559  1.1     skrll {
    560  1.1     skrll 	int error;
    561  1.1     skrll 	int nsegs;
    562  1.1     skrll 
    563  1.1     skrll 	memset(dma, 0, sizeof(*dma));
    564  1.1     skrll 
    565  1.1     skrll 	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, 0, dma->seg,
    566  1.1     skrll 	    __arraycount(dma->seg), &nsegs, BUS_DMA_WAITOK);
    567  1.1     skrll 	if (error) {
    568  1.1     skrll 		aprint_normal_dev(sc->dev, "error allocating dma mem, error %d\n",
    569  1.1     skrll 		    error);
    570  1.1     skrll 		goto fail;
    571  1.1     skrll 	}
    572  1.1     skrll 
    573  1.1     skrll 	error = bus_dmamem_map(sc->sc_dmat, dma->seg, nsegs, size,
    574  1.1     skrll 	    &dma->vaddr, BUS_DMA_WAITOK);
    575  1.1     skrll 	if (error != 0) {
    576  1.1     skrll 		device_printf(sc->dev, "Failed to map DMA memory\n");
    577  1.1     skrll 		goto dmamemmap_fail;
    578  1.1     skrll 	}
    579  1.1     skrll 
    580  1.1     skrll 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
    581  1.1     skrll 	    BUS_DMA_WAITOK, &dma->map);
    582  1.1     skrll 	if (error != 0) {
    583  1.1     skrll 		device_printf(sc->dev, "Failed to create DMA map\n");
    584  1.1     skrll 		goto dmamapcreate_fail;
    585  1.1     skrll 	}
    586  1.1     skrll 
    587  1.1     skrll 	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->vaddr, size,
    588  1.1     skrll 	    NULL, BUS_DMA_WAITOK);
    589  1.1     skrll 	if (error) {
     590  1.1     skrll 		aprint_normal_dev(sc->dev, "error loading dma map, error %d\n",
    591  1.1     skrll 		    error);
    592  1.1     skrll 		goto dmamapload_fail;
    593  1.1     skrll 	}
    594  1.1     skrll 
    595  1.1     skrll 	dma->paddr = dma->map->dm_segs[0].ds_addr;
    596  1.1     skrll 	dma->size = size;
    597  1.1     skrll 
    598  1.1     skrll 	return 0;
    599  1.1     skrll 
    600  1.1     skrll dmamapload_fail:
    601  1.1     skrll 	bus_dmamap_destroy(sc->sc_dmat, dma->map);
    602  1.1     skrll dmamapcreate_fail:
    603  1.1     skrll 	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
    604  1.1     skrll dmamemmap_fail:
    605  1.1     skrll 	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
    606  1.1     skrll fail:
    607  1.1     skrll 
    608  1.1     skrll 	return (error);
    609  1.1     skrll }
    610  1.1     skrll 
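                        /*
                         * Allocate num_pages of page-aligned DMA memory and record its
                         * physical page numbers (PPNs) in ppn_list for the device.  The
                         * PPNs are consecutive because pvscsi_dma_alloc() returns a single
                         * segment.
                         */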
    611  1.1     skrll static int
    612  1.1     skrll pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    613  1.1     skrll     uint64_t *ppn_list, uint32_t num_pages)
    614  1.1     skrll {
    615  1.1     skrll 	int error;
    616  1.1     skrll 	uint32_t i;
    617  1.1     skrll 	uint64_t ppn;
    618  1.1     skrll 
    619  1.1     skrll 	error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE);
    620  1.1     skrll 	if (error) {
    621  1.1     skrll 		aprint_normal_dev(sc->dev, "Error allocating pages, error %d\n",
    622  1.1     skrll 		    error);
    623  1.1     skrll 		return (error);
    624  1.1     skrll 	}
    625  1.1     skrll 
    626  1.1     skrll 	ppn = dma->paddr >> PAGE_SHIFT;
    627  1.1     skrll 	for (i = 0; i < num_pages; i++) {
    628  1.1     skrll 		ppn_list[i] = ppn + i;
    629  1.1     skrll 	}
    630  1.1     skrll 
    631  1.1     skrll 	return (0);
    632  1.1     skrll }
    633  1.1     skrll 
    634  1.1     skrll static void
    635  1.1     skrll pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated)
    636  1.1     skrll {
    637  1.1     skrll 	int i;
    638  1.1     skrll 	struct pvscsi_hcb *hcb;
    639  1.1     skrll 
    640  1.1     skrll 	for (i = 0; i < hcbs_allocated; ++i) {
    641  1.1     skrll 		hcb = sc->hcbs + i;
    642  1.1     skrll 		bus_dmamap_destroy(sc->sc_dmat, hcb->dma_map);
     643  1.1     skrll 	}
    644  1.1     skrll 
    645  1.1     skrll 	pvscsi_dma_free(sc, &sc->sense_buffer_dma);
    646  1.1     skrll 	pvscsi_dma_free(sc, &sc->sg_list_dma);
    647  1.1     skrll }
    648  1.1     skrll 
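                        /*
                         * Set up per-hcb resources: carve the shared sense buffer and sg
                         * list DMA areas into one slice per hcb, create a data DMA map for
                         * each hcb and build the free list.  On failure, undo the maps
                         * created so far.
                         */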
    649  1.1     skrll static int
    650  1.1     skrll pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc)
    651  1.1     skrll {
    652  1.1     skrll 	int i;
    653  1.1     skrll 	int error;
    654  1.1     skrll 	struct pvscsi_hcb *hcb;
    655  1.1     skrll 
    656  1.1     skrll 	i = 0;
    657  1.1     skrll 
    658  1.1     skrll 	error = pvscsi_dma_alloc(sc, &sc->sg_list_dma,
    659  1.1     skrll 	    sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1);
    660  1.1     skrll 	if (error) {
    661  1.1     skrll 		aprint_normal_dev(sc->dev,
     662  1.1     skrll 		    "Error allocating sg list DMA memory, error %d\n", error);
    663  1.1     skrll 		goto fail;
    664  1.1     skrll 	}
    665  1.1     skrll 
    666  1.1     skrll 	error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma,
    667  1.1     skrll 				 PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1);
    668  1.1     skrll 	if (error) {
    669  1.1     skrll 		aprint_normal_dev(sc->dev,
     670  1.1     skrll 		    "Error allocating sense buffer DMA memory, error %d\n", error);
    671  1.1     skrll 		goto fail;
    672  1.1     skrll 	}
    673  1.1     skrll 
    674  1.1     skrll 	for (i = 0; i < sc->hcb_cnt; ++i) {
    675  1.1     skrll 		hcb = sc->hcbs + i;
    676  1.1     skrll 
    677  1.1     skrll 		error = bus_dmamap_create(sc->sc_dmat, PVSCSI_MAXPHYS,
    678  1.1     skrll 		    PVSCSI_MAXPHYS_SEGS, PVSCSI_MAXPHYS, 0,
    679  1.1     skrll 		    BUS_DMA_WAITOK, &hcb->dma_map);
    680  1.1     skrll 		if (error) {
    681  1.1     skrll 			aprint_normal_dev(sc->dev,
    682  1.1     skrll 			    "Error creating dma map for hcb %d, error %d\n",
    683  1.1     skrll 			    i, error);
    684  1.1     skrll 			goto fail;
    685  1.1     skrll 		}
    686  1.1     skrll 
    687  1.1     skrll 		hcb->sc = sc;
    688  1.1     skrll 		hcb->dma_map_offset = PVSCSI_SENSE_LENGTH * i;
    689  1.1     skrll 		hcb->dma_map_size = PVSCSI_SENSE_LENGTH;
    690  1.1     skrll 		hcb->sense_buffer =
    691  1.1     skrll 		    (void *)((char *)sc->sense_buffer_dma.vaddr +
    692  1.1     skrll 		    PVSCSI_SENSE_LENGTH * i);
    693  1.1     skrll 		hcb->sense_buffer_paddr = sc->sense_buffer_dma.paddr +
    694  1.1     skrll 		    PVSCSI_SENSE_LENGTH * i;
    695  1.1     skrll 
    696  1.1     skrll 		hcb->sg_list =
    697  1.1     skrll 		    (struct pvscsi_sg_list *)((char *)sc->sg_list_dma.vaddr +
    698  1.1     skrll 		    sizeof(struct pvscsi_sg_list) * i);
    699  1.1     skrll 		hcb->sg_list_paddr =
    700  1.1     skrll 		    sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i;
    701  1.1     skrll 		hcb->sg_list_offset = sizeof(struct pvscsi_sg_list) * i;
    702  1.1     skrll 	}
    703  1.1     skrll 
    704  1.1     skrll 	SLIST_INIT(&sc->free_list);
    705  1.1     skrll 	for (i = (sc->hcb_cnt - 1); i >= 0; --i) {
    706  1.1     skrll 		hcb = sc->hcbs + i;
    707  1.1     skrll 		SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
    708  1.1     skrll 	}
    709  1.1     skrll 
    710  1.1     skrll fail:
    711  1.1     skrll 	if (error) {
    712  1.1     skrll 		pvscsi_dma_free_per_hcb(sc, i);
    713  1.1     skrll 	}
    714  1.1     skrll 
    715  1.1     skrll 	return (error);
    716  1.1     skrll }
    717  1.1     skrll 
    718  1.1     skrll static void
    719  1.1     skrll pvscsi_free_rings(struct pvscsi_softc *sc)
    720  1.1     skrll {
    721  1.1     skrll 
    722  1.1     skrll 	pvscsi_dma_free(sc, &sc->rings_state_dma);
    723  1.1     skrll 	pvscsi_dma_free(sc, &sc->req_ring_dma);
    724  1.1     skrll 	pvscsi_dma_free(sc, &sc->cmp_ring_dma);
    725  1.1     skrll 	if (sc->use_msg) {
    726  1.1     skrll 		pvscsi_dma_free(sc, &sc->msg_ring_dma);
    727  1.1     skrll 	}
    728  1.1     skrll }
    729  1.1     skrll 
    730  1.1     skrll static int
    731  1.1     skrll pvscsi_allocate_rings(struct pvscsi_softc *sc)
    732  1.1     skrll {
    733  1.1     skrll 	int error;
    734  1.1     skrll 
    735  1.1     skrll 	error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma,
    736  1.1     skrll 	    &sc->rings_state_ppn, 1);
    737  1.1     skrll 	if (error) {
    738  1.1     skrll 		aprint_normal_dev(sc->dev,
    739  1.1     skrll 		    "Error allocating rings state, error = %d\n", error);
    740  1.1     skrll 		goto fail;
    741  1.1     skrll 	}
    742  1.1     skrll 	sc->rings_state = sc->rings_state_dma.vaddr;
    743  1.1     skrll 
    744  1.1     skrll 	error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn,
    745  1.1     skrll 	    sc->req_ring_num_pages);
    746  1.1     skrll 	if (error) {
    747  1.1     skrll 		aprint_normal_dev(sc->dev,
    748  1.1     skrll 		    "Error allocating req ring pages, error = %d\n", error);
    749  1.1     skrll 		goto fail;
    750  1.1     skrll 	}
    751  1.1     skrll 	sc->req_ring = sc->req_ring_dma.vaddr;
    752  1.1     skrll 
    753  1.1     skrll 	error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn,
    754  1.1     skrll 	    sc->cmp_ring_num_pages);
    755  1.1     skrll 	if (error) {
    756  1.1     skrll 		aprint_normal_dev(sc->dev,
    757  1.1     skrll 		    "Error allocating cmp ring pages, error = %d\n", error);
    758  1.1     skrll 		goto fail;
    759  1.1     skrll 	}
    760  1.1     skrll 	sc->cmp_ring = sc->cmp_ring_dma.vaddr;
    761  1.1     skrll 
    762  1.1     skrll 	sc->msg_ring = NULL;
    763  1.1     skrll 	if (sc->use_msg) {
    764  1.1     skrll 		error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma,
    765  1.1     skrll 		    sc->msg_ring_ppn, sc->msg_ring_num_pages);
    766  1.1     skrll 		if (error) {
    767  1.1     skrll 			aprint_normal_dev(sc->dev,
     768  1.1     skrll 			    "Error allocating msg ring pages, error = %d\n",
    769  1.1     skrll 			    error);
    770  1.1     skrll 			goto fail;
    771  1.1     skrll 		}
    772  1.1     skrll 		sc->msg_ring = sc->msg_ring_dma.vaddr;
    773  1.1     skrll 	}
    774  1.1     skrll 
    775  1.1     skrll fail:
    776  1.1     skrll 	if (error) {
    777  1.1     skrll 		pvscsi_free_rings(sc);
    778  1.1     skrll 	}
    779  1.1     skrll 	return (error);
    780  1.1     skrll }
    781  1.1     skrll 
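                        /*
                         * Hand the ring page numbers to the device with SETUP_RINGS.  The
                         * device is expected to initialize the shared rings_state page in
                         * response, which is why the *_num_entries_log2 fields are synced
                         * afterwards, before first use.
                         */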
    782  1.1     skrll static void
    783  1.1     skrll pvscsi_setup_rings(struct pvscsi_softc *sc)
    784  1.1     skrll {
    785  1.1     skrll 	struct pvscsi_cmd_desc_setup_rings cmd;
    786  1.1     skrll 	uint32_t i;
    787  1.1     skrll 
    788  1.1     skrll 	memset(&cmd, 0, sizeof(cmd));
    789  1.1     skrll 
    790  1.1     skrll 	cmd.rings_state_ppn = sc->rings_state_ppn;
    791  1.1     skrll 
    792  1.1     skrll 	cmd.req_ring_num_pages = sc->req_ring_num_pages;
    793  1.1     skrll 	for (i = 0; i < sc->req_ring_num_pages; ++i) {
    794  1.1     skrll 		cmd.req_ring_ppns[i] = sc->req_ring_ppn[i];
    795  1.1     skrll 	}
    796  1.1     skrll 
    797  1.1     skrll 	cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages;
    798  1.1     skrll 	for (i = 0; i < sc->cmp_ring_num_pages; ++i) {
    799  1.1     skrll 		cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i];
    800  1.1     skrll 	}
    801  1.1     skrll 
    802  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
    803  1.4  riastrad 
    804  1.4  riastrad 	/*
    805  1.4  riastrad 	 * After setup, sync *_num_entries_log2 before use.  After this
    806  1.4  riastrad 	 * point they should be stable, so no need to sync again during
    807  1.4  riastrad 	 * use.
    808  1.4  riastrad 	 */
    809  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    810  1.4  riastrad 	    sc->rings_state, req_num_entries_log2,
    811  1.4  riastrad 	    BUS_DMASYNC_POSTREAD);
    812  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    813  1.4  riastrad 	    sc->rings_state, cmp_num_entries_log2,
    814  1.4  riastrad 	    BUS_DMASYNC_POSTREAD);
    815  1.1     skrll }
    816  1.1     skrll 
    817  1.1     skrll static int
    818  1.1     skrll pvscsi_hw_supports_msg(struct pvscsi_softc *sc)
    819  1.1     skrll {
    820  1.1     skrll 	uint32_t status;
    821  1.1     skrll 
    822  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
    823  1.1     skrll 	    PVSCSI_CMD_SETUP_MSG_RING);
    824  1.1     skrll 	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    825  1.1     skrll 
    826  1.1     skrll 	return (status != -1);
    827  1.1     skrll }
    828  1.1     skrll 
    829  1.1     skrll static void
    830  1.1     skrll pvscsi_setup_msg_ring(struct pvscsi_softc *sc)
    831  1.1     skrll {
    832  1.1     skrll 	struct pvscsi_cmd_desc_setup_msg_ring cmd;
    833  1.1     skrll 	uint32_t i;
    834  1.1     skrll 
    835  1.1     skrll 	KASSERTMSG(sc->use_msg, "msg is not being used");
    836  1.1     skrll 
    837  1.1     skrll 	memset(&cmd, 0, sizeof(cmd));
    838  1.1     skrll 
    839  1.1     skrll 	cmd.num_pages = sc->msg_ring_num_pages;
    840  1.1     skrll 	for (i = 0; i < sc->msg_ring_num_pages; ++i) {
    841  1.1     skrll 		cmd.ring_ppns[i] = sc->msg_ring_ppn[i];
    842  1.1     skrll 	}
    843  1.1     skrll 
    844  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
    845  1.4  riastrad 
    846  1.4  riastrad 	/*
    847  1.4  riastrad 	 * After setup, sync msg_num_entries_log2 before use.  After
    848  1.4  riastrad 	 * this point it should be stable, so no need to sync again
    849  1.4  riastrad 	 * during use.
    850  1.4  riastrad 	 */
    851  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    852  1.4  riastrad 	    sc->rings_state, msg_num_entries_log2,
    853  1.4  riastrad 	    BUS_DMASYNC_POSTREAD);
    854  1.1     skrll }
    855  1.1     skrll 
    856  1.1     skrll static void
    857  1.1     skrll pvscsi_adapter_reset(struct pvscsi_softc *sc)
    858  1.1     skrll {
    859  1.1     skrll 	aprint_normal_dev(sc->dev, "Adapter Reset\n");
    860  1.1     skrll 
    861  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
    862  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    863  1.1     skrll 	uint32_t val =
    864  1.1     skrll #endif
    865  1.1     skrll 	pvscsi_read_intr_status(sc);
    866  1.1     skrll 
    867  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val);
    868  1.1     skrll }
    869  1.1     skrll 
    870  1.1     skrll static void
    871  1.1     skrll pvscsi_bus_reset(struct pvscsi_softc *sc)
    872  1.1     skrll {
    873  1.1     skrll 
    874  1.1     skrll 	aprint_normal_dev(sc->dev, "Bus Reset\n");
    875  1.1     skrll 
    876  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0);
    877  1.1     skrll 	pvscsi_process_cmp_ring(sc);
    878  1.1     skrll 
    879  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "bus reset done\n");
    880  1.1     skrll }
    881  1.1     skrll 
    882  1.1     skrll static void
    883  1.1     skrll pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target)
    884  1.1     skrll {
    885  1.1     skrll 	struct pvscsi_cmd_desc_reset_device cmd;
    886  1.1     skrll 
    887  1.1     skrll 	memset(&cmd, 0, sizeof(cmd));
    888  1.1     skrll 
    889  1.1     skrll 	cmd.target = target;
    890  1.1     skrll 
    891  1.1     skrll 	aprint_normal_dev(sc->dev, "Device reset for target %u\n", target);
    892  1.1     skrll 
    893  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof cmd);
    894  1.1     skrll 	pvscsi_process_cmp_ring(sc);
    895  1.1     skrll 
    896  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "device reset done\n");
    897  1.1     skrll }
    898  1.1     skrll 
    899  1.1     skrll static void
    900  1.1     skrll pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, struct pvscsi_hcb *hcb)
    901  1.1     skrll {
    902  1.1     skrll 	struct pvscsi_cmd_desc_abort_cmd cmd;
    903  1.1     skrll 	uint64_t context;
    904  1.1     skrll 
    905  1.1     skrll 	pvscsi_process_cmp_ring(sc);
    906  1.1     skrll 
    907  1.1     skrll 	if (hcb != NULL) {
    908  1.1     skrll 		context = pvscsi_hcb_to_context(sc, hcb);
    909  1.1     skrll 
    910  1.1     skrll 		memset(&cmd, 0, sizeof cmd);
    911  1.1     skrll 		cmd.target = target;
    912  1.1     skrll 		cmd.context = context;
    913  1.1     skrll 
    914  1.1     skrll 		aprint_normal_dev(sc->dev, "Abort for target %u context %llx\n",
    915  1.1     skrll 		    target, (unsigned long long)context);
    916  1.1     skrll 
    917  1.1     skrll 		pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
    918  1.1     skrll 		pvscsi_process_cmp_ring(sc);
    919  1.1     skrll 
    920  1.1     skrll 		DEBUG_PRINTF(2, sc->dev, "abort done\n");
    921  1.1     skrll 	} else {
    922  1.1     skrll 		DEBUG_PRINTF(1, sc->dev,
    923  1.1     skrll 		    "Target %u hcb %p not found for abort\n", target, hcb);
    924  1.1     skrll 	}
    925  1.1     skrll }
    926  1.1     skrll 
    927  1.1     skrll static int
    928  1.1     skrll pvscsi_probe(device_t dev, cfdata_t cf, void *aux)
    929  1.1     skrll {
    930  1.1     skrll 	const struct pci_attach_args *pa = aux;
    931  1.1     skrll 
    932  1.1     skrll 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
    933  1.1     skrll 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI) {
    934  1.1     skrll 		return 1;
    935  1.1     skrll 	}
    936  1.1     skrll 	return 0;
    937  1.1     skrll }
    938  1.1     skrll 
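                        /*
                         * Command timeout handler.  Recovery escalates one step per
                         * timeout: abort the command, then reset the device, then reset
                         * the bus, and finally reset the whole adapter; each intermediate
                         * step rearms the callout.
                         */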
    939  1.1     skrll static void
    940  1.1     skrll pvscsi_timeout(void *arg)
    941  1.1     skrll {
    942  1.1     skrll 	struct pvscsi_hcb *hcb = arg;
    943  1.1     skrll 	struct scsipi_xfer *xs = hcb->xs;
    944  1.1     skrll 
    945  1.1     skrll 	if (xs == NULL) {
    946  1.1     skrll 		/* Already completed */
    947  1.1     skrll 		return;
    948  1.1     skrll 	}
    949  1.1     skrll 
    950  1.1     skrll 	struct pvscsi_softc *sc = hcb->sc;
    951  1.1     skrll 
    952  1.1     skrll 	mutex_enter(&sc->lock);
    953  1.1     skrll 
    954  1.1     skrll 	scsipi_printaddr(xs->xs_periph);
    955  1.1     skrll 	printf("command timeout, CDB: ");
    956  1.1     skrll 	scsipi_print_cdb(xs->cmd);
    957  1.1     skrll 	printf("\n");
    958  1.1     skrll 
    959  1.1     skrll 	switch (hcb->recovery) {
    960  1.1     skrll 	case PVSCSI_HCB_NONE:
    961  1.1     skrll 		hcb->recovery = PVSCSI_HCB_ABORT;
    962  1.1     skrll 		pvscsi_abort(sc, hcb->e->target, hcb);
    963  1.1     skrll 		callout_reset(&xs->xs_callout,
    964  1.1     skrll 		    mstohz(PVSCSI_ABORT_TIMEOUT * 1000),
    965  1.1     skrll 		    pvscsi_timeout, hcb);
    966  1.1     skrll 		break;
    967  1.1     skrll 	case PVSCSI_HCB_ABORT:
    968  1.1     skrll 		hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
    969  1.1     skrll 		pvscsi_device_reset(sc, hcb->e->target);
    970  1.1     skrll 		callout_reset(&xs->xs_callout,
    971  1.1     skrll 		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
    972  1.1     skrll 		    pvscsi_timeout, hcb);
    973  1.1     skrll 		break;
    974  1.1     skrll 	case PVSCSI_HCB_DEVICE_RESET:
    975  1.1     skrll 		hcb->recovery = PVSCSI_HCB_BUS_RESET;
    976  1.1     skrll 		pvscsi_bus_reset(sc);
    977  1.1     skrll 		callout_reset(&xs->xs_callout,
    978  1.1     skrll 		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
    979  1.1     skrll 		    pvscsi_timeout, hcb);
    980  1.1     skrll 		break;
    981  1.1     skrll 	case PVSCSI_HCB_BUS_RESET:
    982  1.1     skrll 		pvscsi_adapter_reset(sc);
    983  1.1     skrll 		break;
     984  1.1     skrll 	}
    985  1.1     skrll 	mutex_exit(&sc->lock);
    986  1.1     skrll }
    987  1.1     skrll 
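                        /*
                         * Translate one completion descriptor into scsipi terms: map the
                         * host status (btstat) and SCSI status (sdstat) to xs->error, copy
                         * sense data on CHECK CONDITION, return the hcb to the free list
                         * and call scsipi_done() with the softc lock temporarily dropped.
                         */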
    988  1.1     skrll static void
    989  1.1     skrll pvscsi_process_completion(struct pvscsi_softc *sc,
    990  1.1     skrll     struct pvscsi_ring_cmp_desc *e)
    991  1.1     skrll {
    992  1.1     skrll 	struct pvscsi_hcb *hcb;
    993  1.1     skrll 	struct scsipi_xfer *xs;
    994  1.1     skrll 	uint32_t error = XS_NOERROR;
    995  1.1     skrll 	uint32_t btstat;
    996  1.1     skrll 	uint32_t sdstat;
    997  1.1     skrll 	int op;
    998  1.1     skrll 
    999  1.1     skrll 	hcb = pvscsi_context_to_hcb(sc, e->context);
   1000  1.1     skrll 	xs = hcb->xs;
   1001  1.1     skrll 
   1002  1.1     skrll 	callout_stop(&xs->xs_callout);
   1003  1.1     skrll 
   1004  1.1     skrll 	btstat = e->host_status;
   1005  1.1     skrll 	sdstat = e->scsi_status;
   1006  1.1     skrll 
   1007  1.1     skrll 	xs->status = sdstat;
   1008  1.1     skrll 	xs->resid = xs->datalen - e->data_len;
   1009  1.1     skrll 
   1010  1.1     skrll 	DEBUG_PRINTF(3, sc->dev,
   1011  1.1     skrll 	    "command context %llx btstat %d (%#x) sdstat %d (%#x)\n",
   1012  1.1     skrll 	    (unsigned long long)e->context, btstat, btstat, sdstat, sdstat);
   1013  1.1     skrll 
   1014  1.1     skrll 	if ((xs->xs_control & XS_CTL_DATA_IN) == XS_CTL_DATA_IN) {
   1015  1.1     skrll 		op = BUS_DMASYNC_POSTREAD;
   1016  1.1     skrll 	} else {
   1017  1.1     skrll 		op = BUS_DMASYNC_POSTWRITE;
   1018  1.1     skrll 	}
   1019  1.1     skrll 	bus_dmamap_sync(sc->sc_dmat, sc->sense_buffer_dma.map,
   1020  1.1     skrll 	    hcb->dma_map_offset, hcb->dma_map_size, op);
   1021  1.1     skrll 
   1022  1.1     skrll 	if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_OK) {
   1023  1.1     skrll 		DEBUG_PRINTF(3, sc->dev,
   1024  1.1     skrll 		    "completing command context %llx success\n",
   1025  1.1     skrll 		    (unsigned long long)e->context);
   1026  1.1     skrll 		xs->resid = 0;
   1027  1.1     skrll 	} else {
   1028  1.1     skrll 		switch (btstat) {
   1029  1.1     skrll 		case BTSTAT_SUCCESS:
   1030  1.1     skrll 		case BTSTAT_LINKED_COMMAND_COMPLETED:
   1031  1.1     skrll 		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
   1032  1.1     skrll 			switch (sdstat) {
   1033  1.1     skrll 			case SCSI_OK:
   1034  1.1     skrll 				xs->resid = 0;
   1035  1.1     skrll 				error = XS_NOERROR;
   1036  1.1     skrll 				break;
   1037  1.1     skrll 			case SCSI_CHECK:
   1038  1.1     skrll 				error = XS_SENSE;
   1039  1.1     skrll 				xs->resid = 0;
   1040  1.1     skrll 
   1041  1.1     skrll 				memset(&xs->sense, 0, sizeof(xs->sense));
   1042  1.1     skrll 				memcpy(&xs->sense, hcb->sense_buffer,
   1043  1.1     skrll 				    MIN(sizeof(xs->sense), e->sense_len));
   1044  1.1     skrll 				break;
   1045  1.1     skrll 			case SCSI_BUSY:
   1046  1.1     skrll 			case SCSI_QUEUE_FULL:
   1047  1.1     skrll 				error = XS_NOERROR;
   1048  1.1     skrll 				break;
   1049  1.1     skrll 			case SCSI_TERMINATED:
   1050  1.1     skrll // 			case SCSI_STATUS_TASK_ABORTED:
   1051  1.1     skrll 				DEBUG_PRINTF(1, sc->dev,
   1052  1.1     skrll 				    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1053  1.1     skrll 				error = XS_DRIVER_STUFFUP;
   1054  1.1     skrll 				break;
   1055  1.1     skrll 			default:
   1056  1.1     skrll 				DEBUG_PRINTF(1, sc->dev,
   1057  1.1     skrll 				    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1058  1.1     skrll 				error = XS_DRIVER_STUFFUP;
   1059  1.1     skrll 				break;
   1060  1.1     skrll 			}
   1061  1.1     skrll 			break;
   1062  1.1     skrll 		case BTSTAT_SELTIMEO:
   1063  1.1     skrll 			error = XS_SELTIMEOUT;
   1064  1.1     skrll 			break;
   1065  1.1     skrll 		case BTSTAT_DATARUN:
   1066  1.1     skrll 		case BTSTAT_DATA_UNDERRUN:
   1067  1.1     skrll //			xs->resid = xs->datalen - c->data_len;
   1068  1.1     skrll 			error = XS_NOERROR;
   1069  1.1     skrll 			break;
   1070  1.1     skrll 		case BTSTAT_ABORTQUEUE:
   1071  1.1     skrll 		case BTSTAT_HATIMEOUT:
   1072  1.1     skrll 			error = XS_NOERROR;
   1073  1.1     skrll 			break;
   1074  1.1     skrll 		case BTSTAT_NORESPONSE:
   1075  1.1     skrll 		case BTSTAT_SENTRST:
   1076  1.1     skrll 		case BTSTAT_RECVRST:
   1077  1.1     skrll 		case BTSTAT_BUSRESET:
   1078  1.1     skrll 			error = XS_RESET;
   1079  1.1     skrll 			break;
   1080  1.1     skrll 		case BTSTAT_SCSIPARITY:
   1081  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1082  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1083  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1084  1.1     skrll 			break;
   1085  1.1     skrll 		case BTSTAT_BUSFREE:
   1086  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1087  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1088  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1089  1.1     skrll 			break;
   1090  1.1     skrll 		case BTSTAT_INVPHASE:
   1091  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1092  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1093  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1094  1.1     skrll 			break;
   1095  1.1     skrll 		case BTSTAT_SENSFAILED:
   1096  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1097  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1098  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1099  1.1     skrll 			break;
   1100  1.1     skrll 		case BTSTAT_LUNMISMATCH:
   1101  1.1     skrll 		case BTSTAT_TAGREJECT:
   1102  1.1     skrll 		case BTSTAT_DISCONNECT:
   1103  1.1     skrll 		case BTSTAT_BADMSG:
   1104  1.1     skrll 		case BTSTAT_INVPARAM:
   1105  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1106  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1107  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1108  1.1     skrll 			break;
   1109  1.1     skrll 		case BTSTAT_HASOFTWARE:
   1110  1.1     skrll 		case BTSTAT_HAHARDWARE:
   1111  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1112  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1113  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1114  1.1     skrll 			break;
   1115  1.1     skrll 		default:
   1116  1.1     skrll 			aprint_normal_dev(sc->dev, "unknown hba status: 0x%x\n",
   1117  1.1     skrll 			    btstat);
   1118  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1119  1.1     skrll 			break;
   1120  1.1     skrll 		}
   1121  1.1     skrll 
   1122  1.1     skrll 		DEBUG_PRINTF(3, sc->dev,
   1123  1.1     skrll 		    "completing command context %llx btstat %x sdstat %x - error %x\n",
   1124  1.1     skrll 		    (unsigned long long)e->context, btstat, sdstat, error);
   1125  1.1     skrll 	}
   1126  1.1     skrll 
   1127  1.1     skrll 	xs->error = error;
   1128  1.1     skrll 	pvscsi_hcb_put(sc, hcb);
   1129  1.1     skrll 
   1130  1.1     skrll 	mutex_exit(&sc->lock);
   1131  1.1     skrll 
   1132  1.1     skrll 	scsipi_done(xs);
   1133  1.1     skrll 
   1134  1.1     skrll 	mutex_enter(&sc->lock);
   1135  1.1     skrll }
   1136  1.1     skrll 
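                        /*
                         * Drain the completion ring: read the device-owned producer index,
                         * process each outstanding entry, then advance the driver-owned
                         * consumer index, with bus_dmamap_sync around the device-shared
                         * fields.
                         */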
   1137  1.1     skrll static void
   1138  1.1     skrll pvscsi_process_cmp_ring(struct pvscsi_softc *sc)
   1139  1.1     skrll {
   1140  1.4  riastrad 	struct pvscsi_dma *ring_dma;
   1141  1.1     skrll 	struct pvscsi_ring_cmp_desc *ring;
   1142  1.4  riastrad 	struct pvscsi_dma *s_dma;
   1143  1.1     skrll 	struct pvscsi_rings_state *s;
   1144  1.1     skrll 	struct pvscsi_ring_cmp_desc *e;
   1145  1.1     skrll 	uint32_t mask;
   1146  1.1     skrll 
   1147  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
   1148  1.1     skrll 
   1149  1.4  riastrad 	s_dma = &sc->rings_state_dma;
   1150  1.1     skrll 	s = sc->rings_state;
   1151  1.4  riastrad 	ring_dma = &sc->cmp_ring_dma;
   1152  1.1     skrll 	ring = sc->cmp_ring;
   1153  1.1     skrll 	mask = MASK(s->cmp_num_entries_log2);
   1154  1.1     skrll 
   1155  1.4  riastrad 	for (;;) {
   1156  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_prod_idx,
   1157  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1158  1.1     skrll 		size_t crpidx = s->cmp_prod_idx;
   1159  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_prod_idx,
   1160  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1161  1.1     skrll 
   1162  1.1     skrll 		if (s->cmp_cons_idx == crpidx)
   1163  1.1     skrll 			break;
   1164  1.1     skrll 
   1165  1.1     skrll 		size_t crcidx = s->cmp_cons_idx & mask;
   1166  1.1     skrll 
   1167  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, crcidx,
   1168  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1169  1.4  riastrad 
   1170  1.1     skrll 		e = ring + crcidx;
   1171  1.1     skrll 
   1172  1.1     skrll 		pvscsi_process_completion(sc, e);
   1173  1.1     skrll 
   1174  1.1     skrll 		/*
   1175  1.1     skrll 		 * ensure completion processing reads happen before write to
   1176  1.1     skrll 		 * (increment of) cmp_cons_idx
   1177  1.1     skrll 		 */
   1178  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, crcidx,
   1179  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1180  1.4  riastrad 
   1181  1.4  riastrad 		/*
   1182  1.4  riastrad 		 * XXX Not actually sure the `device' does DMA for
   1183  1.4  riastrad 		 * s->cmp_cons_idx at all -- qemu doesn't.  If not, we
   1184  1.4  riastrad 		 * can skip these DMA syncs.
   1185  1.4  riastrad 		 */
   1186  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_cons_idx,
   1187  1.4  riastrad 		    BUS_DMASYNC_POSTWRITE);
   1188  1.1     skrll 		s->cmp_cons_idx++;
   1189  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_cons_idx,
   1190  1.4  riastrad 		    BUS_DMASYNC_PREWRITE);
   1191  1.1     skrll 	}
   1192  1.1     skrll }
   1193  1.1     skrll 
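                        /*
                         * Handle one message-ring descriptor.  Device-added and
                         * device-removed notifications trigger a bus probe or a forced
                         * target detach, respectively.
                         */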
   1194  1.1     skrll static void
   1195  1.1     skrll pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e)
   1196  1.1     skrll {
   1197  1.1     skrll 	struct pvscsi_ring_msg_dev_status_changed *desc;
   1198  1.1     skrll 
   1199  1.1     skrll 	switch (e->type) {
   1200  1.1     skrll 	case PVSCSI_MSG_DEV_ADDED:
   1201  1.1     skrll 	case PVSCSI_MSG_DEV_REMOVED: {
   1202  1.1     skrll 		desc = (struct pvscsi_ring_msg_dev_status_changed *)e;
   1203  1.1     skrll 		struct scsibus_softc *ssc = device_private(sc->sc_scsibus_dv);
   1204  1.1     skrll 
   1205  1.1     skrll 		aprint_normal_dev(sc->dev, "MSG: device %s at scsi%u:%u:%u\n",
   1206  1.1     skrll 		    desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal",
   1207  1.1     skrll 		    desc->bus, desc->target, desc->lun[1]);
   1208  1.1     skrll 
   1209  1.1     skrll 		if (desc->type == PVSCSI_MSG_DEV_ADDED) {
   1210  1.1     skrll 			if (scsi_probe_bus(ssc,
   1211  1.1     skrll 			    desc->target, desc->lun[1]) != 0) {
   1212  1.1     skrll 				aprint_normal_dev(sc->dev,
   1213  1.1     skrll 				    "Error creating path for dev change.\n");
   1214  1.1     skrll 				break;
   1215  1.1     skrll 			}
   1216  1.1     skrll 		} else {
   1217  1.1     skrll 			if (scsipi_target_detach(ssc->sc_channel,
   1218  1.1     skrll 			    desc->target, desc->lun[1],
   1219  1.1     skrll 			    DETACH_FORCE) != 0) {
   1220  1.1     skrll 				aprint_normal_dev(sc->dev,
   1221  1.1     skrll 				    "Error detaching target %d lun %d\n",
   1222  1.1     skrll 				    desc->target, desc->lun[1]);
    1223  1.1     skrll 			}
   1224  1.1     skrll 
   1225  1.1     skrll 		}
   1226  1.1     skrll 	} break;
   1227  1.1     skrll 	default:
   1228  1.1     skrll 		aprint_normal_dev(sc->dev, "Unknown msg type 0x%x\n", e->type);
    1229  1.1     skrll 	}
   1230  1.1     skrll }
   1231  1.1     skrll 
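                        /*
                         * Drain the message ring, using the same producer/consumer index
                         * protocol as the completion ring.
                         */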
   1232  1.1     skrll static void
   1233  1.1     skrll pvscsi_process_msg_ring(struct pvscsi_softc *sc)
   1234  1.1     skrll {
   1235  1.4  riastrad 	struct pvscsi_dma *ring_dma;
   1236  1.1     skrll 	struct pvscsi_ring_msg_desc *ring;
   1237  1.4  riastrad 	struct pvscsi_dma *s_dma;
   1238  1.1     skrll 	struct pvscsi_rings_state *s;
   1239  1.1     skrll 	struct pvscsi_ring_msg_desc *e;
   1240  1.1     skrll 	uint32_t mask;
   1241  1.1     skrll 
   1242  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
   1243  1.1     skrll 
   1244  1.4  riastrad 	s_dma = &sc->rings_state_dma;
   1245  1.1     skrll 	s = sc->rings_state;
   1246  1.4  riastrad 	ring_dma = &sc->msg_ring_dma;
   1247  1.1     skrll 	ring = sc->msg_ring;
   1248  1.1     skrll 	mask = MASK(s->msg_num_entries_log2);
   1249  1.1     skrll 
   1250  1.4  riastrad 	for (;;) {
   1251  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_prod_idx,
   1252  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1253  1.1     skrll 		size_t mpidx = s->msg_prod_idx;	// dma read (device -> cpu)
   1254  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_prod_idx,
   1255  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1256  1.1     skrll 
   1257  1.1     skrll 		if (s->msg_cons_idx == mpidx)
   1258  1.1     skrll 			break;
   1259  1.1     skrll 
   1260  1.1     skrll 		size_t mcidx = s->msg_cons_idx & mask;
   1261  1.1     skrll 
   1262  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, mcidx,
   1263  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1264  1.4  riastrad 
   1265  1.1     skrll 		e = ring + mcidx;
   1266  1.1     skrll 
   1267  1.1     skrll 		pvscsi_process_msg(sc, e);
   1268  1.1     skrll 
   1269  1.1     skrll 		/*
   1270  1.1     skrll 		 * ensure message processing reads happen before write to
   1271  1.1     skrll 		 * (increment of) msg_cons_idx
   1272  1.1     skrll 		 */
   1273  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, mcidx,
   1274  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1275  1.4  riastrad 
   1276  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_cons_idx,
   1277  1.4  riastrad 		    BUS_DMASYNC_POSTWRITE);
   1278  1.1     skrll 		s->msg_cons_idx++;
   1279  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_cons_idx,
   1280  1.4  riastrad 		    BUS_DMASYNC_PREWRITE);
   1281  1.1     skrll 	}
   1282  1.1     skrll }
   1283  1.1     skrll 
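                        /*
                         * Acknowledge any pending interrupt causes, then process the
                         * completion ring and, if enabled, the message ring.
                         */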
   1284  1.1     skrll static void
   1285  1.1     skrll pvscsi_intr_locked(struct pvscsi_softc *sc)
   1286  1.1     skrll {
   1287  1.1     skrll 	uint32_t val;
   1288  1.1     skrll 
   1289  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
   1290  1.1     skrll 
   1291  1.1     skrll 	val = pvscsi_read_intr_status(sc);
   1292  1.1     skrll 
   1293  1.1     skrll 	if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
   1294  1.1     skrll 		pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED);
   1295  1.1     skrll 		pvscsi_process_cmp_ring(sc);
   1296  1.1     skrll 		if (sc->use_msg) {
   1297  1.1     skrll 			pvscsi_process_msg_ring(sc);
   1298  1.1     skrll 		}
   1299  1.1     skrll 	}
   1300  1.1     skrll }
   1301  1.1     skrll 
   1302  1.1     skrll static int
   1303  1.1     skrll pvscsi_intr(void *xsc)
   1304  1.1     skrll {
   1305  1.1     skrll 	struct pvscsi_softc *sc;
   1306  1.1     skrll 
   1307  1.1     skrll 	sc = xsc;
   1308  1.1     skrll 
   1309  1.1     skrll 	mutex_enter(&sc->lock);
    1310  1.1     skrll 	pvscsi_intr_locked(sc);
   1311  1.1     skrll 	mutex_exit(&sc->lock);
   1312  1.1     skrll 
   1313  1.1     skrll 	return 1;
   1314  1.1     skrll }
   1315  1.1     skrll 
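                        /*
                         * scsipi adapter request entry point.  ADAPTER_REQ_SET_XFER_MODE
                         * reports tagged queueing; ADAPTER_REQ_RUN_XFER builds a request
                         * ring descriptor (loading the data buffer via bus_dma), bumps
                         * req_prod_idx and kicks the device.
                         */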
   1316  1.1     skrll static void
   1317  1.1     skrll pvscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
   1318  1.1     skrll     request, void *arg)
   1319  1.1     skrll {
   1320  1.1     skrll 	struct pvscsi_softc *sc = device_private(chan->chan_adapter->adapt_dev);
   1321  1.1     skrll 
   1322  1.1     skrll 	if (request == ADAPTER_REQ_SET_XFER_MODE) {
   1323  1.1     skrll 		struct scsipi_xfer_mode *xm = arg;
   1324  1.1     skrll 
   1325  1.1     skrll 		xm->xm_mode = PERIPH_CAP_TQING;
   1326  1.1     skrll 		xm->xm_period = 0;
   1327  1.1     skrll 		xm->xm_offset = 0;
   1328  1.1     skrll 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
   1329  1.1     skrll 		return;
   1330  1.1     skrll 	} else if (request != ADAPTER_REQ_RUN_XFER) {
   1331  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "unhandled %d\n", request);
   1332  1.1     skrll 		return;
   1333  1.1     skrll 	}
   1334  1.1     skrll 
   1335  1.1     skrll 	/* request is ADAPTER_REQ_RUN_XFER */
   1336  1.1     skrll 	struct scsipi_xfer *xs = arg;
   1337  1.1     skrll 	struct scsipi_periph *periph = xs->xs_periph;
   1338  1.1     skrll #ifdef SCSIPI_DEBUG
   1339  1.1     skrll 	periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
   1340  1.1     skrll #endif
   1341  1.1     skrll 
   1342  1.1     skrll 	uint32_t req_num_entries_log2;
   1343  1.4  riastrad 	struct pvscsi_dma *ring_dma;
   1344  1.1     skrll 	struct pvscsi_ring_req_desc *ring;
   1345  1.1     skrll 	struct pvscsi_ring_req_desc *e;
   1346  1.4  riastrad 	struct pvscsi_dma *s_dma;
   1347  1.1     skrll 	struct pvscsi_rings_state *s;
   1348  1.1     skrll 	struct pvscsi_hcb *hcb;
   1349  1.1     skrll 
   1350  1.1     skrll 	if (xs->cmdlen < 0 || xs->cmdlen > sizeof(e->cdb)) {
   1351  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "bad cmdlen %zu > %zu\n",
   1352  1.1     skrll 		    (size_t)xs->cmdlen, sizeof(e->cdb));
   1353  1.1     skrll 		/* not a temporary condition */
   1354  1.1     skrll 		xs->error = XS_DRIVER_STUFFUP;
   1355  1.1     skrll 		scsipi_done(xs);
   1356  1.1     skrll 		return;
   1357  1.1     skrll 	}
   1358  1.1     skrll 
   1359  1.4  riastrad 	ring_dma = &sc->req_ring_dma;
   1360  1.1     skrll 	ring = sc->req_ring;
   1361  1.4  riastrad 	s_dma = &sc->rings_state_dma;
   1362  1.1     skrll 	s = sc->rings_state;
   1363  1.1     skrll 
   1364  1.1     skrll 	hcb = NULL;
   1365  1.1     skrll 	req_num_entries_log2 = s->req_num_entries_log2;
   1366  1.1     skrll 
   1367  1.1     skrll 	/* Protect against multiple senders */
   1368  1.1     skrll 	mutex_enter(&sc->lock);
   1369  1.1     skrll 
   1370  1.1     skrll 	if (s->req_prod_idx - s->cmp_cons_idx >=
   1371  1.1     skrll 	    (1 << req_num_entries_log2)) {
   1372  1.1     skrll 		aprint_normal_dev(sc->dev,
   1373  1.1     skrll 		    "Not enough room on completion ring.\n");
   1374  1.1     skrll 		xs->error = XS_RESOURCE_SHORTAGE;
   1375  1.1     skrll 		goto finish_xs;
   1376  1.1     skrll 	}
   1377  1.1     skrll 
   1378  1.1     skrll 	if (xs->cmdlen > sizeof(e->cdb)) {
   1379  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "cdb length %u too large\n",
   1380  1.1     skrll 		    xs->cmdlen);
   1381  1.1     skrll 		xs->error = XS_DRIVER_STUFFUP;
   1382  1.1     skrll 		goto finish_xs;
   1383  1.1     skrll 	}
   1384  1.1     skrll 
   1385  1.1     skrll 	hcb = pvscsi_hcb_get(sc);
   1386  1.1     skrll 	if (hcb == NULL) {
   1387  1.1     skrll 		aprint_normal_dev(sc->dev, "No free hcbs.\n");
   1388  1.1     skrll 		xs->error = XS_RESOURCE_SHORTAGE;
   1389  1.1     skrll 		goto finish_xs;
   1390  1.1     skrll 	}
   1391  1.1     skrll 
   1392  1.1     skrll 	hcb->xs = xs;
   1393  1.1     skrll 
   1394  1.1     skrll 	const size_t rridx = s->req_prod_idx & MASK(req_num_entries_log2);
   1395  1.4  riastrad 	PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, rridx, BUS_DMASYNC_POSTWRITE);
   1396  1.1     skrll 	e = ring + rridx;
   1397  1.1     skrll 
   1398  1.1     skrll 	memset(e, 0, sizeof(*e));
   1399  1.1     skrll 	e->bus = 0;
   1400  1.1     skrll 	e->target = periph->periph_target;
   1401  1.1     skrll 	e->lun[1] = periph->periph_lun;
   1402  1.1     skrll 	e->data_addr = 0;
   1403  1.1     skrll 	e->data_len = xs->datalen;
   1404  1.1     skrll 	e->vcpu_hint = cpu_index(curcpu());
   1405  1.1     skrll 	e->flags = 0;
   1406  1.1     skrll 
   1407  1.1     skrll 	e->cdb_len = xs->cmdlen;
   1408  1.1     skrll 	memcpy(e->cdb, xs->cmd, xs->cmdlen);
   1409  1.1     skrll 
   1410  1.1     skrll 	e->sense_addr = 0;
   1411  1.1     skrll 	e->sense_len = sizeof(xs->sense);
   1412  1.1     skrll 	if (e->sense_len > 0) {
   1413  1.1     skrll 		e->sense_addr = hcb->sense_buffer_paddr;
   1414  1.1     skrll 	}
   1415  1.1     skrll 	//e->tag = xs->xs_tag_type;
   1416  1.1     skrll 	e->tag = MSG_SIMPLE_Q_TAG;
   1417  1.1     skrll 
   1418  1.1     skrll 	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
   1419  1.1     skrll 	case XS_CTL_DATA_IN:
   1420  1.1     skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST;
   1421  1.1     skrll 		break;
   1422  1.1     skrll 	case XS_CTL_DATA_OUT:
   1423  1.1     skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE;
   1424  1.1     skrll 		break;
   1425  1.1     skrll 	default:
   1426  1.1     skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_NONE;
   1427  1.1     skrll 		break;
   1428  1.1     skrll 	}
   1429  1.1     skrll 
   1430  1.1     skrll 	e->context = pvscsi_hcb_to_context(sc, hcb);
   1431  1.1     skrll 	hcb->e = e;
   1432  1.1     skrll 
   1433  1.1     skrll 	DEBUG_PRINTF(3, sc->dev,
   1434  1.1     skrll 	    " queuing command %02x context %llx\n", e->cdb[0],
   1435  1.1     skrll 	    (unsigned long long)e->context);
   1436  1.1     skrll 
   1437  1.1     skrll 	int flags;
   1438  1.1     skrll 	flags  = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE;
   1439  1.1     skrll 	flags |= (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
   1440  1.1     skrll 
   1441  1.1     skrll 	int error = bus_dmamap_load(sc->sc_dmat, hcb->dma_map,
   1442  1.1     skrll 	    xs->data, xs->datalen, NULL, flags);
   1443  1.1     skrll 
   1444  1.1     skrll 	if (error) {
   1445  1.1     skrll 		if (error == ENOMEM || error == EAGAIN) {
   1446  1.1     skrll 			xs->error = XS_RESOURCE_SHORTAGE;
   1447  1.1     skrll 		} else {
   1448  1.1     skrll 			xs->error = XS_DRIVER_STUFFUP;
   1449  1.1     skrll 		}
    1450  1.1     skrll 		DEBUG_PRINTF(1, sc->dev,
    1451  1.1     skrll 		    "xs: %p load error %d data %p len %d\n",
    1452  1.1     skrll 		    xs, error, xs->data, xs->datalen);
   1453  1.1     skrll 		goto error_load;
   1454  1.1     skrll 	}
   1455  1.1     skrll 
   1456  1.1     skrll 	int op = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
   1457  1.1     skrll 	    BUS_DMASYNC_PREWRITE;
   1458  1.1     skrll 	int nseg = hcb->dma_map->dm_nsegs;
   1459  1.1     skrll 	bus_dma_segment_t *segs = hcb->dma_map->dm_segs;
   1460  1.1     skrll 	if (nseg != 0) {
   1461  1.1     skrll 		if (nseg > 1) {
   1462  1.1     skrll 			struct pvscsi_sg_element *sge;
   1463  1.1     skrll 
   1464  1.1     skrll 			KASSERTMSG(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT,
   1465  1.1     skrll 			    "too many sg segments");
   1466  1.1     skrll 
   1467  1.1     skrll 			sge = hcb->sg_list->sge;
   1468  1.1     skrll 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
   1469  1.1     skrll 
   1470  1.1     skrll 			for (size_t i = 0; i < nseg; ++i) {
   1471  1.1     skrll 				sge[i].addr = segs[i].ds_addr;
   1472  1.1     skrll 				sge[i].length = segs[i].ds_len;
   1473  1.1     skrll 				sge[i].flags = 0;
   1474  1.1     skrll 			}
   1475  1.1     skrll 
   1476  1.1     skrll 			e->data_addr = hcb->sg_list_paddr;
   1477  1.1     skrll 
   1478  1.1     skrll 			bus_dmamap_sync(sc->sc_dmat,
   1479  1.1     skrll 			    sc->sg_list_dma.map, hcb->sg_list_offset,
   1480  1.1     skrll 			    sizeof(*sge) * nseg, BUS_DMASYNC_PREWRITE);
   1481  1.1     skrll 		} else {
   1482  1.1     skrll 			e->data_addr = segs->ds_addr;
   1483  1.1     skrll 		}
   1484  1.1     skrll 
   1485  1.1     skrll 		bus_dmamap_sync(sc->sc_dmat, hcb->dma_map, 0,
   1486  1.1     skrll 		    xs->datalen, op);
   1487  1.1     skrll 	} else {
   1488  1.1     skrll 		e->data_addr = 0;
   1489  1.1     skrll 	}
   1490  1.1     skrll 
   1491  1.1     skrll 	/*
   1492  1.1     skrll 	 * Ensure request record writes happen before write to (increment of)
   1493  1.1     skrll 	 * req_prod_idx.
   1494  1.1     skrll 	 */
   1495  1.4  riastrad 	PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, rridx, BUS_DMASYNC_PREWRITE);
   1496  1.1     skrll 
   1497  1.1     skrll 	uint8_t cdb0 = e->cdb[0];
   1498  1.1     skrll 
   1499  1.1     skrll 	/* handle timeout */
   1500  1.1     skrll 	if ((xs->xs_control & XS_CTL_POLL) == 0) {
   1501  1.1     skrll 		int timeout = mstohz(xs->timeout);
   1502  1.1     skrll 		/* start expire timer */
   1503  1.1     skrll 		if (timeout == 0)
   1504  1.1     skrll 			timeout = 1;
   1505  1.1     skrll 		callout_reset(&xs->xs_callout, timeout, pvscsi_timeout, hcb);
   1506  1.1     skrll 	}
   1507  1.1     skrll 
   1508  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_prod_idx,
   1509  1.4  riastrad 	    BUS_DMASYNC_POSTWRITE);
   1510  1.1     skrll 	s->req_prod_idx++;
   1511  1.1     skrll 
   1512  1.1     skrll 	/*
   1513  1.1     skrll 	 * Ensure req_prod_idx write (increment) happens before
   1514  1.1     skrll 	 * IO is kicked (via a write).
   1515  1.2     skrll 	 */
   1516  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_prod_idx,
   1517  1.4  riastrad 	    BUS_DMASYNC_PREWRITE);
   1518  1.2     skrll 
   1519  1.1     skrll 	pvscsi_kick_io(sc, cdb0);
   1520  1.1     skrll 	mutex_exit(&sc->lock);
   1521  1.1     skrll 
   1522  1.1     skrll 	return;
   1523  1.1     skrll 
   1524  1.1     skrll error_load:
   1525  1.1     skrll 	pvscsi_hcb_put(sc, hcb);
   1526  1.1     skrll 
   1527  1.1     skrll finish_xs:
   1528  1.1     skrll 	mutex_exit(&sc->lock);
   1529  1.1     skrll 	scsipi_done(xs);
   1530  1.1     skrll }
   1531  1.1     skrll 
   1532  1.1     skrll static void
   1533  1.1     skrll pvscsi_free_interrupts(struct pvscsi_softc *sc)
   1534  1.1     skrll {
   1535  1.1     skrll 
   1536  1.1     skrll 	if (sc->sc_ih != NULL) {
   1537  1.1     skrll 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   1538  1.1     skrll 		sc->sc_ih = NULL;
   1539  1.1     skrll 	}
   1540  1.1     skrll 	if (sc->sc_pihp != NULL) {
   1541  1.1     skrll 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
   1542  1.1     skrll 		sc->sc_pihp = NULL;
   1543  1.1     skrll 	}
   1544  1.1     skrll }
   1545  1.1     skrll 
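                        /*
                         * Allocate an interrupt vector, preferring MSI-X, then MSI, then
                         * INTx, as permitted by the use_msix/use_msi tunables, and
                         * establish the handler at IPL_BIO.
                         */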
   1546  1.1     skrll static int
   1547  1.1     skrll pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *pa)
   1548  1.1     skrll {
   1549  1.1     skrll 	int use_msix;
   1550  1.1     skrll 	int use_msi;
   1551  1.1     skrll 	int counts[PCI_INTR_TYPE_SIZE];
   1552  1.1     skrll 
   1553  1.1     skrll 	for (size_t i = 0; i < PCI_INTR_TYPE_SIZE; i++) {
   1554  1.1     skrll 		counts[i] = 1;
   1555  1.1     skrll 	}
   1556  1.1     skrll 
   1557  1.1     skrll 	use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix);
   1558  1.1     skrll 	use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi);
   1559  1.1     skrll 
   1560  1.1     skrll 	if (!use_msix) {
   1561  1.1     skrll 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1562  1.1     skrll 	}
   1563  1.1     skrll 	if (!use_msi) {
   1564  1.1     skrll 		counts[PCI_INTR_TYPE_MSI] = 0;
   1565  1.1     skrll 	}
   1566  1.1     skrll 
   1567  1.1     skrll 	/* Allocate and establish the interrupt. */
   1568  1.1     skrll 	if (pci_intr_alloc(pa, &sc->sc_pihp, counts, PCI_INTR_TYPE_MSIX)) {
   1569  1.1     skrll 		aprint_error_dev(sc->dev, "can't allocate handler\n");
   1570  1.1     skrll 		goto fail;
   1571  1.1     skrll 	}
   1572  1.1     skrll 
   1573  1.1     skrll 	char intrbuf[PCI_INTRSTR_LEN];
   1574  1.1     skrll 	const pci_chipset_tag_t pc = pa->pa_pc;
   1575  1.1     skrll 	char const *intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
   1576  1.1     skrll 	    sizeof(intrbuf));
   1577  1.1     skrll 
                        	/* Set the MP-safe attribute before establishing the handler. */
    1578  1.1     skrll 	pci_intr_setattr(pc, sc->sc_pihp, PCI_INTR_MPSAFE, true);
    1579  1.1     skrll 	sc->sc_ih = pci_intr_establish_xname(pc, sc->sc_pihp[0], IPL_BIO,
    1580  1.1     skrll 	    pvscsi_intr, sc, device_xname(sc->dev));
    1581  1.1     skrll 	if (sc->sc_ih == NULL) {
    1582  1.1     skrll 		pci_intr_release(pc, sc->sc_pihp, 1);
    1583  1.1     skrll 		sc->sc_pihp = NULL;
    1584  1.1     skrll 		aprint_error_dev(sc->dev, "couldn't establish interrupt");
    1585  1.1     skrll 		if (intrstr != NULL)
    1586  1.1     skrll 			aprint_error(" at %s", intrstr);
    1587  1.1     skrll 		aprint_error("\n");
    1588  1.1     skrll 		goto fail;
    1589  1.1     skrll 	}
   1590  1.1     skrll 
   1591  1.1     skrll 	aprint_normal_dev(sc->dev, "interrupting at %s\n", intrstr);
   1592  1.1     skrll 
   1593  1.1     skrll 	return (0);
   1594  1.1     skrll 
   1595  1.1     skrll fail:
   1596  1.1     skrll 	if (sc->sc_ih != NULL) {
   1597  1.1     skrll 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   1598  1.1     skrll 		sc->sc_ih = NULL;
   1599  1.1     skrll 	}
   1600  1.1     skrll 	if (sc->sc_pihp != NULL) {
   1601  1.1     skrll 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
   1602  1.1     skrll 		sc->sc_pihp = NULL;
   1603  1.1     skrll 	}
   1604  1.1     skrll 	if (sc->sc_mems) {
   1605  1.1     skrll 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   1606  1.1     skrll 		sc->sc_mems = 0;
   1607  1.1     skrll 	}
   1608  1.1     skrll 
   1609  1.1     skrll 	return 1;
   1610  1.1     skrll }
   1611  1.1     skrll 
   1612  1.1     skrll static void
   1613  1.1     skrll pvscsi_free_all(struct pvscsi_softc *sc)
   1614  1.1     skrll {
   1615  1.1     skrll 
   1616  1.1     skrll 	pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt);
   1617  1.1     skrll 
   1618  1.1     skrll 	if (sc->hcbs) {
   1619  1.1     skrll 		kmem_free(sc->hcbs, sc->hcb_cnt * sizeof(*sc->hcbs));
   1620  1.1     skrll 	}
   1621  1.1     skrll 
   1622  1.1     skrll 	pvscsi_free_rings(sc);
   1623  1.1     skrll 
   1624  1.1     skrll 	pvscsi_free_interrupts(sc);
   1625  1.1     skrll 
   1626  1.1     skrll 	if (sc->sc_mems) {
   1627  1.1     skrll 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   1628  1.1     skrll 		sc->sc_mems = 0;
   1629  1.1     skrll 	}
   1630  1.1     skrll }
   1631  1.1     skrll 
   1632  1.1     skrll static inline void
   1633  1.1     skrll pci_enable_busmaster(device_t dev, const pci_chipset_tag_t pc,
   1634  1.1     skrll     const pcitag_t tag)
   1635  1.1     skrll {
   1636  1.1     skrll 	pcireg_t pci_cmd_word;
   1637  1.1     skrll 
   1638  1.1     skrll 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1639  1.1     skrll 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
   1640  1.1     skrll 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
   1641  1.1     skrll 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1642  1.1     skrll 	}
   1643  1.1     skrll }
   1644  1.1     skrll 
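                        /*
                         * Autoconfiguration attach: enable bus mastering, map the first
                         * memory BAR, choose a DMA tag, set up interrupts, allocate the
                         * rings and hcb pool, reset the adapter and attach the scsipi
                         * channel.
                         */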
   1645  1.1     skrll static void
   1646  1.1     skrll pvscsi_attach(device_t parent, device_t dev, void *aux)
   1647  1.1     skrll {
   1648  1.1     skrll 	const struct pci_attach_args *pa = aux;
   1649  1.1     skrll 	struct pvscsi_softc *sc;
   1650  1.1     skrll 	int rid;
   1651  1.1     skrll 	int error;
   1652  1.1     skrll 	int max_queue_depth;
   1653  1.1     skrll 	int adapter_queue_size;
   1654  1.1     skrll 
   1655  1.1     skrll 	sc = device_private(dev);
   1656  1.1     skrll 	sc->dev = dev;
   1657  1.1     skrll 
   1658  1.1     skrll 	struct scsipi_adapter *adapt = &sc->sc_adapter;
   1659  1.1     skrll 	struct scsipi_channel *chan = &sc->sc_channel;
   1660  1.1     skrll 
   1661  1.1     skrll 	mutex_init(&sc->lock, MUTEX_DEFAULT, IPL_BIO);
   1662  1.1     skrll 
   1663  1.1     skrll 	sc->sc_pc = pa->pa_pc;
   1664  1.1     skrll 	pci_enable_busmaster(dev, pa->pa_pc, pa->pa_tag);
   1665  1.1     skrll 
   1666  1.1     skrll 	pci_aprint_devinfo_fancy(pa, "virtual disk controller",
   1667  1.1     skrll 	    VMWARE_PVSCSI_DEVSTR, true);
   1668  1.1     skrll 
   1669  1.1     skrll 	/*
    1670  1.1     skrll 	 * Map the device.  All devices support memory-mapped access.
   1671  1.1     skrll 	 */
   1672  1.1     skrll 	bool memh_valid;
   1673  1.1     skrll 	bus_space_tag_t memt;
   1674  1.1     skrll 	bus_space_handle_t memh;
   1675  1.1     skrll 	bus_size_t mems;
   1676  1.1     skrll 	pcireg_t regt;
   1677  1.1     skrll 
   1678  1.1     skrll 	for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END; rid += sizeof(regt)) {
   1679  1.1     skrll 		regt = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rid);
   1680  1.1     skrll 		if (PCI_MAPREG_TYPE(regt) == PCI_MAPREG_TYPE_MEM)
   1681  1.1     skrll 			break;
   1682  1.1     skrll 	}
   1683  1.1     skrll 
    1684  1.1     skrll 	if (rid >= PCI_MAPREG_END) {
    1685  1.1     skrll 		aprint_error_dev(dev,
    1686  1.1     skrll 		    "unable to locate device registers\n");
                        		return;
    1687  1.1     skrll 	}
   1688  1.1     skrll 
   1689  1.1     skrll 	memh_valid = (pci_mapreg_map(pa, rid, regt, 0, &memt, &memh,
   1690  1.1     skrll 	    NULL, &mems) == 0);
   1691  1.1     skrll 	if (!memh_valid) {
   1692  1.1     skrll 		aprint_error_dev(dev,
   1693  1.1     skrll 		    "unable to map device registers\n");
   1694  1.1     skrll 		return;
   1695  1.1     skrll 	}
   1696  1.1     skrll 	sc->sc_memt = memt;
   1697  1.1     skrll 	sc->sc_memh = memh;
   1698  1.1     skrll 	sc->sc_mems = mems;
   1699  1.1     skrll 
   1700  1.1     skrll 	if (pci_dma64_available(pa)) {
   1701  1.1     skrll 		sc->sc_dmat = pa->pa_dmat64;
   1702  1.1     skrll 		aprint_verbose_dev(sc->dev, "64-bit DMA\n");
   1703  1.1     skrll 	} else {
   1704  1.1     skrll 		aprint_verbose_dev(sc->dev, "32-bit DMA\n");
   1705  1.1     skrll 		sc->sc_dmat = pa->pa_dmat;
   1706  1.1     skrll 	}
   1707  1.1     skrll 
   1708  1.1     skrll 	error = pvscsi_setup_interrupts(sc, pa);
   1709  1.1     skrll 	if (error) {
   1710  1.1     skrll 		aprint_normal_dev(dev, "Interrupt setup failed\n");
   1711  1.1     skrll 		pvscsi_free_all(sc);
   1712  1.1     skrll 		return;
   1713  1.1     skrll 	}
   1714  1.1     skrll 
   1715  1.1     skrll 	sc->max_targets = pvscsi_get_max_targets(sc);
   1716  1.1     skrll 
   1717  1.1     skrll 	sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) &&
   1718  1.1     skrll 	    pvscsi_hw_supports_msg(sc);
   1719  1.1     skrll 	sc->msg_ring_num_pages = sc->use_msg ? 1 : 0;
   1720  1.1     skrll 
   1721  1.1     skrll 	sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages",
   1722  1.1     skrll 	    pvscsi_request_ring_pages);
   1723  1.1     skrll 	if (sc->req_ring_num_pages <= 0) {
   1724  1.1     skrll 		if (sc->max_targets <= 16) {
   1725  1.1     skrll 			sc->req_ring_num_pages =
   1726  1.1     skrll 			    PVSCSI_DEFAULT_NUM_PAGES_REQ_RING;
   1727  1.1     skrll 		} else {
   1728  1.1     skrll 			sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
   1729  1.1     skrll 		}
   1730  1.1     skrll 	} else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) {
   1731  1.1     skrll 		sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
   1732  1.1     skrll 	}
   1733  1.1     skrll 	sc->cmp_ring_num_pages = sc->req_ring_num_pages;
   1734  1.1     skrll 
   1735  1.1     skrll 	max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth",
   1736  1.1     skrll 	    pvscsi_max_queue_depth);
   1737  1.1     skrll 
   1738  1.1     skrll 	adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) /
   1739  1.1     skrll 	    sizeof(struct pvscsi_ring_req_desc);
   1740  1.1     skrll 	if (max_queue_depth > 0) {
   1741  1.1     skrll 		adapter_queue_size = MIN(adapter_queue_size, max_queue_depth);
   1742  1.1     skrll 	}
   1743  1.1     skrll 	adapter_queue_size = MIN(adapter_queue_size,
   1744  1.1     skrll 	    PVSCSI_MAX_REQ_QUEUE_DEPTH);
   1745  1.1     skrll 
   1746  1.1     skrll 	aprint_normal_dev(sc->dev, "Use Msg: %d\n", sc->use_msg);
   1747  1.1     skrll 	aprint_normal_dev(sc->dev, "Max targets: %d\n", sc->max_targets);
   1748  1.1     skrll 	aprint_normal_dev(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages);
   1749  1.1     skrll 	aprint_normal_dev(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages);
   1750  1.1     skrll 	aprint_normal_dev(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages);
   1751  1.1     skrll 	aprint_normal_dev(sc->dev, "Queue size: %d\n", adapter_queue_size);
   1752  1.1     skrll 
   1753  1.1     skrll 	if (pvscsi_allocate_rings(sc)) {
   1754  1.1     skrll 		aprint_normal_dev(dev, "ring allocation failed\n");
   1755  1.1     skrll 		pvscsi_free_all(sc);
   1756  1.1     skrll 		return;
   1757  1.1     skrll 	}
   1758  1.1     skrll 
   1759  1.1     skrll 	sc->hcb_cnt = adapter_queue_size;
   1760  1.1     skrll 	sc->hcbs = kmem_zalloc(sc->hcb_cnt * sizeof(*sc->hcbs), KM_SLEEP);
   1761  1.1     skrll 
   1762  1.1     skrll 	if (pvscsi_dma_alloc_per_hcb(sc)) {
   1763  1.1     skrll 		aprint_normal_dev(dev, "error allocating per hcb dma memory\n");
   1764  1.1     skrll 		pvscsi_free_all(sc);
   1765  1.1     skrll 		return;
   1766  1.1     skrll 	}
   1767  1.1     skrll 
   1768  1.1     skrll 	pvscsi_adapter_reset(sc);
   1769  1.1     skrll 
   1770  1.1     skrll 	/*
   1771  1.1     skrll 	 * Fill in the scsipi_adapter.
   1772  1.1     skrll 	 */
   1773  1.1     skrll 	memset(adapt, 0, sizeof(*adapt));
   1774  1.1     skrll 	adapt->adapt_dev = sc->dev;
   1775  1.1     skrll 	adapt->adapt_nchannels = 1;
   1776  1.1     skrll 	adapt->adapt_openings = MIN(adapter_queue_size, PVSCSI_CMD_PER_LUN);
   1777  1.1     skrll 	adapt->adapt_max_periph = adapt->adapt_openings;
   1778  1.1     skrll 	adapt->adapt_request = pvscsi_scsipi_request;
   1779  1.1     skrll 	adapt->adapt_minphys = minphys;
   1780  1.1     skrll 
   1781  1.1     skrll 	/*
   1782  1.1     skrll 	 * Fill in the scsipi_channel.
   1783  1.1     skrll 	 */
   1784  1.1     skrll 	memset(chan, 0, sizeof(*chan));
   1785  1.1     skrll 	chan->chan_adapter = adapt;
   1786  1.1     skrll 	chan->chan_bustype = &scsi_bustype;
   1787  1.1     skrll 	chan->chan_channel = 0;
   1788  1.1     skrll 	chan->chan_ntargets = MIN(PVSCSI_MAX_TARGET, 16);	/* cap reasonably */
   1789  1.1     skrll 	chan->chan_nluns = MIN(PVSCSI_MAX_LUN, 1024);		/* cap reasonably */
   1790  1.1     skrll 	chan->chan_id = PVSCSI_MAX_TARGET;
   1791  1.1     skrll 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
   1792  1.1     skrll 
   1793  1.1     skrll 	pvscsi_setup_rings(sc);
   1794  1.1     skrll 	if (sc->use_msg) {
   1795  1.1     skrll 		pvscsi_setup_msg_ring(sc);
   1796  1.1     skrll 	}
   1797  1.1     skrll 
   1798  1.1     skrll 	sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1);
   1799  1.1     skrll 
   1800  1.1     skrll 	pvscsi_intr_enable(sc);
   1801  1.1     skrll 
   1802  1.1     skrll 	sc->sc_scsibus_dv = config_found(sc->dev, &sc->sc_channel, scsiprint,
   1803  1.1     skrll 	    CFARGS_NONE);
   1804  1.1     skrll 
   1805  1.1     skrll 	return;
   1806  1.1     skrll }
   1807  1.1     skrll 
   1808  1.1     skrll static int
   1809  1.1     skrll pvscsi_detach(device_t dev, int flags)
   1810  1.1     skrll {
   1811  1.1     skrll 	struct pvscsi_softc *sc;
   1812  1.1     skrll 
   1813  1.1     skrll 	sc = device_private(dev);
   1814  1.1     skrll 
   1815  1.1     skrll 	pvscsi_intr_disable(sc);
   1816  1.1     skrll 	pvscsi_adapter_reset(sc);
   1817  1.1     skrll 
   1818  1.1     skrll 	pvscsi_free_all(sc);
   1819  1.1     skrll 
   1820  1.1     skrll 	mutex_destroy(&sc->lock);
   1821  1.1     skrll 
   1822  1.1     skrll 	return (0);
   1823  1.1     skrll }
   1824