      1  1.1     skrll /*-
      2  1.1     skrll  * Copyright (c) 2018 VMware, Inc.
      3  1.1     skrll  *
      4  1.1     skrll  * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
      5  1.1     skrll  */
      6  1.1     skrll 
      7  1.1     skrll /*
      8  1.1     skrll 
      9  1.1     skrll These files are provided under a dual BSD-2 Clause/GPLv2 license. When
     10  1.1     skrll using or redistributing this file, you may do so under either license.
     11  1.1     skrll 
     12  1.1     skrll BSD-2 Clause License
     13  1.1     skrll 
     14  1.1     skrll Copyright (c) 2018 VMware, Inc.
     15  1.1     skrll 
     16  1.1     skrll Redistribution and use in source and binary forms, with or without
     17  1.1     skrll modification, are permitted provided that the following conditions
     18  1.1     skrll are met:
     19  1.1     skrll 
     20  1.1     skrll   * Redistributions of source code must retain the above copyright
     21  1.1     skrll     notice, this list of conditions and the following disclaimer.
     22  1.1     skrll 
     23  1.1     skrll   * Redistributions in binary form must reproduce the above copyright
     24  1.1     skrll     notice, this list of conditions and the following disclaimer in
     25  1.1     skrll     the documentation and/or other materials provided with the
     26  1.1     skrll     distribution.
     27  1.1     skrll 
     28  1.1     skrll THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     29  1.1     skrll "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     30  1.1     skrll LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     31  1.1     skrll A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     32  1.1     skrll OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     33  1.1     skrll SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     34  1.1     skrll LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     35  1.1     skrll DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     36  1.1     skrll THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     37  1.1     skrll (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     38  1.1     skrll OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     39  1.1     skrll 
     40  1.1     skrll GPL License Summary
     41  1.1     skrll 
     42  1.1     skrll Copyright (c) 2018 VMware, Inc.
     43  1.1     skrll 
     44  1.1     skrll This program is free software; you can redistribute it and/or modify
     45  1.1     skrll it under the terms of version 2 of the GNU General Public License as
     46  1.1     skrll published by the Free Software Foundation.
     47  1.1     skrll 
     48  1.1     skrll This program is distributed in the hope that it will be useful, but
     49  1.1     skrll WITHOUT ANY WARRANTY; without even the implied warranty of
     50  1.1     skrll MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     51  1.1     skrll General Public License for more details.
     52  1.1     skrll 
     53  1.1     skrll You should have received a copy of the GNU General Public License
     54  1.1     skrll along with this program; if not, write to the Free Software
     55  1.1     skrll Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
     56  1.1     skrll The full GNU General Public License is included in this distribution
     57  1.1     skrll in the file called LICENSE.GPL.
     58  1.1     skrll 
     59  1.1     skrll */
     60  1.1     skrll 
     61  1.1     skrll #include <sys/cdefs.h>
     62  1.5  riastrad __KERNEL_RCSID(0, "$NetBSD: pvscsi.c,v 1.5 2025/09/06 02:56:52 riastradh Exp $");
     63  1.1     skrll 
     64  1.1     skrll #include <sys/param.h>
     65  1.1     skrll 
     66  1.1     skrll #include <sys/buf.h>
     67  1.1     skrll #include <sys/bus.h>
     68  1.1     skrll #include <sys/cpu.h>
     69  1.1     skrll #include <sys/device.h>
     70  1.1     skrll #include <sys/kernel.h>
     71  1.1     skrll #include <sys/kmem.h>
     72  1.3  riastrad #include <sys/paravirt_membar.h>
     73  1.1     skrll #include <sys/queue.h>
     74  1.1     skrll #include <sys/sysctl.h>
     75  1.1     skrll #include <sys/systm.h>
     76  1.1     skrll 
     77  1.1     skrll #include <dev/pci/pcireg.h>
     78  1.1     skrll #include <dev/pci/pcivar.h>
     79  1.1     skrll #include <dev/pci/pcidevs.h>
     80  1.1     skrll 
     81  1.1     skrll #include <dev/scsipi/scsi_all.h>
     82  1.1     skrll #include <dev/scsipi/scsi_message.h>
     83  1.1     skrll #include <dev/scsipi/scsiconf.h>
     84  1.1     skrll #include <dev/scsipi/scsipi_disk.h>
     85  1.1     skrll #include <dev/scsipi/scsi_disk.h>
     86  1.1     skrll 
     87  1.1     skrll #include "pvscsi.h"
     88  1.1     skrll 
     89  1.1     skrll #define	PVSCSI_DEFAULT_NUM_PAGES_REQ_RING	8
     90  1.1     skrll #define	PVSCSI_SENSE_LENGTH			256
     91  1.1     skrll 
     92  1.1     skrll #define PVSCSI_MAXPHYS				MAXPHYS
     93  1.1     skrll #define PVSCSI_MAXPHYS_SEGS			((PVSCSI_MAXPHYS / PAGE_SIZE) + 1)
     94  1.1     skrll 
     95  1.1     skrll #define PVSCSI_CMD_PER_LUN 64
     96  1.1     skrll #define PVSCSI_MAX_LUN 8
     97  1.1     skrll #define PVSCSI_MAX_TARGET 16
     98  1.1     skrll 
     99  1.1     skrll //#define PVSCSI_DEBUG_LOGGING
    100  1.1     skrll 
    101  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    102  1.1     skrll #define	DEBUG_PRINTF(level, dev, fmt, ...)				\
    103  1.1     skrll 	do {								\
    104  1.1     skrll 		if (pvscsi_log_level >= (level)) {			\
    105  1.1     skrll 			aprint_normal_dev((dev), (fmt), ##__VA_ARGS__);	\
    106  1.1     skrll 		}							\
    107  1.1     skrll 	} while(0)
    108  1.1     skrll #else
    109  1.1     skrll #define DEBUG_PRINTF(level, dev, fmt, ...)
    110  1.1     skrll #endif /* PVSCSI_DEBUG_LOGGING */
    111  1.1     skrll 
    112  1.1     skrll struct pvscsi_softc;
    113  1.1     skrll struct pvscsi_hcb;
    114  1.1     skrll struct pvscsi_dma;
    115  1.1     skrll 
    116  1.1     skrll #define VMWARE_PVSCSI_DEVSTR	"VMware Paravirtual SCSI Controller"
    117  1.1     skrll 
    118  1.1     skrll static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc,
    119  1.1     skrll     uint32_t offset);
    120  1.1     skrll static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset,
    121  1.1     skrll     uint32_t val);
    122  1.1     skrll static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc);
    123  1.1     skrll static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc,
    124  1.1     skrll     uint32_t val);
    125  1.1     skrll static inline void pvscsi_intr_enable(struct pvscsi_softc *sc);
    126  1.1     skrll static inline void pvscsi_intr_disable(struct pvscsi_softc *sc);
    127  1.1     skrll static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0);
    128  1.1     skrll static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    129  1.1     skrll     uint32_t len);
    130  1.1     skrll static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc);
    131  1.1     skrll static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable);
    132  1.1     skrll static void pvscsi_setup_rings(struct pvscsi_softc *sc);
    133  1.1     skrll static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc);
    134  1.1     skrll static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc);
    135  1.1     skrll 
    136  1.1     skrll static void pvscsi_timeout(void *arg);
    137  1.1     skrll static void pvscsi_adapter_reset(struct pvscsi_softc *sc);
    138  1.1     skrll static void pvscsi_bus_reset(struct pvscsi_softc *sc);
    139  1.1     skrll static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target);
    140  1.1     skrll static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target,
    141  1.1     skrll     struct pvscsi_hcb *hcb);
    142  1.1     skrll 
    143  1.1     skrll static void pvscsi_process_completion(struct pvscsi_softc *sc,
    144  1.1     skrll     struct pvscsi_ring_cmp_desc *e);
    145  1.1     skrll static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc);
    146  1.1     skrll static void pvscsi_process_msg(struct pvscsi_softc *sc,
    147  1.1     skrll     struct pvscsi_ring_msg_desc *e);
    148  1.1     skrll static void pvscsi_process_msg_ring(struct pvscsi_softc *sc);
    149  1.1     skrll 
    150  1.1     skrll static void pvscsi_intr_locked(struct pvscsi_softc *sc);
    151  1.1     skrll static int pvscsi_intr(void *xsc);
    152  1.1     skrll 
    153  1.1     skrll static void pvscsi_scsipi_request(struct scsipi_channel *,
    154  1.1     skrll     scsipi_adapter_req_t, void *);
    155  1.1     skrll 
    156  1.1     skrll static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    157  1.1     skrll     struct pvscsi_hcb *hcb);
    158  1.1     skrll static inline struct pvscsi_hcb *pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    159  1.1     skrll     uint64_t context);
    160  1.1     skrll static struct pvscsi_hcb * pvscsi_hcb_get(struct pvscsi_softc *sc);
    161  1.1     skrll static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb);
    162  1.1     skrll 
    163  1.1     skrll static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma);
    164  1.1     skrll static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    165  1.1     skrll     bus_size_t size, bus_size_t alignment);
    166  1.1     skrll static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc,
    167  1.1     skrll     struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages);
    168  1.1     skrll static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc,
    169  1.1     skrll     uint32_t hcbs_allocated);
    170  1.1     skrll static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc);
    171  1.1     skrll static void pvscsi_free_rings(struct pvscsi_softc *sc);
    172  1.1     skrll static int pvscsi_allocate_rings(struct pvscsi_softc *sc);
    173  1.1     skrll static void pvscsi_free_interrupts(struct pvscsi_softc *sc);
    174  1.1     skrll static int pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *);
    175  1.1     skrll static void pvscsi_free_all(struct pvscsi_softc *sc);
    176  1.1     skrll 
    177  1.1     skrll static void pvscsi_attach(device_t, device_t, void *);
    178  1.1     skrll static int pvscsi_detach(device_t, int);
    179  1.1     skrll static int pvscsi_probe(device_t, cfdata_t, void *);
    180  1.1     skrll 
    181  1.1     skrll #define pvscsi_get_tunable(_sc, _name, _value)	(_value)
    182  1.1     skrll 
    183  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    184  1.1     skrll static int pvscsi_log_level = 1;
    185  1.1     skrll #endif
    186  1.1     skrll 
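/*
 * Helper for SYSCTL_SETUP below: create a read/write integer node
 * hw.pvscsi.<name> backed by the matching pvscsi_<name> variable,
 * bailing out to the fail label on error.
 */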
    187  1.1     skrll #define TUNABLE_INT(__x, __d)					\
    188  1.1     skrll 	err = sysctl_createv(clog, 0, &rnode, &cnode,		\
    189  1.1     skrll 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,	\
    190  1.1     skrll 	    #__x, SYSCTL_DESCR(__d),				\
    191  1.1     skrll 	    NULL, 0, &(pvscsi_ ## __x), sizeof(pvscsi_ ## __x), \
    192  1.1     skrll 	    CTL_CREATE,	CTL_EOL);				\
    193  1.1     skrll 	if (err)						\
    194  1.1     skrll 		goto fail;
    195  1.1     skrll 
    196  1.1     skrll static int pvscsi_request_ring_pages = 0;
    197  1.1     skrll static int pvscsi_use_msg = 1;
    198  1.1     skrll static int pvscsi_use_msi = 1;
    199  1.1     skrll static int pvscsi_use_msix = 1;
    200  1.1     skrll static int pvscsi_use_req_call_threshold = 0;
    201  1.1     skrll static int pvscsi_max_queue_depth = 0;
    202  1.1     skrll 
    203  1.1     skrll SYSCTL_SETUP(sysctl_hw_pvscsi_setup, "sysctl hw.pvscsi setup")
    204  1.1     skrll {
    205  1.1     skrll 	int err;
    206  1.1     skrll 	const struct sysctlnode *rnode;
    207  1.1     skrll 	const struct sysctlnode *cnode;
    208  1.1     skrll 
    209  1.1     skrll 	err = sysctl_createv(clog, 0, NULL, &rnode,
    210  1.1     skrll 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "pvscsi",
    211  1.1     skrll 	    SYSCTL_DESCR("pvscsi global controls"),
    212  1.1     skrll 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
    213  1.1     skrll 
    214  1.1     skrll 	if (err)
    215  1.1     skrll 		goto fail;
    216  1.1     skrll 
    217  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    218  1.1     skrll 	TUNABLE_INT(log_level, "Enable debugging output");
    219  1.1     skrll #endif
    220  1.1     skrll 
    221  1.1     skrll 	TUNABLE_INT(request_ring_pages, "No. of pages for the request ring");
    222  1.1     skrll 	TUNABLE_INT(use_msg, "Use message passing");
    223  1.1     skrll 	TUNABLE_INT(use_msi, "Use MSI interrupt");
     224  1.1     skrll 	TUNABLE_INT(use_msix, "Use MSI-X interrupt");
     225  1.1     skrll 	TUNABLE_INT(use_req_call_threshold, "Use request call threshold");
    226  1.1     skrll 	TUNABLE_INT(max_queue_depth, "Maximum size of request queue");
    227  1.1     skrll 
    228  1.1     skrll 	return;
    229  1.1     skrll fail:
    230  1.1     skrll 	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
    231  1.1     skrll }
    232  1.1     skrll 
    233  1.1     skrll struct pvscsi_sg_list {
    234  1.1     skrll 	struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
    235  1.1     skrll };
    236  1.1     skrll 
    237  1.1     skrll #define	PVSCSI_ABORT_TIMEOUT	2
    238  1.1     skrll #define	PVSCSI_RESET_TIMEOUT	10
    239  1.1     skrll 
    240  1.1     skrll #define	PVSCSI_HCB_NONE		0
    241  1.1     skrll #define	PVSCSI_HCB_ABORT	1
    242  1.1     skrll #define	PVSCSI_HCB_DEVICE_RESET	2
    243  1.1     skrll #define	PVSCSI_HCB_BUS_RESET	3
    244  1.1     skrll 
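/*
 * Per-command control block (hcb): ties a scsipi_xfer to its request
 * ring descriptor and data DMA map, along with this hcb's slices of
 * the shared sense buffer and scatter/gather list DMA areas.
 */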
    245  1.1     skrll struct pvscsi_hcb {
    246  1.1     skrll 	struct scsipi_xfer 		*xs;
    247  1.1     skrll 	struct pvscsi_softc		*sc;
    248  1.1     skrll 
    249  1.1     skrll 	struct pvscsi_ring_req_desc	*e;
    250  1.1     skrll 	int				 recovery;
    251  1.1     skrll 	SLIST_ENTRY(pvscsi_hcb)		 links;
    252  1.1     skrll 
    253  1.1     skrll 	bus_dmamap_t			 dma_map;
    254  1.1     skrll 	bus_addr_t			 dma_map_offset;
    255  1.1     skrll 	bus_size_t			 dma_map_size;
    256  1.1     skrll 	void				*sense_buffer;
    257  1.1     skrll 	bus_addr_t			 sense_buffer_paddr;
    258  1.1     skrll 	struct pvscsi_sg_list		*sg_list;
    259  1.1     skrll 	bus_addr_t			 sg_list_paddr;
    260  1.1     skrll 	bus_addr_t			 sg_list_offset;
    261  1.1     skrll };
    262  1.1     skrll 
    263  1.1     skrll struct pvscsi_dma {
    264  1.1     skrll 	bus_dmamap_t		 map;
    265  1.1     skrll 	void		        *vaddr;
    266  1.1     skrll 	bus_addr_t	 	 paddr;
    267  1.1     skrll 	bus_size_t	 	 size;
    268  1.1     skrll 	bus_dma_segment_t	 seg[1];
    269  1.1     skrll };
    270  1.1     skrll 
    271  1.1     skrll struct pvscsi_softc {
    272  1.1     skrll 	device_t		 dev;
    273  1.1     skrll 	kmutex_t		 lock;
    274  1.1     skrll 
    275  1.1     skrll 	device_t		 sc_scsibus_dv;
    276  1.1     skrll 	struct scsipi_adapter	 sc_adapter;
    277  1.1     skrll 	struct scsipi_channel 	 sc_channel;
    278  1.1     skrll 
    279  1.1     skrll 	struct pvscsi_rings_state	*rings_state;
    280  1.1     skrll 	struct pvscsi_ring_req_desc	*req_ring;
    281  1.1     skrll 	struct pvscsi_ring_cmp_desc	*cmp_ring;
    282  1.1     skrll 	struct pvscsi_ring_msg_desc	*msg_ring;
    283  1.1     skrll 	uint32_t		 hcb_cnt;
    284  1.1     skrll 	struct pvscsi_hcb	*hcbs;
    285  1.1     skrll 	SLIST_HEAD(, pvscsi_hcb) free_list;
    286  1.1     skrll 
    287  1.1     skrll 	bus_dma_tag_t		sc_dmat;
    288  1.1     skrll 	bus_space_tag_t		sc_memt;
    289  1.1     skrll 	bus_space_handle_t	sc_memh;
    290  1.1     skrll 	bus_size_t		sc_mems;
    291  1.1     skrll 
    292  1.1     skrll 	bool		 use_msg;
    293  1.1     skrll 	uint32_t	 max_targets;
    294  1.1     skrll 	int		 mm_rid;
    295  1.1     skrll 	int		 irq_id;
    296  1.1     skrll 	int		 use_req_call_threshold;
    297  1.1     skrll 
    298  1.1     skrll 	pci_chipset_tag_t	 sc_pc;
    299  1.1     skrll 	pci_intr_handle_t *	 sc_pihp;
    300  1.1     skrll 	void			*sc_ih;
    301  1.1     skrll 
    302  1.1     skrll 	uint64_t	rings_state_ppn;
    303  1.1     skrll 	uint32_t	req_ring_num_pages;
    304  1.1     skrll 	uint64_t	req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING];
    305  1.1     skrll 	uint32_t	cmp_ring_num_pages;
    306  1.1     skrll 	uint64_t	cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING];
    307  1.1     skrll 	uint32_t	msg_ring_num_pages;
    308  1.1     skrll 	uint64_t	msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING];
    309  1.1     skrll 
    310  1.1     skrll 	struct	pvscsi_dma rings_state_dma;
    311  1.1     skrll 	struct	pvscsi_dma req_ring_dma;
    312  1.1     skrll 	struct	pvscsi_dma cmp_ring_dma;
    313  1.1     skrll 	struct	pvscsi_dma msg_ring_dma;
    314  1.1     skrll 
    315  1.1     skrll 	struct	pvscsi_dma sg_list_dma;
    316  1.1     skrll 	struct	pvscsi_dma sense_buffer_dma;
    317  1.1     skrll };
    318  1.1     skrll 
    319  1.1     skrll CFATTACH_DECL3_NEW(pvscsi, sizeof(struct pvscsi_softc),
    320  1.1     skrll     pvscsi_probe, pvscsi_attach, pvscsi_detach, NULL, NULL, NULL,
    321  1.1     skrll     DVF_DETACH_SHUTDOWN);
    322  1.1     skrll 
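/*
 * Sync helpers covering just one member of the shared rings state
 * structure, or just one descriptor of a ring, rather than the whole
 * DMA area.
 */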
    323  1.4  riastrad #define	PVSCSI_DMA_SYNC_STATE(sc, dma, structptr, member, ops)		      \
    324  1.4  riastrad 	bus_dmamap_sync((sc)->sc_dmat, (dma)->map,			      \
    325  1.4  riastrad 	    /*offset*/offsetof(__typeof__(*(structptr)), member),	      \
    326  1.4  riastrad 	    /*length*/sizeof((structptr)->member),			      \
    327  1.4  riastrad 	    (ops))
    328  1.4  riastrad 
    329  1.4  riastrad #define	PVSCSI_DMA_SYNC_RING(sc, dma, ring, idx, ops)			      \
    330  1.4  riastrad 	bus_dmamap_sync((sc)->sc_dmat, (dma)->map,			      \
    331  1.4  riastrad 	    /*offset*/sizeof(*(ring)) * (idx),				      \
    332  1.4  riastrad 	    /*length*/sizeof(*(ring)),					      \
    333  1.4  riastrad 	    (ops))
    334  1.4  riastrad 
    335  1.1     skrll static inline uint32_t
    336  1.1     skrll pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset)
    337  1.1     skrll {
    338  1.1     skrll 
    339  1.1     skrll 	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, offset));
    340  1.1     skrll }
    341  1.1     skrll 
    342  1.1     skrll static inline void
    343  1.1     skrll pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val)
    344  1.1     skrll {
    345  1.1     skrll 
    346  1.1     skrll 	bus_space_write_4(sc->sc_memt, sc->sc_memh, offset, val);
    347  1.1     skrll }
    348  1.1     skrll 
    349  1.1     skrll static inline uint32_t
    350  1.1     skrll pvscsi_read_intr_status(struct pvscsi_softc *sc)
    351  1.1     skrll {
    352  1.1     skrll 
    353  1.1     skrll 	return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS));
    354  1.1     skrll }
    355  1.1     skrll 
    356  1.1     skrll static inline void
    357  1.1     skrll pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val)
    358  1.1     skrll {
    359  1.1     skrll 
    360  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val);
    361  1.1     skrll }
    362  1.1     skrll 
    363  1.1     skrll static inline void
    364  1.1     skrll pvscsi_intr_enable(struct pvscsi_softc *sc)
    365  1.1     skrll {
    366  1.1     skrll 	uint32_t mask;
    367  1.1     skrll 
    368  1.1     skrll 	mask = PVSCSI_INTR_CMPL_MASK;
    369  1.1     skrll 	if (sc->use_msg) {
    370  1.1     skrll 		mask |= PVSCSI_INTR_MSG_MASK;
    371  1.1     skrll 	}
    372  1.1     skrll 
    373  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask);
    374  1.1     skrll }
    375  1.1     skrll 
    376  1.1     skrll static inline void
    377  1.1     skrll pvscsi_intr_disable(struct pvscsi_softc *sc)
    378  1.1     skrll {
    379  1.1     skrll 
    380  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0);
    381  1.1     skrll }
    382  1.1     skrll 
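/*
 * Notify the device that new request descriptors have been posted.
 * Data transfer commands (READ/WRITE 6/10/12/16) may skip the
 * doorbell while fewer than req_call_threshold requests are
 * outstanding; all other commands always hit the non-R/W doorbell.
 */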
    383  1.1     skrll static void
    384  1.1     skrll pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0)
    385  1.1     skrll {
    386  1.4  riastrad 	struct pvscsi_dma *s_dma;
    387  1.1     skrll 	struct pvscsi_rings_state *s;
    388  1.1     skrll 
    389  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "%s: cdb0 %#x\n", __func__, cdb0);
    390  1.1     skrll 	if (cdb0 == SCSI_READ_6_COMMAND  || cdb0 == READ_10  ||
    391  1.1     skrll 	    cdb0 == READ_12  || cdb0 == READ_16  ||
    392  1.1     skrll 	    cdb0 == SCSI_WRITE_6_COMMAND || cdb0 == WRITE_10 ||
    393  1.1     skrll 	    cdb0 == WRITE_12 || cdb0 == WRITE_16) {
    394  1.4  riastrad 		s_dma = &sc->rings_state_dma;
    395  1.1     skrll 		s = sc->rings_state;
    396  1.1     skrll 
    397  1.3  riastrad 		/*
    398  1.4  riastrad 		 * Ensure the command has been published before we read
    399  1.4  riastrad 		 * req_cons_idx to test whether we need to kick the
    400  1.4  riastrad 		 * host.
    401  1.3  riastrad 		 */
    402  1.3  riastrad 		paravirt_membar_sync();
    403  1.3  riastrad 
    404  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_cons_idx,
    405  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
    406  1.1     skrll 		DEBUG_PRINTF(2, sc->dev, "%s req prod %d cons %d\n", __func__,
    407  1.1     skrll 		    s->req_prod_idx, s->req_cons_idx);
    408  1.1     skrll 		if (!sc->use_req_call_threshold ||
    409  1.1     skrll 		    (s->req_prod_idx - s->req_cons_idx) >=
    410  1.1     skrll 		     s->req_call_threshold) {
    411  1.1     skrll 			pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
    412  1.1     skrll 			DEBUG_PRINTF(2, sc->dev, "kicked\n");
    413  1.1     skrll 		} else {
     414  1.1     skrll 			DEBUG_PRINTF(2, sc->dev, "kick skipped (below req call threshold)\n");
    415  1.1     skrll 		}
    416  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_cons_idx,
    417  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
    418  1.1     skrll 	} else {
    419  1.1     skrll 		s = sc->rings_state;
    420  1.4  riastrad 		/*
    421  1.4  riastrad 		 * XXX req_cons_idx in debug log might be stale, but no
    422  1.4  riastrad 		 * need for DMA sync otherwise in this branch
    423  1.4  riastrad 		 */
    424  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "%s req prod %d cons %d not checked\n", __func__,
    425  1.1     skrll 		    s->req_prod_idx, s->req_cons_idx);
    426  1.1     skrll 
    427  1.1     skrll 		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
    428  1.1     skrll 	}
    429  1.1     skrll }
    430  1.1     skrll 
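/*
 * Issue a device command: write the command code to the COMMAND
 * register, then feed the payload to COMMAND_DATA one 32-bit word at
 * a time.
 */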
    431  1.1     skrll static void
    432  1.1     skrll pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    433  1.1     skrll 		 uint32_t len)
    434  1.1     skrll {
    435  1.1     skrll 	uint32_t *data_ptr;
    436  1.1     skrll 	int i;
    437  1.1     skrll 
    438  1.1     skrll 	KASSERTMSG(len % sizeof(uint32_t) == 0,
    439  1.1     skrll 		"command size not a multiple of 4");
    440  1.1     skrll 
    441  1.1     skrll 	data_ptr = data;
    442  1.1     skrll 	len /= sizeof(uint32_t);
    443  1.1     skrll 
    444  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd);
    445  1.1     skrll 	for (i = 0; i < len; ++i) {
    446  1.1     skrll 		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA,
    447  1.1     skrll 		   data_ptr[i]);
    448  1.1     skrll 	}
    449  1.1     skrll }
    450  1.1     skrll 
    451  1.1     skrll static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    452  1.1     skrll     struct pvscsi_hcb *hcb)
    453  1.1     skrll {
    454  1.1     skrll 
    455  1.1     skrll 	/* Offset by 1 because context must not be 0 */
    456  1.1     skrll 	return (hcb - sc->hcbs + 1);
    457  1.1     skrll }
    458  1.1     skrll 
    459  1.1     skrll static inline struct pvscsi_hcb* pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    460  1.1     skrll     uint64_t context)
    461  1.1     skrll {
    462  1.1     skrll 
    463  1.1     skrll 	return (sc->hcbs + (context - 1));
    464  1.1     skrll }
    465  1.1     skrll 
    466  1.1     skrll static struct pvscsi_hcb *
    467  1.1     skrll pvscsi_hcb_get(struct pvscsi_softc *sc)
    468  1.1     skrll {
    469  1.1     skrll 	struct pvscsi_hcb *hcb;
    470  1.1     skrll 
    471  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
    472  1.1     skrll 
    473  1.1     skrll 	hcb = SLIST_FIRST(&sc->free_list);
    474  1.1     skrll 	if (hcb) {
    475  1.1     skrll 		SLIST_REMOVE_HEAD(&sc->free_list, links);
    476  1.1     skrll 	}
    477  1.1     skrll 
    478  1.1     skrll 	return (hcb);
    479  1.1     skrll }
    480  1.1     skrll 
    481  1.1     skrll static void
    482  1.1     skrll pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
    483  1.1     skrll {
    484  1.1     skrll 
    485  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
    486  1.1     skrll 	hcb->xs = NULL;
    487  1.1     skrll 	hcb->e = NULL;
    488  1.1     skrll 	hcb->recovery = PVSCSI_HCB_NONE;
    489  1.1     skrll 	SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
    490  1.1     skrll }
    491  1.1     skrll 
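/*
 * Ask the device how many targets it supports; fall back to 16 if
 * the command is not implemented (status reads back as all ones).
 */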
    492  1.1     skrll static uint32_t
    493  1.1     skrll pvscsi_get_max_targets(struct pvscsi_softc *sc)
    494  1.1     skrll {
    495  1.1     skrll 	uint32_t max_targets;
    496  1.1     skrll 
    497  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0);
    498  1.1     skrll 
    499  1.1     skrll 	max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    500  1.1     skrll 
    501  1.1     skrll 	if (max_targets == ~0) {
    502  1.1     skrll 		max_targets = 16;
    503  1.1     skrll 	}
    504  1.1     skrll 
    505  1.1     skrll 	return (max_targets);
    506  1.1     skrll }
    507  1.1     skrll 
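/*
 * Negotiate the request call threshold feature.  When enabled, the
 * device publishes req_call_threshold in the rings state and
 * pvscsi_kick_io() only rings the R/W doorbell once that many
 * requests are pending, saving doorbell writes.
 */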
    508  1.1     skrll static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable)
    509  1.1     skrll {
    510  1.1     skrll 	uint32_t status;
    511  1.1     skrll 	struct pvscsi_cmd_desc_setup_req_call cmd;
    512  1.1     skrll 
    513  1.1     skrll 	if (!pvscsi_get_tunable(sc, "pvscsi_use_req_call_threshold",
    514  1.1     skrll 	    pvscsi_use_req_call_threshold)) {
    515  1.1     skrll 		return (0);
    516  1.1     skrll 	}
    517  1.1     skrll 
    518  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
    519  1.1     skrll 	    PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
    520  1.1     skrll 	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    521  1.1     skrll 
    522  1.1     skrll 	if (status != -1) {
    523  1.1     skrll 		memset(&cmd, 0, sizeof(cmd));
    524  1.1     skrll 		cmd.enable = enable;
    525  1.1     skrll 		pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
    526  1.1     skrll 		    &cmd, sizeof(cmd));
    527  1.1     skrll 		status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    528  1.1     skrll 
    529  1.4  riastrad 		/*
    530  1.4  riastrad 		 * After setup, sync req_call_threshold before use.
    531  1.4  riastrad 		 * After this point it should be stable, so no need to
    532  1.4  riastrad 		 * sync again during use.
    533  1.4  riastrad 		 */
    534  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    535  1.4  riastrad 		    sc->rings_state, req_call_threshold,
    536  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
    537  1.4  riastrad 
    538  1.1     skrll 		return (status != 0);
    539  1.1     skrll 	} else {
    540  1.1     skrll 		return (0);
    541  1.1     skrll 	}
    542  1.1     skrll }
    543  1.1     skrll 
    544  1.1     skrll static void
    545  1.1     skrll pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma)
    546  1.1     skrll {
    547  1.1     skrll 
    548  1.1     skrll 	bus_dmamap_unload(sc->sc_dmat, dma->map);
    549  1.1     skrll 	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
    550  1.1     skrll 	bus_dmamap_destroy(sc->sc_dmat, dma->map);
    551  1.1     skrll 	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
    552  1.1     skrll 
    553  1.1     skrll 	memset(dma, 0, sizeof(*dma));
    554  1.1     skrll }
    555  1.1     skrll 
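/*
 * Allocate a physically contiguous, kernel-mapped DMA area using the
 * usual bus_dma sequence: dmamem_alloc, dmamem_map, dmamap_create,
 * dmamap_load (single segment).
 */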
    556  1.1     skrll static int
    557  1.1     skrll pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    558  1.1     skrll     bus_size_t size, bus_size_t alignment)
    559  1.1     skrll {
    560  1.1     skrll 	int error;
    561  1.1     skrll 	int nsegs;
    562  1.1     skrll 
    563  1.1     skrll 	memset(dma, 0, sizeof(*dma));
    564  1.1     skrll 
    565  1.1     skrll 	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, 0, dma->seg,
    566  1.1     skrll 	    __arraycount(dma->seg), &nsegs, BUS_DMA_WAITOK);
    567  1.1     skrll 	if (error) {
    568  1.1     skrll 		aprint_normal_dev(sc->dev, "error allocating dma mem, error %d\n",
    569  1.1     skrll 		    error);
    570  1.1     skrll 		goto fail;
    571  1.1     skrll 	}
    572  1.1     skrll 
    573  1.1     skrll 	error = bus_dmamem_map(sc->sc_dmat, dma->seg, nsegs, size,
    574  1.1     skrll 	    &dma->vaddr, BUS_DMA_WAITOK);
    575  1.1     skrll 	if (error != 0) {
    576  1.1     skrll 		device_printf(sc->dev, "Failed to map DMA memory\n");
    577  1.1     skrll 		goto dmamemmap_fail;
    578  1.1     skrll 	}
    579  1.1     skrll 
    580  1.1     skrll 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
    581  1.1     skrll 	    BUS_DMA_WAITOK, &dma->map);
    582  1.1     skrll 	if (error != 0) {
    583  1.1     skrll 		device_printf(sc->dev, "Failed to create DMA map\n");
    584  1.1     skrll 		goto dmamapcreate_fail;
    585  1.1     skrll 	}
    586  1.1     skrll 
    587  1.1     skrll 	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->vaddr, size,
    588  1.1     skrll 	    NULL, BUS_DMA_WAITOK);
    589  1.1     skrll 	if (error) {
     590  1.1     skrll 		aprint_normal_dev(sc->dev, "error loading dma map, error %d\n",
    591  1.1     skrll 		    error);
    592  1.1     skrll 		goto dmamapload_fail;
    593  1.1     skrll 	}
    594  1.1     skrll 
    595  1.1     skrll 	dma->paddr = dma->map->dm_segs[0].ds_addr;
    596  1.1     skrll 	dma->size = size;
    597  1.1     skrll 
    598  1.1     skrll 	return 0;
    599  1.1     skrll 
    600  1.1     skrll dmamapload_fail:
    601  1.1     skrll 	bus_dmamap_destroy(sc->sc_dmat, dma->map);
    602  1.1     skrll dmamapcreate_fail:
     603  1.1     skrll 	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, size);
    604  1.1     skrll dmamemmap_fail:
    605  1.1     skrll 	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
    606  1.1     skrll fail:
    607  1.1     skrll 
    608  1.1     skrll 	return (error);
    609  1.1     skrll }
    610  1.1     skrll 
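/*
 * Allocate a page-aligned DMA area of num_pages pages, zero it, and
 * record the physical page numbers the device expects in its setup
 * commands.
 */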
    611  1.1     skrll static int
    612  1.1     skrll pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    613  1.1     skrll     uint64_t *ppn_list, uint32_t num_pages)
    614  1.1     skrll {
    615  1.1     skrll 	int error;
    616  1.1     skrll 	uint32_t i;
    617  1.1     skrll 	uint64_t ppn;
    618  1.1     skrll 
    619  1.1     skrll 	error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE);
    620  1.1     skrll 	if (error) {
    621  1.1     skrll 		aprint_normal_dev(sc->dev, "Error allocating pages, error %d\n",
    622  1.1     skrll 		    error);
    623  1.1     skrll 		return (error);
    624  1.1     skrll 	}
    625  1.1     skrll 
    626  1.5  riastrad 	memset(dma->vaddr, 0, num_pages * PAGE_SIZE);
    627  1.5  riastrad 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, num_pages * PAGE_SIZE,
    628  1.5  riastrad 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    629  1.5  riastrad 
    630  1.1     skrll 	ppn = dma->paddr >> PAGE_SHIFT;
    631  1.1     skrll 	for (i = 0; i < num_pages; i++) {
    632  1.1     skrll 		ppn_list[i] = ppn + i;
    633  1.1     skrll 	}
    634  1.1     skrll 
    635  1.1     skrll 	return (0);
    636  1.1     skrll }
    637  1.1     skrll 
    638  1.1     skrll static void
    639  1.1     skrll pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated)
    640  1.1     skrll {
    641  1.1     skrll 	int i;
    642  1.1     skrll 	struct pvscsi_hcb *hcb;
    643  1.1     skrll 
    644  1.1     skrll 	for (i = 0; i < hcbs_allocated; ++i) {
    645  1.1     skrll 		hcb = sc->hcbs + i;
    646  1.1     skrll 		bus_dmamap_destroy(sc->sc_dmat, hcb->dma_map);
     647  1.1     skrll 	}
    648  1.1     skrll 
    649  1.1     skrll 	pvscsi_dma_free(sc, &sc->sense_buffer_dma);
    650  1.1     skrll 	pvscsi_dma_free(sc, &sc->sg_list_dma);
    651  1.1     skrll }
    652  1.1     skrll 
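/*
 * Allocate the shared scatter/gather list and sense buffer areas and
 * a data DMA map for every hcb, then point each hcb at its slice and
 * put it on the free list.
 */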
    653  1.1     skrll static int
    654  1.1     skrll pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc)
    655  1.1     skrll {
    656  1.1     skrll 	int i;
    657  1.1     skrll 	int error;
    658  1.1     skrll 	struct pvscsi_hcb *hcb;
    659  1.1     skrll 
    660  1.1     skrll 	i = 0;
    661  1.1     skrll 
    662  1.1     skrll 	error = pvscsi_dma_alloc(sc, &sc->sg_list_dma,
    663  1.1     skrll 	    sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1);
    664  1.1     skrll 	if (error) {
    665  1.1     skrll 		aprint_normal_dev(sc->dev,
     666  1.1     skrll 		    "Error allocating sg list DMA memory, error %d\n", error);
    667  1.1     skrll 		goto fail;
    668  1.1     skrll 	}
    669  1.1     skrll 
    670  1.1     skrll 	error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma,
    671  1.1     skrll 				 PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1);
    672  1.1     skrll 	if (error) {
    673  1.1     skrll 		aprint_normal_dev(sc->dev,
     674  1.1     skrll 		    "Error allocating sense buffer DMA memory, error %d\n", error);
    675  1.1     skrll 		goto fail;
    676  1.1     skrll 	}
    677  1.1     skrll 
    678  1.1     skrll 	for (i = 0; i < sc->hcb_cnt; ++i) {
    679  1.1     skrll 		hcb = sc->hcbs + i;
    680  1.1     skrll 
    681  1.1     skrll 		error = bus_dmamap_create(sc->sc_dmat, PVSCSI_MAXPHYS,
    682  1.1     skrll 		    PVSCSI_MAXPHYS_SEGS, PVSCSI_MAXPHYS, 0,
    683  1.1     skrll 		    BUS_DMA_WAITOK, &hcb->dma_map);
    684  1.1     skrll 		if (error) {
    685  1.1     skrll 			aprint_normal_dev(sc->dev,
    686  1.1     skrll 			    "Error creating dma map for hcb %d, error %d\n",
    687  1.1     skrll 			    i, error);
    688  1.1     skrll 			goto fail;
    689  1.1     skrll 		}
    690  1.1     skrll 
    691  1.1     skrll 		hcb->sc = sc;
    692  1.1     skrll 		hcb->dma_map_offset = PVSCSI_SENSE_LENGTH * i;
    693  1.1     skrll 		hcb->dma_map_size = PVSCSI_SENSE_LENGTH;
    694  1.1     skrll 		hcb->sense_buffer =
    695  1.1     skrll 		    (void *)((char *)sc->sense_buffer_dma.vaddr +
    696  1.1     skrll 		    PVSCSI_SENSE_LENGTH * i);
    697  1.1     skrll 		hcb->sense_buffer_paddr = sc->sense_buffer_dma.paddr +
    698  1.1     skrll 		    PVSCSI_SENSE_LENGTH * i;
    699  1.1     skrll 
    700  1.1     skrll 		hcb->sg_list =
    701  1.1     skrll 		    (struct pvscsi_sg_list *)((char *)sc->sg_list_dma.vaddr +
    702  1.1     skrll 		    sizeof(struct pvscsi_sg_list) * i);
    703  1.1     skrll 		hcb->sg_list_paddr =
    704  1.1     skrll 		    sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i;
    705  1.1     skrll 		hcb->sg_list_offset = sizeof(struct pvscsi_sg_list) * i;
    706  1.1     skrll 	}
    707  1.1     skrll 
    708  1.1     skrll 	SLIST_INIT(&sc->free_list);
    709  1.1     skrll 	for (i = (sc->hcb_cnt - 1); i >= 0; --i) {
    710  1.1     skrll 		hcb = sc->hcbs + i;
    711  1.1     skrll 		SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
    712  1.1     skrll 	}
    713  1.1     skrll 
    714  1.1     skrll fail:
    715  1.1     skrll 	if (error) {
    716  1.1     skrll 		pvscsi_dma_free_per_hcb(sc, i);
    717  1.1     skrll 	}
    718  1.1     skrll 
    719  1.1     skrll 	return (error);
    720  1.1     skrll }
    721  1.1     skrll 
    722  1.1     skrll static void
    723  1.1     skrll pvscsi_free_rings(struct pvscsi_softc *sc)
    724  1.1     skrll {
    725  1.1     skrll 
    726  1.5  riastrad 	bus_dmamap_sync(sc->sc_dmat, sc->rings_state_dma.map,
    727  1.5  riastrad 	    0, sc->rings_state_dma.size,
    728  1.5  riastrad 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    729  1.5  riastrad 	bus_dmamap_sync(sc->sc_dmat, sc->req_ring_dma.map,
    730  1.5  riastrad 	    0, sc->req_ring_dma.size,
    731  1.5  riastrad 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    732  1.5  riastrad 	bus_dmamap_sync(sc->sc_dmat, sc->cmp_ring_dma.map,
    733  1.5  riastrad 	    0, sc->cmp_ring_dma.size,
    734  1.5  riastrad 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    735  1.5  riastrad 
    736  1.1     skrll 	pvscsi_dma_free(sc, &sc->rings_state_dma);
    737  1.1     skrll 	pvscsi_dma_free(sc, &sc->req_ring_dma);
    738  1.1     skrll 	pvscsi_dma_free(sc, &sc->cmp_ring_dma);
    739  1.1     skrll 	if (sc->use_msg) {
    740  1.1     skrll 		pvscsi_dma_free(sc, &sc->msg_ring_dma);
    741  1.1     skrll 	}
    742  1.1     skrll }
    743  1.1     skrll 
    744  1.1     skrll static int
    745  1.1     skrll pvscsi_allocate_rings(struct pvscsi_softc *sc)
    746  1.1     skrll {
    747  1.1     skrll 	int error;
    748  1.1     skrll 
    749  1.1     skrll 	error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma,
    750  1.1     skrll 	    &sc->rings_state_ppn, 1);
    751  1.1     skrll 	if (error) {
    752  1.1     skrll 		aprint_normal_dev(sc->dev,
    753  1.1     skrll 		    "Error allocating rings state, error = %d\n", error);
    754  1.1     skrll 		goto fail;
    755  1.1     skrll 	}
    756  1.1     skrll 	sc->rings_state = sc->rings_state_dma.vaddr;
    757  1.1     skrll 
    758  1.1     skrll 	error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn,
    759  1.1     skrll 	    sc->req_ring_num_pages);
    760  1.1     skrll 	if (error) {
    761  1.1     skrll 		aprint_normal_dev(sc->dev,
    762  1.1     skrll 		    "Error allocating req ring pages, error = %d\n", error);
    763  1.1     skrll 		goto fail;
    764  1.1     skrll 	}
    765  1.1     skrll 	sc->req_ring = sc->req_ring_dma.vaddr;
    766  1.1     skrll 
    767  1.1     skrll 	error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn,
    768  1.1     skrll 	    sc->cmp_ring_num_pages);
    769  1.1     skrll 	if (error) {
    770  1.1     skrll 		aprint_normal_dev(sc->dev,
    771  1.1     skrll 		    "Error allocating cmp ring pages, error = %d\n", error);
    772  1.1     skrll 		goto fail;
    773  1.1     skrll 	}
    774  1.1     skrll 	sc->cmp_ring = sc->cmp_ring_dma.vaddr;
    775  1.1     skrll 
    776  1.1     skrll 	sc->msg_ring = NULL;
    777  1.1     skrll 	if (sc->use_msg) {
    778  1.1     skrll 		error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma,
    779  1.1     skrll 		    sc->msg_ring_ppn, sc->msg_ring_num_pages);
    780  1.1     skrll 		if (error) {
    781  1.1     skrll 			aprint_normal_dev(sc->dev,
     782  1.1     skrll 			    "Error allocating msg ring pages, error = %d\n",
    783  1.1     skrll 			    error);
    784  1.1     skrll 			goto fail;
    785  1.1     skrll 		}
    786  1.1     skrll 		sc->msg_ring = sc->msg_ring_dma.vaddr;
    787  1.1     skrll 	}
    788  1.1     skrll 
    789  1.1     skrll fail:
    790  1.1     skrll 	if (error) {
    791  1.1     skrll 		pvscsi_free_rings(sc);
    792  1.1     skrll 	}
    793  1.1     skrll 	return (error);
    794  1.1     skrll }
    795  1.1     skrll 
    796  1.1     skrll static void
    797  1.1     skrll pvscsi_setup_rings(struct pvscsi_softc *sc)
    798  1.1     skrll {
    799  1.1     skrll 	struct pvscsi_cmd_desc_setup_rings cmd;
    800  1.1     skrll 	uint32_t i;
    801  1.1     skrll 
    802  1.1     skrll 	memset(&cmd, 0, sizeof(cmd));
    803  1.1     skrll 
    804  1.1     skrll 	cmd.rings_state_ppn = sc->rings_state_ppn;
    805  1.1     skrll 
    806  1.1     skrll 	cmd.req_ring_num_pages = sc->req_ring_num_pages;
    807  1.1     skrll 	for (i = 0; i < sc->req_ring_num_pages; ++i) {
    808  1.1     skrll 		cmd.req_ring_ppns[i] = sc->req_ring_ppn[i];
    809  1.1     skrll 	}
    810  1.1     skrll 
    811  1.1     skrll 	cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages;
    812  1.1     skrll 	for (i = 0; i < sc->cmp_ring_num_pages; ++i) {
    813  1.1     skrll 		cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i];
    814  1.1     skrll 	}
    815  1.1     skrll 
    816  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
    817  1.4  riastrad 
    818  1.4  riastrad 	/*
    819  1.4  riastrad 	 * After setup, sync *_num_entries_log2 before use.  After this
    820  1.4  riastrad 	 * point they should be stable, so no need to sync again during
    821  1.4  riastrad 	 * use.
    822  1.4  riastrad 	 */
    823  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    824  1.4  riastrad 	    sc->rings_state, req_num_entries_log2,
    825  1.4  riastrad 	    BUS_DMASYNC_POSTREAD);
    826  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    827  1.4  riastrad 	    sc->rings_state, cmp_num_entries_log2,
    828  1.4  riastrad 	    BUS_DMASYNC_POSTREAD);
    829  1.1     skrll }
    830  1.1     skrll 
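/*
 * Probe for message ring support by writing the SETUP_MSG_RING
 * command with no payload and checking whether the device rejects it.
 */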
    831  1.1     skrll static int
    832  1.1     skrll pvscsi_hw_supports_msg(struct pvscsi_softc *sc)
    833  1.1     skrll {
    834  1.1     skrll 	uint32_t status;
    835  1.1     skrll 
    836  1.1     skrll 	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
    837  1.1     skrll 	    PVSCSI_CMD_SETUP_MSG_RING);
    838  1.1     skrll 	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
    839  1.1     skrll 
    840  1.1     skrll 	return (status != -1);
    841  1.1     skrll }
    842  1.1     skrll 
    843  1.1     skrll static void
    844  1.1     skrll pvscsi_setup_msg_ring(struct pvscsi_softc *sc)
    845  1.1     skrll {
    846  1.1     skrll 	struct pvscsi_cmd_desc_setup_msg_ring cmd;
    847  1.1     skrll 	uint32_t i;
    848  1.1     skrll 
    849  1.1     skrll 	KASSERTMSG(sc->use_msg, "msg is not being used");
    850  1.1     skrll 
    851  1.1     skrll 	memset(&cmd, 0, sizeof(cmd));
    852  1.1     skrll 
    853  1.1     skrll 	cmd.num_pages = sc->msg_ring_num_pages;
    854  1.1     skrll 	for (i = 0; i < sc->msg_ring_num_pages; ++i) {
    855  1.1     skrll 		cmd.ring_ppns[i] = sc->msg_ring_ppn[i];
    856  1.1     skrll 	}
    857  1.1     skrll 
    858  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
    859  1.4  riastrad 
    860  1.4  riastrad 	/*
    861  1.4  riastrad 	 * After setup, sync msg_num_entries_log2 before use.  After
    862  1.4  riastrad 	 * this point it should be stable, so no need to sync again
    863  1.4  riastrad 	 * during use.
    864  1.4  riastrad 	 */
    865  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
    866  1.4  riastrad 	    sc->rings_state, msg_num_entries_log2,
    867  1.4  riastrad 	    BUS_DMASYNC_POSTREAD);
    868  1.1     skrll }
    869  1.1     skrll 
    870  1.1     skrll static void
    871  1.1     skrll pvscsi_adapter_reset(struct pvscsi_softc *sc)
    872  1.1     skrll {
    873  1.1     skrll 	aprint_normal_dev(sc->dev, "Adapter Reset\n");
    874  1.1     skrll 
    875  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
    876  1.1     skrll #ifdef PVSCSI_DEBUG_LOGGING
    877  1.1     skrll 	uint32_t val =
    878  1.1     skrll #endif
    879  1.1     skrll 	pvscsi_read_intr_status(sc);
    880  1.1     skrll 
    881  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val);
    882  1.1     skrll }
    883  1.1     skrll 
    884  1.1     skrll static void
    885  1.1     skrll pvscsi_bus_reset(struct pvscsi_softc *sc)
    886  1.1     skrll {
    887  1.1     skrll 
    888  1.1     skrll 	aprint_normal_dev(sc->dev, "Bus Reset\n");
    889  1.1     skrll 
    890  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0);
    891  1.1     skrll 	pvscsi_process_cmp_ring(sc);
    892  1.1     skrll 
    893  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "bus reset done\n");
    894  1.1     skrll }
    895  1.1     skrll 
    896  1.1     skrll static void
    897  1.1     skrll pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target)
    898  1.1     skrll {
    899  1.1     skrll 	struct pvscsi_cmd_desc_reset_device cmd;
    900  1.1     skrll 
    901  1.1     skrll 	memset(&cmd, 0, sizeof(cmd));
    902  1.1     skrll 
    903  1.1     skrll 	cmd.target = target;
    904  1.1     skrll 
    905  1.1     skrll 	aprint_normal_dev(sc->dev, "Device reset for target %u\n", target);
    906  1.1     skrll 
    907  1.1     skrll 	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof cmd);
    908  1.1     skrll 	pvscsi_process_cmp_ring(sc);
    909  1.1     skrll 
    910  1.1     skrll 	DEBUG_PRINTF(2, sc->dev, "device reset done\n");
    911  1.1     skrll }
    912  1.1     skrll 
    913  1.1     skrll static void
    914  1.1     skrll pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, struct pvscsi_hcb *hcb)
    915  1.1     skrll {
    916  1.1     skrll 	struct pvscsi_cmd_desc_abort_cmd cmd;
    917  1.1     skrll 	uint64_t context;
    918  1.1     skrll 
    919  1.1     skrll 	pvscsi_process_cmp_ring(sc);
    920  1.1     skrll 
    921  1.1     skrll 	if (hcb != NULL) {
    922  1.1     skrll 		context = pvscsi_hcb_to_context(sc, hcb);
    923  1.1     skrll 
    924  1.1     skrll 		memset(&cmd, 0, sizeof cmd);
    925  1.1     skrll 		cmd.target = target;
    926  1.1     skrll 		cmd.context = context;
    927  1.1     skrll 
    928  1.1     skrll 		aprint_normal_dev(sc->dev, "Abort for target %u context %llx\n",
    929  1.1     skrll 		    target, (unsigned long long)context);
    930  1.1     skrll 
    931  1.1     skrll 		pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
    932  1.1     skrll 		pvscsi_process_cmp_ring(sc);
    933  1.1     skrll 
    934  1.1     skrll 		DEBUG_PRINTF(2, sc->dev, "abort done\n");
    935  1.1     skrll 	} else {
    936  1.1     skrll 		DEBUG_PRINTF(1, sc->dev,
    937  1.1     skrll 		    "Target %u hcb %p not found for abort\n", target, hcb);
    938  1.1     skrll 	}
    939  1.1     skrll }
    940  1.1     skrll 
    941  1.1     skrll static int
    942  1.1     skrll pvscsi_probe(device_t dev, cfdata_t cf, void *aux)
    943  1.1     skrll {
    944  1.1     skrll 	const struct pci_attach_args *pa = aux;
    945  1.1     skrll 
    946  1.1     skrll 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
    947  1.1     skrll 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI) {
    948  1.1     skrll 		return 1;
    949  1.1     skrll 	}
    950  1.1     skrll 	return 0;
    951  1.1     skrll }
    952  1.1     skrll 
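/*
 * Command timeout handler: escalate recovery on each expiry, from
 * aborting the command, to resetting the device, the bus, and
 * finally the whole adapter.
 */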
    953  1.1     skrll static void
    954  1.1     skrll pvscsi_timeout(void *arg)
    955  1.1     skrll {
    956  1.1     skrll 	struct pvscsi_hcb *hcb = arg;
    957  1.1     skrll 	struct scsipi_xfer *xs = hcb->xs;
    958  1.1     skrll 
    959  1.1     skrll 	if (xs == NULL) {
    960  1.1     skrll 		/* Already completed */
    961  1.1     skrll 		return;
    962  1.1     skrll 	}
    963  1.1     skrll 
    964  1.1     skrll 	struct pvscsi_softc *sc = hcb->sc;
    965  1.1     skrll 
    966  1.1     skrll 	mutex_enter(&sc->lock);
    967  1.1     skrll 
    968  1.1     skrll 	scsipi_printaddr(xs->xs_periph);
    969  1.1     skrll 	printf("command timeout, CDB: ");
    970  1.1     skrll 	scsipi_print_cdb(xs->cmd);
    971  1.1     skrll 	printf("\n");
    972  1.1     skrll 
    973  1.1     skrll 	switch (hcb->recovery) {
    974  1.1     skrll 	case PVSCSI_HCB_NONE:
    975  1.1     skrll 		hcb->recovery = PVSCSI_HCB_ABORT;
    976  1.1     skrll 		pvscsi_abort(sc, hcb->e->target, hcb);
    977  1.1     skrll 		callout_reset(&xs->xs_callout,
    978  1.1     skrll 		    mstohz(PVSCSI_ABORT_TIMEOUT * 1000),
    979  1.1     skrll 		    pvscsi_timeout, hcb);
    980  1.1     skrll 		break;
    981  1.1     skrll 	case PVSCSI_HCB_ABORT:
    982  1.1     skrll 		hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
    983  1.1     skrll 		pvscsi_device_reset(sc, hcb->e->target);
    984  1.1     skrll 		callout_reset(&xs->xs_callout,
    985  1.1     skrll 		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
    986  1.1     skrll 		    pvscsi_timeout, hcb);
    987  1.1     skrll 		break;
    988  1.1     skrll 	case PVSCSI_HCB_DEVICE_RESET:
    989  1.1     skrll 		hcb->recovery = PVSCSI_HCB_BUS_RESET;
    990  1.1     skrll 		pvscsi_bus_reset(sc);
    991  1.1     skrll 		callout_reset(&xs->xs_callout,
    992  1.1     skrll 		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
    993  1.1     skrll 		    pvscsi_timeout, hcb);
    994  1.1     skrll 		break;
    995  1.1     skrll 	case PVSCSI_HCB_BUS_RESET:
    996  1.1     skrll 		pvscsi_adapter_reset(sc);
    997  1.1     skrll 		break;
     998  1.1     skrll 	}
    999  1.1     skrll 	mutex_exit(&sc->lock);
   1000  1.1     skrll }
   1001  1.1     skrll 
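/*
 * Translate one completion descriptor into scsipi terms: map the
 * host adapter and SCSI status to xs->error, copy sense data on a
 * check condition, then release the hcb and complete the xfer.
 */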
   1002  1.1     skrll static void
   1003  1.1     skrll pvscsi_process_completion(struct pvscsi_softc *sc,
   1004  1.1     skrll     struct pvscsi_ring_cmp_desc *e)
   1005  1.1     skrll {
   1006  1.1     skrll 	struct pvscsi_hcb *hcb;
   1007  1.1     skrll 	struct scsipi_xfer *xs;
   1008  1.1     skrll 	uint32_t error = XS_NOERROR;
   1009  1.1     skrll 	uint32_t btstat;
   1010  1.1     skrll 	uint32_t sdstat;
   1011  1.1     skrll 	int op;
   1012  1.1     skrll 
   1013  1.1     skrll 	hcb = pvscsi_context_to_hcb(sc, e->context);
   1014  1.1     skrll 	xs = hcb->xs;
   1015  1.1     skrll 
   1016  1.1     skrll 	callout_stop(&xs->xs_callout);
   1017  1.1     skrll 
   1018  1.1     skrll 	btstat = e->host_status;
   1019  1.1     skrll 	sdstat = e->scsi_status;
   1020  1.1     skrll 
   1021  1.1     skrll 	xs->status = sdstat;
   1022  1.1     skrll 	xs->resid = xs->datalen - e->data_len;
   1023  1.1     skrll 
   1024  1.1     skrll 	DEBUG_PRINTF(3, sc->dev,
   1025  1.1     skrll 	    "command context %llx btstat %d (%#x) sdstat %d (%#x)\n",
   1026  1.1     skrll 	    (unsigned long long)e->context, btstat, btstat, sdstat, sdstat);
   1027  1.1     skrll 
   1028  1.1     skrll 	if ((xs->xs_control & XS_CTL_DATA_IN) == XS_CTL_DATA_IN) {
   1029  1.1     skrll 		op = BUS_DMASYNC_POSTREAD;
   1030  1.1     skrll 	} else {
   1031  1.1     skrll 		op = BUS_DMASYNC_POSTWRITE;
   1032  1.1     skrll 	}
   1033  1.1     skrll 	bus_dmamap_sync(sc->sc_dmat, sc->sense_buffer_dma.map,
   1034  1.1     skrll 	    hcb->dma_map_offset, hcb->dma_map_size, op);
   1035  1.1     skrll 
   1036  1.1     skrll 	if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_OK) {
   1037  1.1     skrll 		DEBUG_PRINTF(3, sc->dev,
   1038  1.1     skrll 		    "completing command context %llx success\n",
   1039  1.1     skrll 		    (unsigned long long)e->context);
   1040  1.1     skrll 		xs->resid = 0;
   1041  1.1     skrll 	} else {
   1042  1.1     skrll 		switch (btstat) {
   1043  1.1     skrll 		case BTSTAT_SUCCESS:
   1044  1.1     skrll 		case BTSTAT_LINKED_COMMAND_COMPLETED:
   1045  1.1     skrll 		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
   1046  1.1     skrll 			switch (sdstat) {
   1047  1.1     skrll 			case SCSI_OK:
   1048  1.1     skrll 				xs->resid = 0;
   1049  1.1     skrll 				error = XS_NOERROR;
   1050  1.1     skrll 				break;
   1051  1.1     skrll 			case SCSI_CHECK:
   1052  1.1     skrll 				error = XS_SENSE;
   1053  1.1     skrll 				xs->resid = 0;
   1054  1.1     skrll 
   1055  1.1     skrll 				memset(&xs->sense, 0, sizeof(xs->sense));
   1056  1.1     skrll 				memcpy(&xs->sense, hcb->sense_buffer,
   1057  1.1     skrll 				    MIN(sizeof(xs->sense), e->sense_len));
   1058  1.1     skrll 				break;
   1059  1.1     skrll 			case SCSI_BUSY:
   1060  1.1     skrll 			case SCSI_QUEUE_FULL:
   1061  1.1     skrll 				error = XS_NOERROR;
   1062  1.1     skrll 				break;
   1063  1.1     skrll 			case SCSI_TERMINATED:
   1064  1.1     skrll // 			case SCSI_STATUS_TASK_ABORTED:
   1065  1.1     skrll 				DEBUG_PRINTF(1, sc->dev,
   1066  1.1     skrll 				    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1067  1.1     skrll 				error = XS_DRIVER_STUFFUP;
   1068  1.1     skrll 				break;
   1069  1.1     skrll 			default:
   1070  1.1     skrll 				DEBUG_PRINTF(1, sc->dev,
   1071  1.1     skrll 				    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1072  1.1     skrll 				error = XS_DRIVER_STUFFUP;
   1073  1.1     skrll 				break;
   1074  1.1     skrll 			}
   1075  1.1     skrll 			break;
   1076  1.1     skrll 		case BTSTAT_SELTIMEO:
   1077  1.1     skrll 			error = XS_SELTIMEOUT;
   1078  1.1     skrll 			break;
   1079  1.1     skrll 		case BTSTAT_DATARUN:
   1080  1.1     skrll 		case BTSTAT_DATA_UNDERRUN:
   1081  1.1     skrll //			xs->resid = xs->datalen - c->data_len;
   1082  1.1     skrll 			error = XS_NOERROR;
   1083  1.1     skrll 			break;
   1084  1.1     skrll 		case BTSTAT_ABORTQUEUE:
   1085  1.1     skrll 		case BTSTAT_HATIMEOUT:
   1086  1.1     skrll 			error = XS_NOERROR;
   1087  1.1     skrll 			break;
   1088  1.1     skrll 		case BTSTAT_NORESPONSE:
   1089  1.1     skrll 		case BTSTAT_SENTRST:
   1090  1.1     skrll 		case BTSTAT_RECVRST:
   1091  1.1     skrll 		case BTSTAT_BUSRESET:
   1092  1.1     skrll 			error = XS_RESET;
   1093  1.1     skrll 			break;
   1094  1.1     skrll 		case BTSTAT_SCSIPARITY:
   1095  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1096  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1097  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1098  1.1     skrll 			break;
   1099  1.1     skrll 		case BTSTAT_BUSFREE:
   1100  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1101  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1102  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1103  1.1     skrll 			break;
   1104  1.1     skrll 		case BTSTAT_INVPHASE:
   1105  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1106  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1107  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1108  1.1     skrll 			break;
   1109  1.1     skrll 		case BTSTAT_SENSFAILED:
   1110  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1111  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1112  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1113  1.1     skrll 			break;
   1114  1.1     skrll 		case BTSTAT_LUNMISMATCH:
   1115  1.1     skrll 		case BTSTAT_TAGREJECT:
   1116  1.1     skrll 		case BTSTAT_DISCONNECT:
   1117  1.1     skrll 		case BTSTAT_BADMSG:
   1118  1.1     skrll 		case BTSTAT_INVPARAM:
   1119  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1120  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1121  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1122  1.1     skrll 			break;
   1123  1.1     skrll 		case BTSTAT_HASOFTWARE:
   1124  1.1     skrll 		case BTSTAT_HAHARDWARE:
   1125  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1126  1.1     skrll 			DEBUG_PRINTF(1, sc->dev,
   1127  1.1     skrll 			    "xs: %p sdstat=0x%x\n", xs, sdstat);
   1128  1.1     skrll 			break;
   1129  1.1     skrll 		default:
   1130  1.1     skrll 			aprint_normal_dev(sc->dev, "unknown hba status: 0x%x\n",
   1131  1.1     skrll 			    btstat);
   1132  1.1     skrll 			error = XS_DRIVER_STUFFUP;
   1133  1.1     skrll 			break;
   1134  1.1     skrll 		}
   1135  1.1     skrll 
   1136  1.1     skrll 		DEBUG_PRINTF(3, sc->dev,
   1137  1.1     skrll 		    "completing command context %llx btstat %x sdstat %x - error %x\n",
   1138  1.1     skrll 		    (unsigned long long)e->context, btstat, sdstat, error);
   1139  1.1     skrll 	}
   1140  1.1     skrll 
   1141  1.1     skrll 	xs->error = error;
   1142  1.1     skrll 	pvscsi_hcb_put(sc, hcb);
   1143  1.1     skrll 
   1144  1.1     skrll 	mutex_exit(&sc->lock);
   1145  1.1     skrll 
   1146  1.1     skrll 	scsipi_done(xs);
   1147  1.1     skrll 
   1148  1.1     skrll 	mutex_enter(&sc->lock);
   1149  1.1     skrll }
   1150  1.1     skrll 
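/*
 * pvscsi_process_cmp_ring:
 *
 *	Drain the completion ring, handing each descriptor the device has
 *	produced to pvscsi_process_completion() and advancing cmp_cons_idx.
 *	Called with sc->lock held.
 */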
   1151  1.1     skrll static void
   1152  1.1     skrll pvscsi_process_cmp_ring(struct pvscsi_softc *sc)
   1153  1.1     skrll {
   1154  1.4  riastrad 	struct pvscsi_dma *ring_dma;
   1155  1.1     skrll 	struct pvscsi_ring_cmp_desc *ring;
   1156  1.4  riastrad 	struct pvscsi_dma *s_dma;
   1157  1.1     skrll 	struct pvscsi_rings_state *s;
   1158  1.1     skrll 	struct pvscsi_ring_cmp_desc *e;
   1159  1.1     skrll 	uint32_t mask;
   1160  1.1     skrll 
   1161  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
   1162  1.1     skrll 
   1163  1.4  riastrad 	s_dma = &sc->rings_state_dma;
   1164  1.1     skrll 	s = sc->rings_state;
   1165  1.4  riastrad 	ring_dma = &sc->cmp_ring_dma;
   1166  1.1     skrll 	ring = sc->cmp_ring;
   1167  1.1     skrll 	mask = MASK(s->cmp_num_entries_log2);
   1168  1.1     skrll 
   1169  1.4  riastrad 	for (;;) {
   1170  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_prod_idx,
   1171  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1172  1.1     skrll 		size_t crpidx = s->cmp_prod_idx;
   1173  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_prod_idx,
   1174  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1175  1.1     skrll 
   1176  1.1     skrll 		if (s->cmp_cons_idx == crpidx)
   1177  1.1     skrll 			break;
   1178  1.1     skrll 
   1179  1.1     skrll 		size_t crcidx = s->cmp_cons_idx & mask;
   1180  1.1     skrll 
   1181  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, crcidx,
   1182  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1183  1.4  riastrad 
   1184  1.1     skrll 		e = ring + crcidx;
   1185  1.1     skrll 
   1186  1.1     skrll 		pvscsi_process_completion(sc, e);
   1187  1.1     skrll 
   1188  1.1     skrll 		/*
   1189  1.1     skrll 		 * ensure completion processing reads happen before write to
   1190  1.1     skrll 		 * (increment of) cmp_cons_idx
   1191  1.1     skrll 		 */
   1192  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, crcidx,
   1193  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1194  1.4  riastrad 
   1195  1.4  riastrad 		/*
   1196  1.4  riastrad 		 * XXX Not actually sure the `device' does DMA for
   1197  1.4  riastrad 		 * s->cmp_cons_idx at all -- qemu doesn't.  If not, we
   1198  1.4  riastrad 		 * can skip these DMA syncs.
   1199  1.4  riastrad 		 */
   1200  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_cons_idx,
   1201  1.4  riastrad 		    BUS_DMASYNC_POSTWRITE);
   1202  1.1     skrll 		s->cmp_cons_idx++;
   1203  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_cons_idx,
   1204  1.4  riastrad 		    BUS_DMASYNC_PREWRITE);
   1205  1.1     skrll 	}
   1206  1.1     skrll }
   1207  1.1     skrll 
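/*
 * pvscsi_process_msg:
 *
 *	Handle one message ring descriptor: on device addition, probe the
 *	scsipi bus for the new target/lun; on device removal, detach it.
 */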
   1208  1.1     skrll static void
   1209  1.1     skrll pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e)
   1210  1.1     skrll {
   1211  1.1     skrll 	struct pvscsi_ring_msg_dev_status_changed *desc;
   1212  1.1     skrll 
   1213  1.1     skrll 	switch (e->type) {
   1214  1.1     skrll 	case PVSCSI_MSG_DEV_ADDED:
   1215  1.1     skrll 	case PVSCSI_MSG_DEV_REMOVED: {
   1216  1.1     skrll 		desc = (struct pvscsi_ring_msg_dev_status_changed *)e;
   1217  1.1     skrll 		struct scsibus_softc *ssc = device_private(sc->sc_scsibus_dv);
   1218  1.1     skrll 
   1219  1.1     skrll 		aprint_normal_dev(sc->dev, "MSG: device %s at scsi%u:%u:%u\n",
   1220  1.1     skrll 		    desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal",
   1221  1.1     skrll 		    desc->bus, desc->target, desc->lun[1]);
   1222  1.1     skrll 
   1223  1.1     skrll 		if (desc->type == PVSCSI_MSG_DEV_ADDED) {
   1224  1.1     skrll 			if (scsi_probe_bus(ssc,
   1225  1.1     skrll 			    desc->target, desc->lun[1]) != 0) {
				aprint_normal_dev(sc->dev,
				    "Error probing bus for added device.\n");
   1228  1.1     skrll 				break;
   1229  1.1     skrll 			}
   1230  1.1     skrll 		} else {
   1231  1.1     skrll 			if (scsipi_target_detach(ssc->sc_channel,
   1232  1.1     skrll 			    desc->target, desc->lun[1],
   1233  1.1     skrll 			    DETACH_FORCE) != 0) {
   1234  1.1     skrll 				aprint_normal_dev(sc->dev,
   1235  1.1     skrll 				    "Error detaching target %d lun %d\n",
   1236  1.1     skrll 				    desc->target, desc->lun[1]);
			}
		}
   1240  1.1     skrll 	} break;
   1241  1.1     skrll 	default:
   1242  1.1     skrll 		aprint_normal_dev(sc->dev, "Unknown msg type 0x%x\n", e->type);
	}
   1244  1.1     skrll }
   1245  1.1     skrll 
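/*
 * pvscsi_process_msg_ring:
 *
 *	Drain the message ring, handing each descriptor the device has
 *	produced to pvscsi_process_msg() and advancing msg_cons_idx.
 *	Called with sc->lock held.
 */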
   1246  1.1     skrll static void
   1247  1.1     skrll pvscsi_process_msg_ring(struct pvscsi_softc *sc)
   1248  1.1     skrll {
   1249  1.4  riastrad 	struct pvscsi_dma *ring_dma;
   1250  1.1     skrll 	struct pvscsi_ring_msg_desc *ring;
   1251  1.4  riastrad 	struct pvscsi_dma *s_dma;
   1252  1.1     skrll 	struct pvscsi_rings_state *s;
   1253  1.1     skrll 	struct pvscsi_ring_msg_desc *e;
   1254  1.1     skrll 	uint32_t mask;
   1255  1.1     skrll 
   1256  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
   1257  1.1     skrll 
   1258  1.4  riastrad 	s_dma = &sc->rings_state_dma;
   1259  1.1     skrll 	s = sc->rings_state;
   1260  1.4  riastrad 	ring_dma = &sc->msg_ring_dma;
   1261  1.1     skrll 	ring = sc->msg_ring;
   1262  1.1     skrll 	mask = MASK(s->msg_num_entries_log2);
   1263  1.1     skrll 
   1264  1.4  riastrad 	for (;;) {
   1265  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_prod_idx,
   1266  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1267  1.1     skrll 		size_t mpidx = s->msg_prod_idx;	// dma read (device -> cpu)
   1268  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_prod_idx,
   1269  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1270  1.1     skrll 
   1271  1.1     skrll 		if (s->msg_cons_idx == mpidx)
   1272  1.1     skrll 			break;
   1273  1.1     skrll 
   1274  1.1     skrll 		size_t mcidx = s->msg_cons_idx & mask;
   1275  1.1     skrll 
   1276  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, mcidx,
   1277  1.4  riastrad 		    BUS_DMASYNC_POSTREAD);
   1278  1.4  riastrad 
   1279  1.1     skrll 		e = ring + mcidx;
   1280  1.1     skrll 
   1281  1.1     skrll 		pvscsi_process_msg(sc, e);
   1282  1.1     skrll 
   1283  1.1     skrll 		/*
   1284  1.1     skrll 		 * ensure message processing reads happen before write to
   1285  1.1     skrll 		 * (increment of) msg_cons_idx
   1286  1.1     skrll 		 */
   1287  1.4  riastrad 		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, mcidx,
   1288  1.4  riastrad 		    BUS_DMASYNC_PREREAD);
   1289  1.4  riastrad 
   1290  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_cons_idx,
   1291  1.4  riastrad 		    BUS_DMASYNC_POSTWRITE);
   1292  1.1     skrll 		s->msg_cons_idx++;
   1293  1.4  riastrad 		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_cons_idx,
   1294  1.4  riastrad 		    BUS_DMASYNC_PREWRITE);
   1295  1.1     skrll 	}
   1296  1.1     skrll }
   1297  1.1     skrll 
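/*
 * pvscsi_intr_locked:
 *
 *	Acknowledge all supported interrupt causes and process the
 *	completion ring (and the message ring, if enabled).
 *	Called with sc->lock held.
 */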
   1298  1.1     skrll static void
   1299  1.1     skrll pvscsi_intr_locked(struct pvscsi_softc *sc)
   1300  1.1     skrll {
   1301  1.1     skrll 	uint32_t val;
   1302  1.1     skrll 
   1303  1.1     skrll 	KASSERT(mutex_owned(&sc->lock));
   1304  1.1     skrll 
   1305  1.1     skrll 	val = pvscsi_read_intr_status(sc);
   1306  1.1     skrll 
   1307  1.1     skrll 	if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
   1308  1.1     skrll 		pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED);
   1309  1.1     skrll 		pvscsi_process_cmp_ring(sc);
   1310  1.1     skrll 		if (sc->use_msg) {
   1311  1.1     skrll 			pvscsi_process_msg_ring(sc);
   1312  1.1     skrll 		}
   1313  1.1     skrll 	}
   1314  1.1     skrll }
   1315  1.1     skrll 
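/*
 * pvscsi_intr:
 *
 *	Interrupt handler: take the softc lock and service the rings.
 */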
   1316  1.1     skrll static int
   1317  1.1     skrll pvscsi_intr(void *xsc)
   1318  1.1     skrll {
   1319  1.1     skrll 	struct pvscsi_softc *sc;
   1320  1.1     skrll 
   1321  1.1     skrll 	sc = xsc;
   1322  1.1     skrll 
   1323  1.1     skrll 	mutex_enter(&sc->lock);
	pvscsi_intr_locked(sc);
   1325  1.1     skrll 	mutex_exit(&sc->lock);
   1326  1.1     skrll 
   1327  1.1     skrll 	return 1;
   1328  1.1     skrll }
   1329  1.1     skrll 
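/*
 * pvscsi_scsipi_request:
 *
 *	scsipi adapter request entry point.  For ADAPTER_REQ_RUN_XFER,
 *	build a request ring descriptor for the xfer, load its DMA map,
 *	bump req_prod_idx, and kick the device.
 */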
   1330  1.1     skrll static void
   1331  1.1     skrll pvscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
   1332  1.1     skrll     request, void *arg)
   1333  1.1     skrll {
   1334  1.1     skrll 	struct pvscsi_softc *sc = device_private(chan->chan_adapter->adapt_dev);
   1335  1.1     skrll 
   1336  1.1     skrll 	if (request == ADAPTER_REQ_SET_XFER_MODE) {
   1337  1.1     skrll 		struct scsipi_xfer_mode *xm = arg;
   1338  1.1     skrll 
   1339  1.1     skrll 		xm->xm_mode = PERIPH_CAP_TQING;
   1340  1.1     skrll 		xm->xm_period = 0;
   1341  1.1     skrll 		xm->xm_offset = 0;
   1342  1.1     skrll 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
   1343  1.1     skrll 		return;
   1344  1.1     skrll 	} else if (request != ADAPTER_REQ_RUN_XFER) {
   1345  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "unhandled %d\n", request);
   1346  1.1     skrll 		return;
   1347  1.1     skrll 	}
   1348  1.1     skrll 
   1349  1.1     skrll 	/* request is ADAPTER_REQ_RUN_XFER */
   1350  1.1     skrll 	struct scsipi_xfer *xs = arg;
   1351  1.1     skrll 	struct scsipi_periph *periph = xs->xs_periph;
   1352  1.1     skrll #ifdef SCSIPI_DEBUG
   1353  1.1     skrll 	periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
   1354  1.1     skrll #endif
   1355  1.1     skrll 
   1356  1.1     skrll 	uint32_t req_num_entries_log2;
   1357  1.4  riastrad 	struct pvscsi_dma *ring_dma;
   1358  1.1     skrll 	struct pvscsi_ring_req_desc *ring;
   1359  1.1     skrll 	struct pvscsi_ring_req_desc *e;
   1360  1.4  riastrad 	struct pvscsi_dma *s_dma;
   1361  1.1     skrll 	struct pvscsi_rings_state *s;
   1362  1.1     skrll 	struct pvscsi_hcb *hcb;
   1363  1.1     skrll 
   1364  1.1     skrll 	if (xs->cmdlen < 0 || xs->cmdlen > sizeof(e->cdb)) {
   1365  1.1     skrll 		DEBUG_PRINTF(1, sc->dev, "bad cmdlen %zu > %zu\n",
   1366  1.1     skrll 		    (size_t)xs->cmdlen, sizeof(e->cdb));
   1367  1.1     skrll 		/* not a temporary condition */
   1368  1.1     skrll 		xs->error = XS_DRIVER_STUFFUP;
   1369  1.1     skrll 		scsipi_done(xs);
   1370  1.1     skrll 		return;
   1371  1.1     skrll 	}
   1372  1.1     skrll 
   1373  1.4  riastrad 	ring_dma = &sc->req_ring_dma;
   1374  1.1     skrll 	ring = sc->req_ring;
   1375  1.4  riastrad 	s_dma = &sc->rings_state_dma;
   1376  1.1     skrll 	s = sc->rings_state;
   1377  1.1     skrll 
   1378  1.1     skrll 	hcb = NULL;
   1379  1.1     skrll 	req_num_entries_log2 = s->req_num_entries_log2;
   1380  1.1     skrll 
   1381  1.1     skrll 	/* Protect against multiple senders */
   1382  1.1     skrll 	mutex_enter(&sc->lock);
   1383  1.1     skrll 
   1384  1.1     skrll 	if (s->req_prod_idx - s->cmp_cons_idx >=
   1385  1.1     skrll 	    (1 << req_num_entries_log2)) {
   1386  1.1     skrll 		aprint_normal_dev(sc->dev,
   1387  1.1     skrll 		    "Not enough room on completion ring.\n");
   1388  1.1     skrll 		xs->error = XS_RESOURCE_SHORTAGE;
   1389  1.1     skrll 		goto finish_xs;
   1390  1.1     skrll 	}
   1391  1.1     skrll 
   1399  1.1     skrll 	hcb = pvscsi_hcb_get(sc);
   1400  1.1     skrll 	if (hcb == NULL) {
   1401  1.1     skrll 		aprint_normal_dev(sc->dev, "No free hcbs.\n");
   1402  1.1     skrll 		xs->error = XS_RESOURCE_SHORTAGE;
   1403  1.1     skrll 		goto finish_xs;
   1404  1.1     skrll 	}
   1405  1.1     skrll 
   1406  1.1     skrll 	hcb->xs = xs;
   1407  1.1     skrll 
   1408  1.1     skrll 	const size_t rridx = s->req_prod_idx & MASK(req_num_entries_log2);
   1409  1.4  riastrad 	PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, rridx, BUS_DMASYNC_POSTWRITE);
   1410  1.1     skrll 	e = ring + rridx;
   1411  1.1     skrll 
   1412  1.1     skrll 	memset(e, 0, sizeof(*e));
   1413  1.1     skrll 	e->bus = 0;
   1414  1.1     skrll 	e->target = periph->periph_target;
   1415  1.1     skrll 	e->lun[1] = periph->periph_lun;
   1416  1.1     skrll 	e->data_addr = 0;
   1417  1.1     skrll 	e->data_len = xs->datalen;
   1418  1.1     skrll 	e->vcpu_hint = cpu_index(curcpu());
   1419  1.1     skrll 	e->flags = 0;
   1420  1.1     skrll 
   1421  1.1     skrll 	e->cdb_len = xs->cmdlen;
   1422  1.1     skrll 	memcpy(e->cdb, xs->cmd, xs->cmdlen);
   1423  1.1     skrll 
   1424  1.1     skrll 	e->sense_addr = 0;
   1425  1.1     skrll 	e->sense_len = sizeof(xs->sense);
   1426  1.1     skrll 	if (e->sense_len > 0) {
   1427  1.1     skrll 		e->sense_addr = hcb->sense_buffer_paddr;
   1428  1.1     skrll 	}
	/* XXX always use a simple tag for now instead of xs->xs_tag_type */
	e->tag = MSG_SIMPLE_Q_TAG;
   1431  1.1     skrll 
   1432  1.1     skrll 	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
   1433  1.1     skrll 	case XS_CTL_DATA_IN:
   1434  1.1     skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST;
   1435  1.1     skrll 		break;
   1436  1.1     skrll 	case XS_CTL_DATA_OUT:
   1437  1.1     skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE;
   1438  1.1     skrll 		break;
   1439  1.1     skrll 	default:
   1440  1.1     skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_NONE;
   1441  1.1     skrll 		break;
   1442  1.1     skrll 	}
   1443  1.1     skrll 
   1444  1.1     skrll 	e->context = pvscsi_hcb_to_context(sc, hcb);
   1445  1.1     skrll 	hcb->e = e;
   1446  1.1     skrll 
   1447  1.1     skrll 	DEBUG_PRINTF(3, sc->dev,
   1448  1.1     skrll 	    " queuing command %02x context %llx\n", e->cdb[0],
   1449  1.1     skrll 	    (unsigned long long)e->context);
   1450  1.1     skrll 
   1451  1.1     skrll 	int flags;
   1452  1.1     skrll 	flags  = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE;
   1453  1.1     skrll 	flags |= (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
   1454  1.1     skrll 
   1455  1.1     skrll 	int error = bus_dmamap_load(sc->sc_dmat, hcb->dma_map,
   1456  1.1     skrll 	    xs->data, xs->datalen, NULL, flags);
   1457  1.1     skrll 
   1458  1.1     skrll 	if (error) {
   1459  1.1     skrll 		if (error == ENOMEM || error == EAGAIN) {
   1460  1.1     skrll 			xs->error = XS_RESOURCE_SHORTAGE;
   1461  1.1     skrll 		} else {
   1462  1.1     skrll 			xs->error = XS_DRIVER_STUFFUP;
   1463  1.1     skrll 		}
		DEBUG_PRINTF(1, sc->dev,
		    "xs: %p load error %d data %p len %d\n",
		    xs, error, xs->data, xs->datalen);
   1467  1.1     skrll 		goto error_load;
   1468  1.1     skrll 	}
   1469  1.1     skrll 
   1470  1.1     skrll 	int op = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
   1471  1.1     skrll 	    BUS_DMASYNC_PREWRITE;
   1472  1.1     skrll 	int nseg = hcb->dma_map->dm_nsegs;
   1473  1.1     skrll 	bus_dma_segment_t *segs = hcb->dma_map->dm_segs;
   1474  1.1     skrll 	if (nseg != 0) {
   1475  1.1     skrll 		if (nseg > 1) {
   1476  1.1     skrll 			struct pvscsi_sg_element *sge;
   1477  1.1     skrll 
   1478  1.1     skrll 			KASSERTMSG(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT,
   1479  1.1     skrll 			    "too many sg segments");
   1480  1.1     skrll 
   1481  1.1     skrll 			sge = hcb->sg_list->sge;
   1482  1.1     skrll 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
   1483  1.1     skrll 
   1484  1.1     skrll 			for (size_t i = 0; i < nseg; ++i) {
   1485  1.1     skrll 				sge[i].addr = segs[i].ds_addr;
   1486  1.1     skrll 				sge[i].length = segs[i].ds_len;
   1487  1.1     skrll 				sge[i].flags = 0;
   1488  1.1     skrll 			}
   1489  1.1     skrll 
   1490  1.1     skrll 			e->data_addr = hcb->sg_list_paddr;
   1491  1.1     skrll 
   1492  1.1     skrll 			bus_dmamap_sync(sc->sc_dmat,
   1493  1.1     skrll 			    sc->sg_list_dma.map, hcb->sg_list_offset,
   1494  1.1     skrll 			    sizeof(*sge) * nseg, BUS_DMASYNC_PREWRITE);
   1495  1.1     skrll 		} else {
   1496  1.1     skrll 			e->data_addr = segs->ds_addr;
   1497  1.1     skrll 		}
   1498  1.1     skrll 
   1499  1.1     skrll 		bus_dmamap_sync(sc->sc_dmat, hcb->dma_map, 0,
   1500  1.1     skrll 		    xs->datalen, op);
   1501  1.1     skrll 	} else {
   1502  1.1     skrll 		e->data_addr = 0;
   1503  1.1     skrll 	}
   1504  1.1     skrll 
   1505  1.1     skrll 	/*
   1506  1.1     skrll 	 * Ensure request record writes happen before write to (increment of)
   1507  1.1     skrll 	 * req_prod_idx.
   1508  1.1     skrll 	 */
   1509  1.4  riastrad 	PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, rridx, BUS_DMASYNC_PREWRITE);
   1510  1.1     skrll 
   1511  1.1     skrll 	uint8_t cdb0 = e->cdb[0];
   1512  1.1     skrll 
   1513  1.1     skrll 	/* handle timeout */
   1514  1.1     skrll 	if ((xs->xs_control & XS_CTL_POLL) == 0) {
   1515  1.1     skrll 		int timeout = mstohz(xs->timeout);
   1516  1.1     skrll 		/* start expire timer */
   1517  1.1     skrll 		if (timeout == 0)
   1518  1.1     skrll 			timeout = 1;
   1519  1.1     skrll 		callout_reset(&xs->xs_callout, timeout, pvscsi_timeout, hcb);
   1520  1.1     skrll 	}
   1521  1.1     skrll 
   1522  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_prod_idx,
   1523  1.4  riastrad 	    BUS_DMASYNC_POSTWRITE);
   1524  1.1     skrll 	s->req_prod_idx++;
   1525  1.1     skrll 
   1526  1.1     skrll 	/*
   1527  1.1     skrll 	 * Ensure req_prod_idx write (increment) happens before
   1528  1.1     skrll 	 * IO is kicked (via a write).
   1529  1.2     skrll 	 */
   1530  1.4  riastrad 	PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_prod_idx,
   1531  1.4  riastrad 	    BUS_DMASYNC_PREWRITE);
   1532  1.2     skrll 
   1533  1.1     skrll 	pvscsi_kick_io(sc, cdb0);
   1534  1.1     skrll 	mutex_exit(&sc->lock);
   1535  1.1     skrll 
   1536  1.1     skrll 	return;
   1537  1.1     skrll 
   1538  1.1     skrll error_load:
   1539  1.1     skrll 	pvscsi_hcb_put(sc, hcb);
   1540  1.1     skrll 
   1541  1.1     skrll finish_xs:
   1542  1.1     skrll 	mutex_exit(&sc->lock);
   1543  1.1     skrll 	scsipi_done(xs);
   1544  1.1     skrll }
   1545  1.1     skrll 
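/*
 * pvscsi_free_interrupts:
 *
 *	Disestablish the interrupt handler and release the allocated
 *	interrupt, if any.
 */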
   1546  1.1     skrll static void
   1547  1.1     skrll pvscsi_free_interrupts(struct pvscsi_softc *sc)
   1548  1.1     skrll {
   1549  1.1     skrll 
   1550  1.1     skrll 	if (sc->sc_ih != NULL) {
   1551  1.1     skrll 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   1552  1.1     skrll 		sc->sc_ih = NULL;
   1553  1.1     skrll 	}
   1554  1.1     skrll 	if (sc->sc_pihp != NULL) {
   1555  1.1     skrll 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
   1556  1.1     skrll 		sc->sc_pihp = NULL;
   1557  1.1     skrll 	}
   1558  1.1     skrll }
   1559  1.1     skrll 
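/*
 * pvscsi_setup_interrupts:
 *
 *	Allocate and establish an MSI-X, MSI, or INTx interrupt, honouring
 *	the use_msix and use_msi tunables.
 */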
   1560  1.1     skrll static int
   1561  1.1     skrll pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *pa)
   1562  1.1     skrll {
   1563  1.1     skrll 	int use_msix;
   1564  1.1     skrll 	int use_msi;
   1565  1.1     skrll 	int counts[PCI_INTR_TYPE_SIZE];
   1566  1.1     skrll 
   1567  1.1     skrll 	for (size_t i = 0; i < PCI_INTR_TYPE_SIZE; i++) {
   1568  1.1     skrll 		counts[i] = 1;
   1569  1.1     skrll 	}
   1570  1.1     skrll 
   1571  1.1     skrll 	use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix);
   1572  1.1     skrll 	use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi);
   1573  1.1     skrll 
   1574  1.1     skrll 	if (!use_msix) {
   1575  1.1     skrll 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1576  1.1     skrll 	}
   1577  1.1     skrll 	if (!use_msi) {
   1578  1.1     skrll 		counts[PCI_INTR_TYPE_MSI] = 0;
   1579  1.1     skrll 	}
   1580  1.1     skrll 
   1581  1.1     skrll 	/* Allocate and establish the interrupt. */
   1582  1.1     skrll 	if (pci_intr_alloc(pa, &sc->sc_pihp, counts, PCI_INTR_TYPE_MSIX)) {
   1583  1.1     skrll 		aprint_error_dev(sc->dev, "can't allocate handler\n");
   1584  1.1     skrll 		goto fail;
   1585  1.1     skrll 	}
   1586  1.1     skrll 
   1587  1.1     skrll 	char intrbuf[PCI_INTRSTR_LEN];
   1588  1.1     skrll 	const pci_chipset_tag_t pc = pa->pa_pc;
   1589  1.1     skrll 	char const *intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
   1590  1.1     skrll 	    sizeof(intrbuf));
   1591  1.1     skrll 
	/*
	 * Request an MP-safe handler; the MPSAFE attribute must be set
	 * before the interrupt is established for it to take effect.
	 */
	pci_intr_setattr(pc, sc->sc_pihp, PCI_INTR_MPSAFE, true);

	sc->sc_ih = pci_intr_establish_xname(pc, sc->sc_pihp[0], IPL_BIO,
	    pvscsi_intr, sc, device_xname(sc->dev));
	if (sc->sc_ih == NULL) {
		pci_intr_release(pc, sc->sc_pihp, 1);
		sc->sc_pihp = NULL;
		aprint_error_dev(sc->dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
   1604  1.1     skrll 
   1605  1.1     skrll 	aprint_normal_dev(sc->dev, "interrupting at %s\n", intrstr);
   1606  1.1     skrll 
   1607  1.1     skrll 	return (0);
   1608  1.1     skrll 
   1609  1.1     skrll fail:
   1610  1.1     skrll 	if (sc->sc_ih != NULL) {
   1611  1.1     skrll 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   1612  1.1     skrll 		sc->sc_ih = NULL;
   1613  1.1     skrll 	}
   1614  1.1     skrll 	if (sc->sc_pihp != NULL) {
   1615  1.1     skrll 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
   1616  1.1     skrll 		sc->sc_pihp = NULL;
   1617  1.1     skrll 	}
   1618  1.1     skrll 	if (sc->sc_mems) {
   1619  1.1     skrll 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   1620  1.1     skrll 		sc->sc_mems = 0;
   1621  1.1     skrll 	}
   1622  1.1     skrll 
   1623  1.1     skrll 	return 1;
   1624  1.1     skrll }
   1625  1.1     skrll 
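/*
 * pvscsi_free_all:
 *
 *	Release everything: per-hcb DMA resources, the hcb array, the
 *	rings, the interrupt, and the register mapping.
 */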
   1626  1.1     skrll static void
   1627  1.1     skrll pvscsi_free_all(struct pvscsi_softc *sc)
   1628  1.1     skrll {
   1629  1.1     skrll 
   1630  1.1     skrll 	pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt);
   1631  1.1     skrll 
   1632  1.1     skrll 	if (sc->hcbs) {
   1633  1.1     skrll 		kmem_free(sc->hcbs, sc->hcb_cnt * sizeof(*sc->hcbs));
   1634  1.1     skrll 	}
   1635  1.1     skrll 
   1636  1.1     skrll 	pvscsi_free_rings(sc);
   1637  1.1     skrll 
   1638  1.1     skrll 	pvscsi_free_interrupts(sc);
   1639  1.1     skrll 
   1640  1.1     skrll 	if (sc->sc_mems) {
   1641  1.1     skrll 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   1642  1.1     skrll 		sc->sc_mems = 0;
   1643  1.1     skrll 	}
   1644  1.1     skrll }
   1645  1.1     skrll 
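/*
 * pci_enable_busmaster:
 *
 *	Set the bus-master enable bit in the PCI command register if it
 *	is not already set.
 */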
   1646  1.1     skrll static inline void
   1647  1.1     skrll pci_enable_busmaster(device_t dev, const pci_chipset_tag_t pc,
   1648  1.1     skrll     const pcitag_t tag)
   1649  1.1     skrll {
   1650  1.1     skrll 	pcireg_t pci_cmd_word;
   1651  1.1     skrll 
   1652  1.1     skrll 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1653  1.1     skrll 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
   1654  1.1     skrll 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
   1655  1.1     skrll 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1656  1.1     skrll 	}
   1657  1.1     skrll }
   1658  1.1     skrll 
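/*
 * pvscsi_attach:
 *
 *	Autoconfiguration attach: map the registers, choose a DMA tag,
 *	set up interrupts, size and allocate the rings and hcbs, reset
 *	the adapter, and attach the scsipi bus.
 */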
   1659  1.1     skrll static void
   1660  1.1     skrll pvscsi_attach(device_t parent, device_t dev, void *aux)
   1661  1.1     skrll {
   1662  1.1     skrll 	const struct pci_attach_args *pa = aux;
   1663  1.1     skrll 	struct pvscsi_softc *sc;
   1664  1.1     skrll 	int rid;
   1665  1.1     skrll 	int error;
   1666  1.1     skrll 	int max_queue_depth;
   1667  1.1     skrll 	int adapter_queue_size;
   1668  1.1     skrll 
   1669  1.1     skrll 	sc = device_private(dev);
   1670  1.1     skrll 	sc->dev = dev;
   1671  1.1     skrll 
   1672  1.1     skrll 	struct scsipi_adapter *adapt = &sc->sc_adapter;
   1673  1.1     skrll 	struct scsipi_channel *chan = &sc->sc_channel;
   1674  1.1     skrll 
   1675  1.1     skrll 	mutex_init(&sc->lock, MUTEX_DEFAULT, IPL_BIO);
   1676  1.1     skrll 
   1677  1.1     skrll 	sc->sc_pc = pa->pa_pc;
   1678  1.1     skrll 	pci_enable_busmaster(dev, pa->pa_pc, pa->pa_tag);
   1679  1.1     skrll 
   1680  1.1     skrll 	pci_aprint_devinfo_fancy(pa, "virtual disk controller",
   1681  1.1     skrll 	    VMWARE_PVSCSI_DEVSTR, true);
   1682  1.1     skrll 
   1683  1.1     skrll 	/*
	 * Map the device.  All devices support memory-mapped access.
   1685  1.1     skrll 	 */
   1686  1.1     skrll 	bool memh_valid;
   1687  1.1     skrll 	bus_space_tag_t memt;
   1688  1.1     skrll 	bus_space_handle_t memh;
   1689  1.1     skrll 	bus_size_t mems;
   1690  1.1     skrll 	pcireg_t regt;
   1691  1.1     skrll 
   1692  1.1     skrll 	for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END; rid += sizeof(regt)) {
   1693  1.1     skrll 		regt = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rid);
   1694  1.1     skrll 		if (PCI_MAPREG_TYPE(regt) == PCI_MAPREG_TYPE_MEM)
   1695  1.1     skrll 			break;
   1696  1.1     skrll 	}
   1697  1.1     skrll 
	if (rid >= PCI_MAPREG_END) {
		aprint_error_dev(dev,
		    "unable to locate device registers\n");
		return;
	}
   1702  1.1     skrll 
   1703  1.1     skrll 	memh_valid = (pci_mapreg_map(pa, rid, regt, 0, &memt, &memh,
   1704  1.1     skrll 	    NULL, &mems) == 0);
   1705  1.1     skrll 	if (!memh_valid) {
   1706  1.1     skrll 		aprint_error_dev(dev,
   1707  1.1     skrll 		    "unable to map device registers\n");
   1708  1.1     skrll 		return;
   1709  1.1     skrll 	}
   1710  1.1     skrll 	sc->sc_memt = memt;
   1711  1.1     skrll 	sc->sc_memh = memh;
   1712  1.1     skrll 	sc->sc_mems = mems;
   1713  1.1     skrll 
   1714  1.1     skrll 	if (pci_dma64_available(pa)) {
   1715  1.1     skrll 		sc->sc_dmat = pa->pa_dmat64;
   1716  1.1     skrll 		aprint_verbose_dev(sc->dev, "64-bit DMA\n");
   1717  1.1     skrll 	} else {
   1718  1.1     skrll 		aprint_verbose_dev(sc->dev, "32-bit DMA\n");
   1719  1.1     skrll 		sc->sc_dmat = pa->pa_dmat;
   1720  1.1     skrll 	}
   1721  1.1     skrll 
   1722  1.1     skrll 	error = pvscsi_setup_interrupts(sc, pa);
   1723  1.1     skrll 	if (error) {
   1724  1.1     skrll 		aprint_normal_dev(dev, "Interrupt setup failed\n");
   1725  1.1     skrll 		pvscsi_free_all(sc);
   1726  1.1     skrll 		return;
   1727  1.1     skrll 	}
   1728  1.1     skrll 
   1729  1.1     skrll 	sc->max_targets = pvscsi_get_max_targets(sc);
   1730  1.1     skrll 
   1731  1.1     skrll 	sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) &&
   1732  1.1     skrll 	    pvscsi_hw_supports_msg(sc);
   1733  1.1     skrll 	sc->msg_ring_num_pages = sc->use_msg ? 1 : 0;
   1734  1.1     skrll 
   1735  1.1     skrll 	sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages",
   1736  1.1     skrll 	    pvscsi_request_ring_pages);
   1737  1.1     skrll 	if (sc->req_ring_num_pages <= 0) {
   1738  1.1     skrll 		if (sc->max_targets <= 16) {
   1739  1.1     skrll 			sc->req_ring_num_pages =
   1740  1.1     skrll 			    PVSCSI_DEFAULT_NUM_PAGES_REQ_RING;
   1741  1.1     skrll 		} else {
   1742  1.1     skrll 			sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
   1743  1.1     skrll 		}
   1744  1.1     skrll 	} else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) {
   1745  1.1     skrll 		sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
   1746  1.1     skrll 	}
   1747  1.1     skrll 	sc->cmp_ring_num_pages = sc->req_ring_num_pages;
   1748  1.1     skrll 
   1749  1.1     skrll 	max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth",
   1750  1.1     skrll 	    pvscsi_max_queue_depth);
   1751  1.1     skrll 
   1752  1.1     skrll 	adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) /
   1753  1.1     skrll 	    sizeof(struct pvscsi_ring_req_desc);
   1754  1.1     skrll 	if (max_queue_depth > 0) {
   1755  1.1     skrll 		adapter_queue_size = MIN(adapter_queue_size, max_queue_depth);
   1756  1.1     skrll 	}
   1757  1.1     skrll 	adapter_queue_size = MIN(adapter_queue_size,
   1758  1.1     skrll 	    PVSCSI_MAX_REQ_QUEUE_DEPTH);
   1759  1.1     skrll 
   1760  1.1     skrll 	aprint_normal_dev(sc->dev, "Use Msg: %d\n", sc->use_msg);
   1761  1.1     skrll 	aprint_normal_dev(sc->dev, "Max targets: %d\n", sc->max_targets);
   1762  1.1     skrll 	aprint_normal_dev(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages);
   1763  1.1     skrll 	aprint_normal_dev(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages);
   1764  1.1     skrll 	aprint_normal_dev(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages);
   1765  1.1     skrll 	aprint_normal_dev(sc->dev, "Queue size: %d\n", adapter_queue_size);
   1766  1.1     skrll 
   1767  1.1     skrll 	if (pvscsi_allocate_rings(sc)) {
   1768  1.1     skrll 		aprint_normal_dev(dev, "ring allocation failed\n");
   1769  1.1     skrll 		pvscsi_free_all(sc);
   1770  1.1     skrll 		return;
   1771  1.1     skrll 	}
   1772  1.1     skrll 
   1773  1.1     skrll 	sc->hcb_cnt = adapter_queue_size;
   1774  1.1     skrll 	sc->hcbs = kmem_zalloc(sc->hcb_cnt * sizeof(*sc->hcbs), KM_SLEEP);
   1775  1.1     skrll 
   1776  1.1     skrll 	if (pvscsi_dma_alloc_per_hcb(sc)) {
   1777  1.1     skrll 		aprint_normal_dev(dev, "error allocating per hcb dma memory\n");
   1778  1.1     skrll 		pvscsi_free_all(sc);
   1779  1.1     skrll 		return;
   1780  1.1     skrll 	}
   1781  1.1     skrll 
   1782  1.1     skrll 	pvscsi_adapter_reset(sc);
   1783  1.1     skrll 
   1784  1.1     skrll 	/*
   1785  1.1     skrll 	 * Fill in the scsipi_adapter.
   1786  1.1     skrll 	 */
   1787  1.1     skrll 	memset(adapt, 0, sizeof(*adapt));
   1788  1.1     skrll 	adapt->adapt_dev = sc->dev;
   1789  1.1     skrll 	adapt->adapt_nchannels = 1;
   1790  1.1     skrll 	adapt->adapt_openings = MIN(adapter_queue_size, PVSCSI_CMD_PER_LUN);
   1791  1.1     skrll 	adapt->adapt_max_periph = adapt->adapt_openings;
   1792  1.1     skrll 	adapt->adapt_request = pvscsi_scsipi_request;
   1793  1.1     skrll 	adapt->adapt_minphys = minphys;
   1794  1.1     skrll 
   1795  1.1     skrll 	/*
   1796  1.1     skrll 	 * Fill in the scsipi_channel.
   1797  1.1     skrll 	 */
   1798  1.1     skrll 	memset(chan, 0, sizeof(*chan));
   1799  1.1     skrll 	chan->chan_adapter = adapt;
   1800  1.1     skrll 	chan->chan_bustype = &scsi_bustype;
   1801  1.1     skrll 	chan->chan_channel = 0;
   1802  1.1     skrll 	chan->chan_ntargets = MIN(PVSCSI_MAX_TARGET, 16);	/* cap reasonably */
   1803  1.1     skrll 	chan->chan_nluns = MIN(PVSCSI_MAX_LUN, 1024);		/* cap reasonably */
   1804  1.1     skrll 	chan->chan_id = PVSCSI_MAX_TARGET;
   1805  1.1     skrll 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
   1806  1.1     skrll 
   1807  1.1     skrll 	pvscsi_setup_rings(sc);
   1808  1.1     skrll 	if (sc->use_msg) {
   1809  1.1     skrll 		pvscsi_setup_msg_ring(sc);
   1810  1.1     skrll 	}
   1811  1.1     skrll 
   1812  1.1     skrll 	sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1);
   1813  1.1     skrll 
   1814  1.1     skrll 	pvscsi_intr_enable(sc);
   1815  1.1     skrll 
   1816  1.1     skrll 	sc->sc_scsibus_dv = config_found(sc->dev, &sc->sc_channel, scsiprint,
   1817  1.1     skrll 	    CFARGS_NONE);
   1818  1.1     skrll 
   1819  1.1     skrll 	return;
   1820  1.1     skrll }
   1821  1.1     skrll 
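/*
 * pvscsi_detach:
 *
 *	Autoconfiguration detach: disable interrupts, reset the adapter,
 *	and free all resources.
 */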
   1822  1.1     skrll static int
   1823  1.1     skrll pvscsi_detach(device_t dev, int flags)
   1824  1.1     skrll {
   1825  1.1     skrll 	struct pvscsi_softc *sc;
   1826  1.1     skrll 
   1827  1.1     skrll 	sc = device_private(dev);
   1828  1.1     skrll 
   1829  1.1     skrll 	pvscsi_intr_disable(sc);
   1830  1.1     skrll 	pvscsi_adapter_reset(sc);
   1831  1.1     skrll 
   1832  1.1     skrll 	pvscsi_free_all(sc);
   1833  1.1     skrll 
   1834  1.1     skrll 	mutex_destroy(&sc->lock);
   1835  1.1     skrll 
   1836  1.1     skrll 	return (0);
   1837  1.1     skrll }
   1838