/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/*

These files are provided under a dual BSD-2 Clause/GPLv2 license. When
using or redistributing this file, you may do so under either license.

BSD-2 Clause License

Copyright (c) 2018 VMware, Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

  * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

GPL License Summary

Copyright (c) 2018 VMware, Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.

*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pvscsi.c,v 1.2 2025/08/05 08:30:23 skrll Exp $");

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>

#include "pvscsi.h"

#define	PVSCSI_DEFAULT_NUM_PAGES_REQ_RING	8
#define	PVSCSI_SENSE_LENGTH			256

#define PVSCSI_MAXPHYS				MAXPHYS
#define PVSCSI_MAXPHYS_SEGS			((PVSCSI_MAXPHYS / PAGE_SIZE) + 1)

#define PVSCSI_CMD_PER_LUN 64
#define PVSCSI_MAX_LUN 8
#define PVSCSI_MAX_TARGET 16

//#define PVSCSI_DEBUG_LOGGING

#ifdef PVSCSI_DEBUG_LOGGING
#define	DEBUG_PRINTF(level, dev, fmt, ...)				\
	do {								\
		if (pvscsi_log_level >= (level)) {			\
			aprint_normal_dev((dev), (fmt), ##__VA_ARGS__);	\
		}							\
	} while(0)
#else
#define DEBUG_PRINTF(level, dev, fmt, ...)
#endif /* PVSCSI_DEBUG_LOGGING */

struct pvscsi_softc;
struct pvscsi_hcb;
struct pvscsi_dma;

#define VMWARE_PVSCSI_DEVSTR	"VMware Paravirtual SCSI Controller"

static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc,
    uint32_t offset);
static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset,
    uint32_t val);
static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc);
static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc,
    uint32_t val);
static inline void pvscsi_intr_enable(struct pvscsi_softc *sc);
static inline void pvscsi_intr_disable(struct pvscsi_softc *sc);
static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0);
static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    uint32_t len);
static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc);
static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable);
static void pvscsi_setup_rings(struct pvscsi_softc *sc);
static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc);
static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc);

static void pvscsi_timeout(void *arg);
static void pvscsi_adapter_reset(struct pvscsi_softc *sc);
static void pvscsi_bus_reset(struct pvscsi_softc *sc);
static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target);
static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target,
    struct pvscsi_hcb *hcb);

static void pvscsi_process_completion(struct pvscsi_softc *sc,
    struct pvscsi_ring_cmp_desc *e);
static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc);
static void pvscsi_process_msg(struct pvscsi_softc *sc,
    struct pvscsi_ring_msg_desc *e);
static void pvscsi_process_msg_ring(struct pvscsi_softc *sc);

static void pvscsi_intr_locked(struct pvscsi_softc *sc);
static int pvscsi_intr(void *xsc);

static void pvscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);

static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    struct pvscsi_hcb *hcb);
static inline struct pvscsi_hcb *pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    uint64_t context);
static struct pvscsi_hcb * pvscsi_hcb_get(struct pvscsi_softc *sc);
static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb);

static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma);
static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    bus_size_t size, bus_size_t alignment);
static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc,
    struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages);
static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc,
    uint32_t hcbs_allocated);
static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc);
static void pvscsi_free_rings(struct pvscsi_softc *sc);
static int pvscsi_allocate_rings(struct pvscsi_softc *sc);
static void pvscsi_free_interrupts(struct pvscsi_softc *sc);
static int pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *);
static void pvscsi_free_all(struct pvscsi_softc *sc);

static void pvscsi_attach(device_t, device_t, void *);
static int pvscsi_detach(device_t, int);
static int pvscsi_probe(device_t, cfdata_t, void *);

#define pvscsi_get_tunable(_sc, _name, _value)	(_value)

#ifdef PVSCSI_DEBUG_LOGGING
static int pvscsi_log_level = 1;
#endif

#define TUNABLE_INT(__x, __d)					\
	err = sysctl_createv(clog, 0, &rnode, &cnode,		\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,	\
	    #__x, SYSCTL_DESCR(__d),				\
	    NULL, 0, &(pvscsi_ ## __x), sizeof(pvscsi_ ## __x), \
	    CTL_CREATE,	CTL_EOL);				\
	if (err)						\
		goto fail;

static int pvscsi_request_ring_pages = 0;
static int pvscsi_use_msg = 1;
static int pvscsi_use_msi = 1;
static int pvscsi_use_msix = 1;
static int pvscsi_use_req_call_threshold = 0;
static int pvscsi_max_queue_depth = 0;

SYSCTL_SETUP(sysctl_hw_pvscsi_setup, "sysctl hw.pvscsi setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "pvscsi",
	    SYSCTL_DESCR("pvscsi global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

#ifdef PVSCSI_DEBUG_LOGGING
	TUNABLE_INT(log_level, "Enable debugging output");
#endif

	TUNABLE_INT(request_ring_pages, "No. of pages for the request ring");
	TUNABLE_INT(use_msg, "Use message passing");
	TUNABLE_INT(use_msi, "Use MSI interrupt");
	TUNABLE_INT(use_msix, "Use MSI-X interrupt");
	TUNABLE_INT(use_req_call_threshold, "Use request limit");
	TUNABLE_INT(max_queue_depth, "Maximum size of request queue");

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

struct pvscsi_sg_list {
	struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
};

#define	PVSCSI_ABORT_TIMEOUT	2
#define	PVSCSI_RESET_TIMEOUT	10

#define	PVSCSI_HCB_NONE		0
#define	PVSCSI_HCB_ABORT	1
#define	PVSCSI_HCB_DEVICE_RESET	2
#define	PVSCSI_HCB_BUS_RESET	3

struct pvscsi_hcb {
	struct scsipi_xfer		*xs;
	struct pvscsi_softc		*sc;

	struct pvscsi_ring_req_desc	*e;
	int				 recovery;
	SLIST_ENTRY(pvscsi_hcb)		 links;

	bus_dmamap_t			 dma_map;
	bus_addr_t			 dma_map_offset;
	bus_size_t			 dma_map_size;
	void				*sense_buffer;
	bus_addr_t			 sense_buffer_paddr;
	struct pvscsi_sg_list		*sg_list;
	bus_addr_t			 sg_list_paddr;
	bus_addr_t			 sg_list_offset;
};

struct pvscsi_dma {
	bus_dmamap_t		 map;
	void			*vaddr;
	bus_addr_t		 paddr;
	bus_size_t		 size;
	bus_dma_segment_t	 seg[1];
};

struct pvscsi_softc {
	device_t		 dev;
	kmutex_t		 lock;

	device_t		 sc_scsibus_dv;
	struct scsipi_adapter	 sc_adapter;
	struct scsipi_channel	 sc_channel;

	struct pvscsi_rings_state	*rings_state;
	struct pvscsi_ring_req_desc	*req_ring;
	struct pvscsi_ring_cmp_desc	*cmp_ring;
	struct pvscsi_ring_msg_desc	*msg_ring;
	uint32_t		 hcb_cnt;
	struct pvscsi_hcb	*hcbs;
	SLIST_HEAD(, pvscsi_hcb) free_list;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_memt;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;

	bool		 use_msg;
	uint32_t	 max_targets;
	int		 mm_rid;
	int		 irq_id;
	int		 use_req_call_threshold;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t *	 sc_pihp;
	void			*sc_ih;

	uint64_t	rings_state_ppn;
	uint32_t	req_ring_num_pages;
	uint64_t	req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING];
	uint32_t	cmp_ring_num_pages;
	uint64_t	cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING];
	uint32_t	msg_ring_num_pages;
	uint64_t	msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING];

	struct	pvscsi_dma rings_state_dma;
	struct	pvscsi_dma req_ring_dma;
	struct	pvscsi_dma cmp_ring_dma;
	struct	pvscsi_dma msg_ring_dma;

	struct	pvscsi_dma sg_list_dma;
	struct	pvscsi_dma sense_buffer_dma;
};

CFATTACH_DECL3_NEW(pvscsi, sizeof(struct pvscsi_softc),
    pvscsi_probe, pvscsi_attach, pvscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static inline uint32_t
pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset)
{

	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, offset));
}

static inline void
pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val)
{

	bus_space_write_4(sc->sc_memt, sc->sc_memh, offset, val);
}

static inline uint32_t
pvscsi_read_intr_status(struct pvscsi_softc *sc)
{

	return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS));
}

static inline void
pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val)
{

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static inline void
pvscsi_intr_enable(struct pvscsi_softc *sc)
{
	uint32_t mask;

	mask = PVSCSI_INTR_CMPL_MASK;
	if (sc->use_msg) {
		mask |= PVSCSI_INTR_MSG_MASK;
	}

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask);
}

static inline void
pvscsi_intr_disable(struct pvscsi_softc *sc)
{

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

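/*
 * Notify the device that new requests have been posted to the request
 * ring.  READ/WRITE CDBs use the RW kick register and, when the request
 * call threshold is in use, the kick is skipped while the number of
 * outstanding requests is below the device's threshold.  All other CDBs
 * always kick via the non-RW register.
 */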
static void
pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0)
{
	struct pvscsi_rings_state *s;

	DEBUG_PRINTF(2, sc->dev, "%s: cdb0 %#x\n", __func__, cdb0);
	if (cdb0 == SCSI_READ_6_COMMAND  || cdb0 == READ_10  ||
	    cdb0 == READ_12  || cdb0 == READ_16  ||
	    cdb0 == SCSI_WRITE_6_COMMAND || cdb0 == WRITE_10 ||
	    cdb0 == WRITE_12 || cdb0 == WRITE_16) {
		s = sc->rings_state;

		DEBUG_PRINTF(2, sc->dev, "%s req prod %d cons %d\n", __func__,
		    s->req_prod_idx, s->req_cons_idx);
		if (!sc->use_req_call_threshold ||
		    (s->req_prod_idx - s->req_cons_idx) >=
		     s->req_call_threshold) {
			pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
			DEBUG_PRINTF(2, sc->dev, "kicked\n");
		} else {
			DEBUG_PRINTF(2, sc->dev, "kick deferred, below req call threshold\n");
		}
	} else {
		s = sc->rings_state;
		DEBUG_PRINTF(1, sc->dev, "%s req prod %d cons %d not checked\n", __func__,
		    s->req_prod_idx, s->req_cons_idx);

		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
	}
}

static void
pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
		 uint32_t len)
{
	uint32_t *data_ptr;
	int i;

	KASSERTMSG(len % sizeof(uint32_t) == 0,
		"command size not a multiple of 4");

	data_ptr = data;
	len /= sizeof(uint32_t);

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; ++i) {
		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA,
		   data_ptr[i]);
	}
}

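/*
 * The device echoes the 64-bit context of a request back in its
 * completion descriptor.  Contexts are derived from the hcb's index in
 * the hcb array, offset by one so that a context of 0 never occurs.
 */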
static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    struct pvscsi_hcb *hcb)
{

	/* Offset by 1 because context must not be 0 */
	return (hcb - sc->hcbs + 1);
}

static inline struct pvscsi_hcb* pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    uint64_t context)
{

	return (sc->hcbs + (context - 1));
}

static struct pvscsi_hcb *
pvscsi_hcb_get(struct pvscsi_softc *sc)
{
	struct pvscsi_hcb *hcb;

	KASSERT(mutex_owned(&sc->lock));

	hcb = SLIST_FIRST(&sc->free_list);
	if (hcb) {
		SLIST_REMOVE_HEAD(&sc->free_list, links);
	}

	return (hcb);
}

static void
pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
{

	KASSERT(mutex_owned(&sc->lock));
	hcb->xs = NULL;
	hcb->e = NULL;
	hcb->recovery = PVSCSI_HCB_NONE;
	SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
}

static uint32_t
pvscsi_get_max_targets(struct pvscsi_softc *sc)
{
	uint32_t max_targets;

	pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0);

	max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	if (max_targets == ~0) {
		max_targets = 16;
	}

	return (max_targets);
}

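/*
 * Ask the device to use "request call" mode: when the command is
 * supported and enabled via the pvscsi_use_req_call_threshold tunable,
 * the device publishes a threshold and the driver may skip kicks while
 * fewer than that many requests are outstanding, reducing doorbell
 * writes.
 */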
static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable)
{
	uint32_t status;
	struct pvscsi_cmd_desc_setup_req_call cmd;

	if (!pvscsi_get_tunable(sc, "pvscsi_use_req_call_threshold",
	    pvscsi_use_req_call_threshold)) {
		return (0);
	}

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
	    PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	if (status != -1) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.enable = enable;
		pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
		    &cmd, sizeof(cmd));
		status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

		return (status != 0);
	} else {
		return (0);
	}
}

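/*
 * Helpers for the contiguous DMA areas used by the rings and the
 * per-hcb sense/sg buffers: allocate, map, create and load a
 * single-segment DMA map; pvscsi_dma_free() undoes the same steps in
 * reverse.
 */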
static void
pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma)
{

	bus_dmamap_unload(sc->sc_dmat, dma->map);
	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
	bus_dmamap_destroy(sc->sc_dmat, dma->map);
	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));

	memset(dma, 0, sizeof(*dma));
}

static int
pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;
	int nsegs;

	memset(dma, 0, sizeof(*dma));

	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, 0, dma->seg,
	    __arraycount(dma->seg), &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_normal_dev(sc->dev, "error allocating dma mem, error %d\n",
		    error);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, dma->seg, nsegs, size,
	    &dma->vaddr, BUS_DMA_WAITOK);
	if (error != 0) {
		device_printf(sc->dev, "Failed to map DMA memory\n");
		goto dmamemmap_fail;
	}

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, &dma->map);
	if (error != 0) {
		device_printf(sc->dev, "Failed to create DMA map\n");
		goto dmamapcreate_fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->vaddr, size,
	    NULL, BUS_DMA_WAITOK);
	if (error) {
		aprint_normal_dev(sc->dev, "error loading dma map, error %d\n",
		    error);
		goto dmamapload_fail;
	}

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	dma->size = size;

	return 0;

dmamapload_fail:
	bus_dmamap_destroy(sc->sc_dmat, dma->map);
dmamapcreate_fail:
	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
dmamemmap_fail:
	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
fail:

	return (error);
}

static int
pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    uint64_t *ppn_list, uint32_t num_pages)
{
	int error;
	uint32_t i;
	uint64_t ppn;

	error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE);
	if (error) {
		aprint_normal_dev(sc->dev, "Error allocating pages, error %d\n",
		    error);
		return (error);
	}

	ppn = dma->paddr >> PAGE_SHIFT;
	for (i = 0; i < num_pages; i++) {
		ppn_list[i] = ppn + i;
	}

	return (0);
}

static void
pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated)
{
	int i;
	struct pvscsi_hcb *hcb;

	for (i = 0; i < hcbs_allocated; ++i) {
		hcb = sc->hcbs + i;
		bus_dmamap_destroy(sc->sc_dmat, hcb->dma_map);
	}

	pvscsi_dma_free(sc, &sc->sense_buffer_dma);
	pvscsi_dma_free(sc, &sc->sg_list_dma);
}

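/*
 * The sense buffers and scatter/gather lists for all hcbs are carved
 * out of two large DMA allocations; each hcb also gets its own DMA map
 * for data transfers of up to PVSCSI_MAXPHYS bytes.
 */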
static int
pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc)
{
	int i;
	int error;
	struct pvscsi_hcb *hcb;

	i = 0;

	error = pvscsi_dma_alloc(sc, &sc->sg_list_dma,
	    sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating sg list DMA memory, error %d\n", error);
		goto fail;
	}

	error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma,
				 PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating sense buffer DMA memory, error %d\n",
		    error);
		goto fail;
	}

	for (i = 0; i < sc->hcb_cnt; ++i) {
		hcb = sc->hcbs + i;

		error = bus_dmamap_create(sc->sc_dmat, PVSCSI_MAXPHYS,
		    PVSCSI_MAXPHYS_SEGS, PVSCSI_MAXPHYS, 0,
		    BUS_DMA_WAITOK, &hcb->dma_map);
		if (error) {
			aprint_normal_dev(sc->dev,
			    "Error creating dma map for hcb %d, error %d\n",
			    i, error);
			goto fail;
		}

		hcb->sc = sc;
		hcb->dma_map_offset = PVSCSI_SENSE_LENGTH * i;
		hcb->dma_map_size = PVSCSI_SENSE_LENGTH;
		hcb->sense_buffer =
		    (void *)((char *)sc->sense_buffer_dma.vaddr +
		    PVSCSI_SENSE_LENGTH * i);
		hcb->sense_buffer_paddr = sc->sense_buffer_dma.paddr +
		    PVSCSI_SENSE_LENGTH * i;

		hcb->sg_list =
		    (struct pvscsi_sg_list *)((char *)sc->sg_list_dma.vaddr +
		    sizeof(struct pvscsi_sg_list) * i);
		hcb->sg_list_paddr =
		    sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i;
		hcb->sg_list_offset = sizeof(struct pvscsi_sg_list) * i;
	}

	SLIST_INIT(&sc->free_list);
	for (i = (sc->hcb_cnt - 1); i >= 0; --i) {
		hcb = sc->hcbs + i;
		SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
	}

fail:
	if (error) {
		pvscsi_dma_free_per_hcb(sc, i);
	}

	return (error);
}

static void
pvscsi_free_rings(struct pvscsi_softc *sc)
{

	pvscsi_dma_free(sc, &sc->rings_state_dma);
	pvscsi_dma_free(sc, &sc->req_ring_dma);
	pvscsi_dma_free(sc, &sc->cmp_ring_dma);
	if (sc->use_msg) {
		pvscsi_dma_free(sc, &sc->msg_ring_dma);
	}
}

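/*
 * Allocate the shared rings state page and the request, completion and
 * (optionally) message rings.  Each allocation is page aligned and its
 * physical page numbers are recorded for the setup commands below.
 */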
static int
pvscsi_allocate_rings(struct pvscsi_softc *sc)
{
	int error;

	error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma,
	    &sc->rings_state_ppn, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating rings state, error = %d\n", error);
		goto fail;
	}
	sc->rings_state = sc->rings_state_dma.vaddr;

	error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn,
	    sc->req_ring_num_pages);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating req ring pages, error = %d\n", error);
		goto fail;
	}
	sc->req_ring = sc->req_ring_dma.vaddr;

	error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn,
	    sc->cmp_ring_num_pages);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating cmp ring pages, error = %d\n", error);
		goto fail;
	}
	sc->cmp_ring = sc->cmp_ring_dma.vaddr;

	sc->msg_ring = NULL;
	if (sc->use_msg) {
		error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma,
		    sc->msg_ring_ppn, sc->msg_ring_num_pages);
		if (error) {
			aprint_normal_dev(sc->dev,
			    "Error allocating msg ring pages, error = %d\n",
			    error);
			goto fail;
		}
		sc->msg_ring = sc->msg_ring_dma.vaddr;
	}

fail:
	if (error) {
		pvscsi_free_rings(sc);
	}
	return (error);
}

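/*
 * Hand the physical page numbers of the rings state page and of the
 * request and completion rings to the device with the SETUP_RINGS
 * command.
 */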
static void
pvscsi_setup_rings(struct pvscsi_softc *sc)
{
	struct pvscsi_cmd_desc_setup_rings cmd;
	uint32_t i;

	memset(&cmd, 0, sizeof(cmd));

	cmd.rings_state_ppn = sc->rings_state_ppn;

	cmd.req_ring_num_pages = sc->req_ring_num_pages;
	for (i = 0; i < sc->req_ring_num_pages; ++i) {
		cmd.req_ring_ppns[i] = sc->req_ring_ppn[i];
	}

	cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages;
	for (i = 0; i < sc->cmp_ring_num_pages; ++i) {
		cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i];
	}

	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
}

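/*
 * Probe for message ring support: issuing SETUP_MSG_RING with no
 * payload leaves all ones in the command status register when the
 * command is not implemented by the device.
 */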
static int
pvscsi_hw_supports_msg(struct pvscsi_softc *sc)
{
	uint32_t status;

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
	    PVSCSI_CMD_SETUP_MSG_RING);
	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	return (status != -1);
}

static void
pvscsi_setup_msg_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_cmd_desc_setup_msg_ring cmd;
	uint32_t i;

	KASSERTMSG(sc->use_msg, "msg is not being used");

	memset(&cmd, 0, sizeof(cmd));

	cmd.num_pages = sc->msg_ring_num_pages;
	for (i = 0; i < sc->msg_ring_num_pages; ++i) {
		cmd.ring_ppns[i] = sc->msg_ring_ppn[i];
	}

	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
}

static void
pvscsi_adapter_reset(struct pvscsi_softc *sc)
{
	aprint_normal_dev(sc->dev, "Adapter Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
#ifdef PVSCSI_DEBUG_LOGGING
	uint32_t val =
#endif
	pvscsi_read_intr_status(sc);

	DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val);
}

static void
pvscsi_bus_reset(struct pvscsi_softc *sc)
{

	aprint_normal_dev(sc->dev, "Bus Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0);
	pvscsi_process_cmp_ring(sc);

	DEBUG_PRINTF(2, sc->dev, "bus reset done\n");
}

static void
pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target)
{
	struct pvscsi_cmd_desc_reset_device cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.target = target;

	aprint_normal_dev(sc->dev, "Device reset for target %u\n", target);

	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof cmd);
	pvscsi_process_cmp_ring(sc);

	DEBUG_PRINTF(2, sc->dev, "device reset done\n");
}

static void
pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, struct pvscsi_hcb *hcb)
{
	struct pvscsi_cmd_desc_abort_cmd cmd;
	uint64_t context;

	pvscsi_process_cmp_ring(sc);

	if (hcb != NULL) {
		context = pvscsi_hcb_to_context(sc, hcb);

		memset(&cmd, 0, sizeof cmd);
		cmd.target = target;
		cmd.context = context;

		aprint_normal_dev(sc->dev, "Abort for target %u context %llx\n",
		    target, (unsigned long long)context);

		pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
		pvscsi_process_cmp_ring(sc);

		DEBUG_PRINTF(2, sc->dev, "abort done\n");
	} else {
		DEBUG_PRINTF(1, sc->dev,
		    "Target %u hcb %p not found for abort\n", target, hcb);
	}
}

static int
pvscsi_probe(device_t dev, cfdata_t cf, void *aux)
{
	const struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI) {
		return 1;
	}
	return 0;
}

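/*
 * Timeout handler for an outstanding command.  Recovery escalates on
 * each successive timeout of the same hcb: first an abort of the
 * command, then a device reset, then a bus reset, and finally a full
 * adapter reset.
 */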
static void
pvscsi_timeout(void *arg)
{
	struct pvscsi_hcb *hcb = arg;
	struct scsipi_xfer *xs = hcb->xs;

	if (xs == NULL) {
		/* Already completed */
		return;
	}

	struct pvscsi_softc *sc = hcb->sc;

	mutex_enter(&sc->lock);

	scsipi_printaddr(xs->xs_periph);
	printf("command timeout, CDB: ");
	scsipi_print_cdb(xs->cmd);
	printf("\n");

	switch (hcb->recovery) {
	case PVSCSI_HCB_NONE:
		hcb->recovery = PVSCSI_HCB_ABORT;
		pvscsi_abort(sc, hcb->e->target, hcb);
		callout_reset(&xs->xs_callout,
		    mstohz(PVSCSI_ABORT_TIMEOUT * 1000),
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_ABORT:
		hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
		pvscsi_device_reset(sc, hcb->e->target);
		callout_reset(&xs->xs_callout,
		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_DEVICE_RESET:
		hcb->recovery = PVSCSI_HCB_BUS_RESET;
		pvscsi_bus_reset(sc);
		callout_reset(&xs->xs_callout,
		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_BUS_RESET:
		pvscsi_adapter_reset(sc);
		break;
	}
	mutex_exit(&sc->lock);
}

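/*
 * Translate a completion descriptor into scsipi terms: stop the timeout
 * callout, sync the sense buffer, map the host adapter status (btstat)
 * and SCSI status (sdstat) to an xs->error value, copy sense data on
 * CHECK CONDITION, and hand the xfer back via scsipi_done().
 */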
static void
pvscsi_process_completion(struct pvscsi_softc *sc,
    struct pvscsi_ring_cmp_desc *e)
{
	struct pvscsi_hcb *hcb;
	struct scsipi_xfer *xs;
	uint32_t error = XS_NOERROR;
	uint32_t btstat;
	uint32_t sdstat;
	int op;

	hcb = pvscsi_context_to_hcb(sc, e->context);
	xs = hcb->xs;

	callout_stop(&xs->xs_callout);

	btstat = e->host_status;
	sdstat = e->scsi_status;

	xs->status = sdstat;
	xs->resid = xs->datalen - e->data_len;

	DEBUG_PRINTF(3, sc->dev,
	    "command context %llx btstat %d (%#x) sdstat %d (%#x)\n",
	    (unsigned long long)e->context, btstat, btstat, sdstat, sdstat);

	if ((xs->xs_control & XS_CTL_DATA_IN) == XS_CTL_DATA_IN) {
		op = BUS_DMASYNC_POSTREAD;
	} else {
		op = BUS_DMASYNC_POSTWRITE;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sense_buffer_dma.map,
	    hcb->dma_map_offset, hcb->dma_map_size, op);

	if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_OK) {
		DEBUG_PRINTF(3, sc->dev,
		    "completing command context %llx success\n",
		    (unsigned long long)e->context);
		xs->resid = 0;
	} else {
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			switch (sdstat) {
			case SCSI_OK:
				xs->resid = 0;
				error = XS_NOERROR;
				break;
			case SCSI_CHECK:
				error = XS_SENSE;
				xs->resid = 0;

				memset(&xs->sense, 0, sizeof(xs->sense));
				memcpy(&xs->sense, hcb->sense_buffer,
				    MIN(sizeof(xs->sense), e->sense_len));
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				error = XS_NOERROR;
				break;
			case SCSI_TERMINATED:
// 			case SCSI_STATUS_TASK_ABORTED:
				DEBUG_PRINTF(1, sc->dev,
				    "xs: %p sdstat=0x%x\n", xs, sdstat);
				error = XS_DRIVER_STUFFUP;
				break;
			default:
				DEBUG_PRINTF(1, sc->dev,
				    "xs: %p sdstat=0x%x\n", xs, sdstat);
				error = XS_DRIVER_STUFFUP;
				break;
			}
			break;
		case BTSTAT_SELTIMEO:
			error = XS_SELTIMEOUT;
			break;
		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
//			xs->resid = xs->datalen - c->data_len;
			error = XS_NOERROR;
			break;
		case BTSTAT_ABORTQUEUE:
		case BTSTAT_HATIMEOUT:
			error = XS_NOERROR;
			break;
		case BTSTAT_NORESPONSE:
		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			error = XS_RESET;
			break;
		case BTSTAT_SCSIPARITY:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_BUSFREE:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_INVPHASE:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_SENSFAILED:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_DISCONNECT:
		case BTSTAT_BADMSG:
		case BTSTAT_INVPARAM:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_HASOFTWARE:
		case BTSTAT_HAHARDWARE:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		default:
			aprint_normal_dev(sc->dev, "unknown hba status: 0x%x\n",
			    btstat);
			error = XS_DRIVER_STUFFUP;
			break;
		}

		DEBUG_PRINTF(3, sc->dev,
		    "completing command context %llx btstat %x sdstat %x - error %x\n",
		    (unsigned long long)e->context, btstat, sdstat, error);
	}

	xs->error = error;
	pvscsi_hcb_put(sc, hcb);

	mutex_exit(&sc->lock);

	scsipi_done(xs);

	mutex_enter(&sc->lock);
}

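/*
 * Drain the completion ring.  The producer index is written by the
 * device, so it is re-read with acquire semantics on each iteration and
 * the consumer index is only advanced (with release semantics) after
 * the descriptor has been fully processed.
 */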
static void
pvscsi_process_cmp_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_ring_cmp_desc *ring;
	struct pvscsi_rings_state *s;
	struct pvscsi_ring_cmp_desc *e;
	uint32_t mask;

	KASSERT(mutex_owned(&sc->lock));

	s = sc->rings_state;
	ring = sc->cmp_ring;
	mask = MASK(s->cmp_num_entries_log2);

	while (true) {
		size_t crpidx = s->cmp_prod_idx;
		membar_acquire();

		if (s->cmp_cons_idx == crpidx)
			break;

		size_t crcidx = s->cmp_cons_idx & mask;

		e = ring + crcidx;

		pvscsi_process_completion(sc, e);

		/*
		 * ensure completion processing reads happen before write to
		 * (increment of) cmp_cons_idx
		 */
		membar_release();
		s->cmp_cons_idx++;
	}
}

static void
pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e)
{
	struct pvscsi_ring_msg_dev_status_changed *desc;

	switch (e->type) {
	case PVSCSI_MSG_DEV_ADDED:
	case PVSCSI_MSG_DEV_REMOVED: {
		desc = (struct pvscsi_ring_msg_dev_status_changed *)e;
		struct scsibus_softc *ssc = device_private(sc->sc_scsibus_dv);

		aprint_normal_dev(sc->dev, "MSG: device %s at scsi%u:%u:%u\n",
		    desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal",
		    desc->bus, desc->target, desc->lun[1]);

		if (desc->type == PVSCSI_MSG_DEV_ADDED) {
			if (scsi_probe_bus(ssc,
			    desc->target, desc->lun[1]) != 0) {
				aprint_normal_dev(sc->dev,
				    "Error creating path for dev change.\n");
				break;
			}
		} else {
			if (scsipi_target_detach(ssc->sc_channel,
			    desc->target, desc->lun[1],
			    DETACH_FORCE) != 0) {
				aprint_normal_dev(sc->dev,
				    "Error detaching target %d lun %d\n",
				    desc->target, desc->lun[1]);
			}

		}
	} break;
	default:
		aprint_normal_dev(sc->dev, "Unknown msg type 0x%x\n", e->type);
	}
}

static void
pvscsi_process_msg_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_ring_msg_desc *ring;
	struct pvscsi_rings_state *s;
	struct pvscsi_ring_msg_desc *e;
	uint32_t mask;

	KASSERT(mutex_owned(&sc->lock));

	s = sc->rings_state;
	ring = sc->msg_ring;
	mask = MASK(s->msg_num_entries_log2);

	while (true) {
		size_t mpidx = s->msg_prod_idx;	// dma read (device -> cpu)
		membar_acquire();

		if (s->msg_cons_idx == mpidx)
			break;

		size_t mcidx = s->msg_cons_idx & mask;

		e = ring + mcidx;

		pvscsi_process_msg(sc, e);

		/*
		 * ensure message processing reads happen before write to
		 * (increment of) msg_cons_idx
		 */
		membar_release();
		s->msg_cons_idx++;
	}
}

static void
pvscsi_intr_locked(struct pvscsi_softc *sc)
{
	uint32_t val;

	KASSERT(mutex_owned(&sc->lock));

	val = pvscsi_read_intr_status(sc);

	if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
		pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED);
		pvscsi_process_cmp_ring(sc);
		if (sc->use_msg) {
			pvscsi_process_msg_ring(sc);
		}
	}
}

static int
pvscsi_intr(void *xsc)
{
	struct pvscsi_softc *sc;

	sc = xsc;

	mutex_enter(&sc->lock);
	pvscsi_intr_locked(xsc);
	mutex_exit(&sc->lock);

	return 1;
}

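/*
 * scsipi adapter entry point.  ADAPTER_REQ_SET_XFER_MODE is answered
 * with tagged queueing only; ADAPTER_REQ_RUN_XFER builds a request ring
 * descriptor for the xfer and notifies the device.
 */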
   1220  1.1  skrll static void
   1221  1.1  skrll pvscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
   1222  1.1  skrll     request, void *arg)
   1223  1.1  skrll {
   1224  1.1  skrll 	struct pvscsi_softc *sc = device_private(chan->chan_adapter->adapt_dev);
   1225  1.1  skrll 
   1226  1.1  skrll 	if (request == ADAPTER_REQ_SET_XFER_MODE) {
   1227  1.1  skrll 		struct scsipi_xfer_mode *xm = arg;
   1228  1.1  skrll 
   1229  1.1  skrll 		xm->xm_mode = PERIPH_CAP_TQING;
   1230  1.1  skrll 		xm->xm_period = 0;
   1231  1.1  skrll 		xm->xm_offset = 0;
   1232  1.1  skrll 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
   1233  1.1  skrll 		return;
   1234  1.1  skrll 	} else if (request != ADAPTER_REQ_RUN_XFER) {
   1235  1.1  skrll 		DEBUG_PRINTF(1, sc->dev, "unhandled %d\n", request);
   1236  1.1  skrll 		return;
   1237  1.1  skrll 	}
   1238  1.1  skrll 
   1239  1.1  skrll 	/* request is ADAPTER_REQ_RUN_XFER */
   1240  1.1  skrll 	struct scsipi_xfer *xs = arg;
   1241  1.1  skrll 	struct scsipi_periph *periph = xs->xs_periph;
   1242  1.1  skrll #ifdef SCSIPI_DEBUG
   1243  1.1  skrll 	periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
   1244  1.1  skrll #endif
   1245  1.1  skrll 
   1246  1.1  skrll 	uint32_t req_num_entries_log2;
   1247  1.1  skrll 	struct pvscsi_ring_req_desc *ring;
   1248  1.1  skrll 	struct pvscsi_ring_req_desc *e;
   1249  1.1  skrll 	struct pvscsi_rings_state *s;
   1250  1.1  skrll 	struct pvscsi_hcb *hcb;
   1251  1.1  skrll 
   1252  1.1  skrll 	if (xs->cmdlen < 0 || xs->cmdlen > sizeof(e->cdb)) {
   1253  1.1  skrll 		DEBUG_PRINTF(1, sc->dev, "bad cmdlen %zu > %zu\n",
   1254  1.1  skrll 		    (size_t)xs->cmdlen, sizeof(e->cdb));
   1255  1.1  skrll 		/* not a temporary condition */
   1256  1.1  skrll 		xs->error = XS_DRIVER_STUFFUP;
   1257  1.1  skrll 		scsipi_done(xs);
   1258  1.1  skrll 		return;
   1259  1.1  skrll 	}
   1260  1.1  skrll 
   1261  1.1  skrll 	ring = sc->req_ring;
   1262  1.1  skrll 	s = sc->rings_state;
   1263  1.1  skrll 
   1264  1.1  skrll 	hcb = NULL;
   1265  1.1  skrll 	req_num_entries_log2 = s->req_num_entries_log2;
   1266  1.1  skrll 
   1267  1.1  skrll 	/* Protect against multiple senders */
   1268  1.1  skrll 	mutex_enter(&sc->lock);
   1269  1.1  skrll 
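                     	/*
                     	 * The completion ring has as many entries as the request ring, so
                     	 * cap the number of outstanding requests (req_prod_idx - cmp_cons_idx)
                     	 * at the ring size to avoid overflowing the completion ring.
                     	 */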
   1270  1.1  skrll 	if (s->req_prod_idx - s->cmp_cons_idx >=
   1271  1.1  skrll 	    (1 << req_num_entries_log2)) {
   1272  1.1  skrll 		aprint_normal_dev(sc->dev,
   1273  1.1  skrll 		    "Not enough room on completion ring.\n");
   1274  1.1  skrll 		xs->error = XS_RESOURCE_SHORTAGE;
   1275  1.1  skrll 		goto finish_xs;
   1276  1.1  skrll 	}
   1277  1.1  skrll 
   1285  1.1  skrll 	hcb = pvscsi_hcb_get(sc);
   1286  1.1  skrll 	if (hcb == NULL) {
   1287  1.1  skrll 		aprint_normal_dev(sc->dev, "No free hcbs.\n");
   1288  1.1  skrll 		xs->error = XS_RESOURCE_SHORTAGE;
   1289  1.1  skrll 		goto finish_xs;
   1290  1.1  skrll 	}
   1291  1.1  skrll 
   1292  1.1  skrll 	hcb->xs = xs;
   1293  1.1  skrll 
   1294  1.1  skrll 	const size_t rridx = s->req_prod_idx & MASK(req_num_entries_log2);
   1295  1.1  skrll 	e = ring + rridx;
   1296  1.1  skrll 
   1297  1.1  skrll 	memset(e, 0, sizeof(*e));
   1298  1.1  skrll 	e->bus = 0;
   1299  1.1  skrll 	e->target = periph->periph_target;
   1300  1.1  skrll 	e->lun[1] = periph->periph_lun;
   1301  1.1  skrll 	e->data_addr = 0;
   1302  1.1  skrll 	e->data_len = xs->datalen;
   1303  1.1  skrll 	e->vcpu_hint = cpu_index(curcpu());
   1304  1.1  skrll 	e->flags = 0;
   1305  1.1  skrll 
   1306  1.1  skrll 	e->cdb_len = xs->cmdlen;
   1307  1.1  skrll 	memcpy(e->cdb, xs->cmd, xs->cmdlen);
   1308  1.1  skrll 
   1309  1.1  skrll 	e->sense_addr = 0;
   1310  1.1  skrll 	e->sense_len = sizeof(xs->sense);
   1311  1.1  skrll 	if (e->sense_len > 0) {
   1312  1.1  skrll 		e->sense_addr = hcb->sense_buffer_paddr;
   1313  1.1  skrll 	}
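                     	/*
                     	 * All commands are issued as simple-queue-tagged requests; the
                     	 * per-xfer tag type is not consulted.
                     	 */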
   1314  1.1  skrll 	//e->tag = xs->xs_tag_type;
   1315  1.1  skrll 	e->tag = MSG_SIMPLE_Q_TAG;
   1316  1.1  skrll 
   1317  1.1  skrll 	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
   1318  1.1  skrll 	case XS_CTL_DATA_IN:
   1319  1.1  skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST;
   1320  1.1  skrll 		break;
   1321  1.1  skrll 	case XS_CTL_DATA_OUT:
   1322  1.1  skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE;
   1323  1.1  skrll 		break;
   1324  1.1  skrll 	default:
   1325  1.1  skrll 		e->flags |= PVSCSI_FLAG_CMD_DIR_NONE;
   1326  1.1  skrll 		break;
   1327  1.1  skrll 	}
   1328  1.1  skrll 
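                     	/*
                     	 * The context value is echoed back in the completion descriptor and
                     	 * is used to map the completion back to this hcb.
                     	 */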
   1329  1.1  skrll 	e->context = pvscsi_hcb_to_context(sc, hcb);
   1330  1.1  skrll 	hcb->e = e;
   1331  1.1  skrll 
   1332  1.1  skrll 	DEBUG_PRINTF(3, sc->dev,
   1333  1.1  skrll 	    " queuing command %02x context %llx\n", e->cdb[0],
   1334  1.1  skrll 	    (unsigned long long)e->context);
   1335  1.1  skrll 
   1336  1.1  skrll 	int flags;
   1337  1.1  skrll 	flags  = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE;
   1338  1.1  skrll 	flags |= (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
   1339  1.1  skrll 
   1340  1.1  skrll 	int error = bus_dmamap_load(sc->sc_dmat, hcb->dma_map,
   1341  1.1  skrll 	    xs->data, xs->datalen, NULL, flags);
   1342  1.1  skrll 
   1343  1.1  skrll 	if (error) {
   1344  1.1  skrll 		if (error == ENOMEM || error == EAGAIN) {
   1345  1.1  skrll 			xs->error = XS_RESOURCE_SHORTAGE;
   1346  1.1  skrll 		} else {
   1347  1.1  skrll 			xs->error = XS_DRIVER_STUFFUP;
   1348  1.1  skrll 		}
    1349  1.1  skrll 		DEBUG_PRINTF(1, sc->dev,
    1350  1.1  skrll 		    "xs: %p load error %d data %p len %d\n",
    1351  1.1  skrll 		    xs, error, xs->data, xs->datalen);
   1352  1.1  skrll 		goto error_load;
   1353  1.1  skrll 	}
   1354  1.1  skrll 
   1355  1.1  skrll 	int op = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
   1356  1.1  skrll 	    BUS_DMASYNC_PREWRITE;
   1357  1.1  skrll 	int nseg = hcb->dma_map->dm_nsegs;
   1358  1.1  skrll 	bus_dma_segment_t *segs = hcb->dma_map->dm_segs;
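                     	/*
                     	 * Multi-segment transfers go through the per-hcb scatter/gather
                     	 * list: data_addr points at the list and the SG_LIST flag is set.
                     	 * Single-segment transfers pass the buffer address directly.
                     	 */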
   1359  1.1  skrll 	if (nseg != 0) {
   1360  1.1  skrll 		if (nseg > 1) {
   1361  1.1  skrll 			struct pvscsi_sg_element *sge;
   1362  1.1  skrll 
   1363  1.1  skrll 			KASSERTMSG(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT,
   1364  1.1  skrll 			    "too many sg segments");
   1365  1.1  skrll 
   1366  1.1  skrll 			sge = hcb->sg_list->sge;
   1367  1.1  skrll 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
   1368  1.1  skrll 
   1369  1.1  skrll 			for (size_t i = 0; i < nseg; ++i) {
   1370  1.1  skrll 				sge[i].addr = segs[i].ds_addr;
   1371  1.1  skrll 				sge[i].length = segs[i].ds_len;
   1372  1.1  skrll 				sge[i].flags = 0;
   1373  1.1  skrll 			}
   1374  1.1  skrll 
   1375  1.1  skrll 			e->data_addr = hcb->sg_list_paddr;
   1376  1.1  skrll 
   1377  1.1  skrll 			bus_dmamap_sync(sc->sc_dmat,
   1378  1.1  skrll 			    sc->sg_list_dma.map, hcb->sg_list_offset,
   1379  1.1  skrll 			    sizeof(*sge) * nseg, BUS_DMASYNC_PREWRITE);
   1380  1.1  skrll 		} else {
   1381  1.1  skrll 			e->data_addr = segs->ds_addr;
   1382  1.1  skrll 		}
   1383  1.1  skrll 
   1384  1.1  skrll 		bus_dmamap_sync(sc->sc_dmat, hcb->dma_map, 0,
   1385  1.1  skrll 		    xs->datalen, op);
   1386  1.1  skrll 	} else {
   1387  1.1  skrll 		e->data_addr = 0;
   1388  1.1  skrll 	}
   1389  1.1  skrll 
   1390  1.1  skrll 	/*
   1391  1.1  skrll 	 * Ensure request record writes happen before write to (increment of)
   1392  1.1  skrll 	 * req_prod_idx.
   1393  1.1  skrll 	 */
   1394  1.1  skrll 	membar_producer();
   1395  1.1  skrll 
   1396  1.1  skrll 	uint8_t cdb0 = e->cdb[0];
   1397  1.1  skrll 
   1398  1.1  skrll 	/* handle timeout */
   1399  1.1  skrll 	if ((xs->xs_control & XS_CTL_POLL) == 0) {
   1400  1.1  skrll 		int timeout = mstohz(xs->timeout);
   1401  1.1  skrll 		/* start expire timer */
   1402  1.1  skrll 		if (timeout == 0)
   1403  1.1  skrll 			timeout = 1;
   1404  1.1  skrll 		callout_reset(&xs->xs_callout, timeout, pvscsi_timeout, hcb);
   1405  1.1  skrll 	}
   1406  1.1  skrll 
   1407  1.1  skrll 	s->req_prod_idx++;
   1408  1.1  skrll 
   1409  1.1  skrll 	/*
   1410  1.1  skrll 	 * Ensure req_prod_idx write (increment) happens before
   1411  1.1  skrll 	 * IO is kicked (via a write).
   1412  1.2  skrll 	 */
   1413  1.2  skrll 	membar_producer();
   1414  1.2  skrll 
   1415  1.1  skrll 	pvscsi_kick_io(sc, cdb0);
   1416  1.1  skrll 	mutex_exit(&sc->lock);
   1417  1.1  skrll 
   1418  1.1  skrll 	return;
   1419  1.1  skrll 
   1420  1.1  skrll error_load:
   1421  1.1  skrll 	pvscsi_hcb_put(sc, hcb);
   1422  1.1  skrll 
   1423  1.1  skrll finish_xs:
   1424  1.1  skrll 	mutex_exit(&sc->lock);
   1425  1.1  skrll 	scsipi_done(xs);
   1426  1.1  skrll }
   1427  1.1  skrll 
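                     /*
                      * Disestablish and release any interrupt resources allocated by
                      * pvscsi_setup_interrupts().
                      */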
   1428  1.1  skrll static void
   1429  1.1  skrll pvscsi_free_interrupts(struct pvscsi_softc *sc)
   1430  1.1  skrll {
   1431  1.1  skrll 
   1432  1.1  skrll 	if (sc->sc_ih != NULL) {
   1433  1.1  skrll 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   1434  1.1  skrll 		sc->sc_ih = NULL;
   1435  1.1  skrll 	}
   1436  1.1  skrll 	if (sc->sc_pihp != NULL) {
   1437  1.1  skrll 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
   1438  1.1  skrll 		sc->sc_pihp = NULL;
   1439  1.1  skrll 	}
   1440  1.1  skrll }
   1441  1.1  skrll 
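                     /*
                      * Allocate and establish the adapter interrupt.  MSI-X is preferred,
                      * falling back to MSI and then INTx; the "use_msix" and "use_msi"
                      * tunables can disable the first two.
                      */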
   1442  1.1  skrll static int
   1443  1.1  skrll pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *pa)
   1444  1.1  skrll {
   1445  1.1  skrll 	int use_msix;
   1446  1.1  skrll 	int use_msi;
   1447  1.1  skrll 	int counts[PCI_INTR_TYPE_SIZE];
   1448  1.1  skrll 
   1449  1.1  skrll 	for (size_t i = 0; i < PCI_INTR_TYPE_SIZE; i++) {
   1450  1.1  skrll 		counts[i] = 1;
   1451  1.1  skrll 	}
   1452  1.1  skrll 
   1453  1.1  skrll 	use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix);
   1454  1.1  skrll 	use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi);
   1455  1.1  skrll 
   1456  1.1  skrll 	if (!use_msix) {
   1457  1.1  skrll 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1458  1.1  skrll 	}
   1459  1.1  skrll 	if (!use_msi) {
   1460  1.1  skrll 		counts[PCI_INTR_TYPE_MSI] = 0;
   1461  1.1  skrll 	}
   1462  1.1  skrll 
   1463  1.1  skrll 	/* Allocate and establish the interrupt. */
   1464  1.1  skrll 	if (pci_intr_alloc(pa, &sc->sc_pihp, counts, PCI_INTR_TYPE_MSIX)) {
   1465  1.1  skrll 		aprint_error_dev(sc->dev, "can't allocate handler\n");
   1466  1.1  skrll 		goto fail;
   1467  1.1  skrll 	}
   1468  1.1  skrll 
   1469  1.1  skrll 	char intrbuf[PCI_INTRSTR_LEN];
   1470  1.1  skrll 	const pci_chipset_tag_t pc = pa->pa_pc;
   1471  1.1  skrll 	char const *intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
   1472  1.1  skrll 	    sizeof(intrbuf));
   1473  1.1  skrll 
    1474  1.1  skrll 	pci_intr_setattr(pc, sc->sc_pihp, PCI_INTR_MPSAFE, true);
    1475  1.1  skrll 	sc->sc_ih = pci_intr_establish_xname(pc, sc->sc_pihp[0], IPL_BIO,
    1476  1.1  skrll 	    pvscsi_intr, sc, device_xname(sc->dev));
    1477  1.1  skrll 	if (sc->sc_ih == NULL) {
    1478  1.1  skrll 		pci_intr_release(pc, sc->sc_pihp, 1);
    1479  1.1  skrll 		sc->sc_pihp = NULL;
    1480  1.1  skrll 		aprint_error_dev(sc->dev, "couldn't establish interrupt");
    1481  1.1  skrll 		if (intrstr != NULL)
    1482  1.1  skrll 			aprint_error(" at %s", intrstr);
    1483  1.1  skrll 		aprint_error("\n");
    1484  1.1  skrll 		goto fail;
    1485  1.1  skrll 	}
   1486  1.1  skrll 
   1487  1.1  skrll 	aprint_normal_dev(sc->dev, "interrupting at %s\n", intrstr);
   1488  1.1  skrll 
   1489  1.1  skrll 	return (0);
   1490  1.1  skrll 
   1491  1.1  skrll fail:
   1492  1.1  skrll 	if (sc->sc_ih != NULL) {
   1493  1.1  skrll 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   1494  1.1  skrll 		sc->sc_ih = NULL;
   1495  1.1  skrll 	}
   1496  1.1  skrll 	if (sc->sc_pihp != NULL) {
   1497  1.1  skrll 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
   1498  1.1  skrll 		sc->sc_pihp = NULL;
   1499  1.1  skrll 	}
   1500  1.1  skrll 	if (sc->sc_mems) {
   1501  1.1  skrll 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   1502  1.1  skrll 		sc->sc_mems = 0;
   1503  1.1  skrll 	}
   1504  1.1  skrll 
   1505  1.1  skrll 	return 1;
   1506  1.1  skrll }
   1507  1.1  skrll 
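                     /*
                      * Release everything allocated during attach: per-hcb DMA resources,
                      * the hcb array, the rings, the interrupt and the register mapping.
                      */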
   1508  1.1  skrll static void
   1509  1.1  skrll pvscsi_free_all(struct pvscsi_softc *sc)
   1510  1.1  skrll {
   1511  1.1  skrll 
   1512  1.1  skrll 	pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt);
   1513  1.1  skrll 
   1514  1.1  skrll 	if (sc->hcbs) {
   1515  1.1  skrll 		kmem_free(sc->hcbs, sc->hcb_cnt * sizeof(*sc->hcbs));
   1516  1.1  skrll 	}
   1517  1.1  skrll 
   1518  1.1  skrll 	pvscsi_free_rings(sc);
   1519  1.1  skrll 
   1520  1.1  skrll 	pvscsi_free_interrupts(sc);
   1521  1.1  skrll 
   1522  1.1  skrll 	if (sc->sc_mems) {
   1523  1.1  skrll 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   1524  1.1  skrll 		sc->sc_mems = 0;
   1525  1.1  skrll 	}
   1526  1.1  skrll }
   1527  1.1  skrll 
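                     /*
                      * Enable PCI bus mastering for the device if it is not already enabled.
                      */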
   1528  1.1  skrll static inline void
   1529  1.1  skrll pci_enable_busmaster(device_t dev, const pci_chipset_tag_t pc,
   1530  1.1  skrll     const pcitag_t tag)
   1531  1.1  skrll {
   1532  1.1  skrll 	pcireg_t pci_cmd_word;
   1533  1.1  skrll 
   1534  1.1  skrll 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1535  1.1  skrll 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
   1536  1.1  skrll 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
   1537  1.1  skrll 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1538  1.1  skrll 	}
   1539  1.1  skrll }
   1540  1.1  skrll 
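                     /*
                      * Autoconfiguration attach: map the registers, set up DMA and the
                      * interrupt, size and allocate the rings and hcbs, reset the adapter
                      * and attach the scsipi channel.
                      */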
   1541  1.1  skrll static void
   1542  1.1  skrll pvscsi_attach(device_t parent, device_t dev, void *aux)
   1543  1.1  skrll {
   1544  1.1  skrll 	const struct pci_attach_args *pa = aux;
   1545  1.1  skrll 	struct pvscsi_softc *sc;
   1546  1.1  skrll 	int rid;
   1547  1.1  skrll 	int error;
   1548  1.1  skrll 	int max_queue_depth;
   1549  1.1  skrll 	int adapter_queue_size;
   1550  1.1  skrll 
   1551  1.1  skrll 	sc = device_private(dev);
   1552  1.1  skrll 	sc->dev = dev;
   1553  1.1  skrll 
   1554  1.1  skrll 	struct scsipi_adapter *adapt = &sc->sc_adapter;
   1555  1.1  skrll 	struct scsipi_channel *chan = &sc->sc_channel;
   1556  1.1  skrll 
   1557  1.1  skrll 	mutex_init(&sc->lock, MUTEX_DEFAULT, IPL_BIO);
   1558  1.1  skrll 
   1559  1.1  skrll 	sc->sc_pc = pa->pa_pc;
   1560  1.1  skrll 	pci_enable_busmaster(dev, pa->pa_pc, pa->pa_tag);
   1561  1.1  skrll 
   1562  1.1  skrll 	pci_aprint_devinfo_fancy(pa, "virtual disk controller",
   1563  1.1  skrll 	    VMWARE_PVSCSI_DEVSTR, true);
   1564  1.1  skrll 
   1565  1.1  skrll 	/*
    1566  1.1  skrll 	 * Map the device.  All devices support memory-mapped access.
   1567  1.1  skrll 	 */
   1568  1.1  skrll 	bool memh_valid;
   1569  1.1  skrll 	bus_space_tag_t memt;
   1570  1.1  skrll 	bus_space_handle_t memh;
   1571  1.1  skrll 	bus_size_t mems;
   1572  1.1  skrll 	pcireg_t regt;
   1573  1.1  skrll 
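                     	/* Scan the BARs for the first memory-type register window. */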
   1574  1.1  skrll 	for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END; rid += sizeof(regt)) {
   1575  1.1  skrll 		regt = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rid);
   1576  1.1  skrll 		if (PCI_MAPREG_TYPE(regt) == PCI_MAPREG_TYPE_MEM)
   1577  1.1  skrll 			break;
   1578  1.1  skrll 	}
   1579  1.1  skrll 
    1580  1.1  skrll 	if (rid >= PCI_MAPREG_END) {
    1581  1.1  skrll 		aprint_error_dev(dev,
    1582  1.1  skrll 		    "unable to locate device registers\n");
                     		return;
    1583  1.1  skrll 	}
   1584  1.1  skrll 
   1585  1.1  skrll 	memh_valid = (pci_mapreg_map(pa, rid, regt, 0, &memt, &memh,
   1586  1.1  skrll 	    NULL, &mems) == 0);
   1587  1.1  skrll 	if (!memh_valid) {
   1588  1.1  skrll 		aprint_error_dev(dev,
   1589  1.1  skrll 		    "unable to map device registers\n");
   1590  1.1  skrll 		return;
   1591  1.1  skrll 	}
   1592  1.1  skrll 	sc->sc_memt = memt;
   1593  1.1  skrll 	sc->sc_memh = memh;
   1594  1.1  skrll 	sc->sc_mems = mems;
   1595  1.1  skrll 
   1596  1.1  skrll 	if (pci_dma64_available(pa)) {
   1597  1.1  skrll 		sc->sc_dmat = pa->pa_dmat64;
   1598  1.1  skrll 		aprint_verbose_dev(sc->dev, "64-bit DMA\n");
   1599  1.1  skrll 	} else {
   1600  1.1  skrll 		aprint_verbose_dev(sc->dev, "32-bit DMA\n");
   1601  1.1  skrll 		sc->sc_dmat = pa->pa_dmat;
   1602  1.1  skrll 	}
   1603  1.1  skrll 
   1604  1.1  skrll 	error = pvscsi_setup_interrupts(sc, pa);
   1605  1.1  skrll 	if (error) {
   1606  1.1  skrll 		aprint_normal_dev(dev, "Interrupt setup failed\n");
   1607  1.1  skrll 		pvscsi_free_all(sc);
   1608  1.1  skrll 		return;
   1609  1.1  skrll 	}
   1610  1.1  skrll 
   1611  1.1  skrll 	sc->max_targets = pvscsi_get_max_targets(sc);
   1612  1.1  skrll 
   1613  1.1  skrll 	sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) &&
   1614  1.1  skrll 	    pvscsi_hw_supports_msg(sc);
   1615  1.1  skrll 	sc->msg_ring_num_pages = sc->use_msg ? 1 : 0;
   1616  1.1  skrll 
   1617  1.1  skrll 	sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages",
   1618  1.1  skrll 	    pvscsi_request_ring_pages);
   1619  1.1  skrll 	if (sc->req_ring_num_pages <= 0) {
   1620  1.1  skrll 		if (sc->max_targets <= 16) {
   1621  1.1  skrll 			sc->req_ring_num_pages =
   1622  1.1  skrll 			    PVSCSI_DEFAULT_NUM_PAGES_REQ_RING;
   1623  1.1  skrll 		} else {
   1624  1.1  skrll 			sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
   1625  1.1  skrll 		}
   1626  1.1  skrll 	} else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) {
   1627  1.1  skrll 		sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
   1628  1.1  skrll 	}
   1629  1.1  skrll 	sc->cmp_ring_num_pages = sc->req_ring_num_pages;
   1630  1.1  skrll 
   1631  1.1  skrll 	max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth",
   1632  1.1  skrll 	    pvscsi_max_queue_depth);
   1633  1.1  skrll 
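                     	/*
                     	 * The adapter queue depth is the number of request descriptors that
                     	 * fit in the request ring, optionally capped by the "max_queue_depth"
                     	 * tunable and the hardware maximum.
                     	 */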
   1634  1.1  skrll 	adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) /
   1635  1.1  skrll 	    sizeof(struct pvscsi_ring_req_desc);
   1636  1.1  skrll 	if (max_queue_depth > 0) {
   1637  1.1  skrll 		adapter_queue_size = MIN(adapter_queue_size, max_queue_depth);
   1638  1.1  skrll 	}
   1639  1.1  skrll 	adapter_queue_size = MIN(adapter_queue_size,
   1640  1.1  skrll 	    PVSCSI_MAX_REQ_QUEUE_DEPTH);
   1641  1.1  skrll 
   1642  1.1  skrll 	aprint_normal_dev(sc->dev, "Use Msg: %d\n", sc->use_msg);
   1643  1.1  skrll 	aprint_normal_dev(sc->dev, "Max targets: %d\n", sc->max_targets);
   1644  1.1  skrll 	aprint_normal_dev(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages);
   1645  1.1  skrll 	aprint_normal_dev(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages);
   1646  1.1  skrll 	aprint_normal_dev(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages);
   1647  1.1  skrll 	aprint_normal_dev(sc->dev, "Queue size: %d\n", adapter_queue_size);
   1648  1.1  skrll 
   1649  1.1  skrll 	if (pvscsi_allocate_rings(sc)) {
   1650  1.1  skrll 		aprint_normal_dev(dev, "ring allocation failed\n");
   1651  1.1  skrll 		pvscsi_free_all(sc);
   1652  1.1  skrll 		return;
   1653  1.1  skrll 	}
   1654  1.1  skrll 
   1655  1.1  skrll 	sc->hcb_cnt = adapter_queue_size;
   1656  1.1  skrll 	sc->hcbs = kmem_zalloc(sc->hcb_cnt * sizeof(*sc->hcbs), KM_SLEEP);
   1657  1.1  skrll 
   1658  1.1  skrll 	if (pvscsi_dma_alloc_per_hcb(sc)) {
   1659  1.1  skrll 		aprint_normal_dev(dev, "error allocating per hcb dma memory\n");
   1660  1.1  skrll 		pvscsi_free_all(sc);
   1661  1.1  skrll 		return;
   1662  1.1  skrll 	}
   1663  1.1  skrll 
   1664  1.1  skrll 	pvscsi_adapter_reset(sc);
   1665  1.1  skrll 
   1666  1.1  skrll 	/*
   1667  1.1  skrll 	 * Fill in the scsipi_adapter.
   1668  1.1  skrll 	 */
   1669  1.1  skrll 	memset(adapt, 0, sizeof(*adapt));
   1670  1.1  skrll 	adapt->adapt_dev = sc->dev;
   1671  1.1  skrll 	adapt->adapt_nchannels = 1;
   1672  1.1  skrll 	adapt->adapt_openings = MIN(adapter_queue_size, PVSCSI_CMD_PER_LUN);
   1673  1.1  skrll 	adapt->adapt_max_periph = adapt->adapt_openings;
   1674  1.1  skrll 	adapt->adapt_request = pvscsi_scsipi_request;
   1675  1.1  skrll 	adapt->adapt_minphys = minphys;
   1676  1.1  skrll 
   1677  1.1  skrll 	/*
   1678  1.1  skrll 	 * Fill in the scsipi_channel.
   1679  1.1  skrll 	 */
   1680  1.1  skrll 	memset(chan, 0, sizeof(*chan));
   1681  1.1  skrll 	chan->chan_adapter = adapt;
   1682  1.1  skrll 	chan->chan_bustype = &scsi_bustype;
   1683  1.1  skrll 	chan->chan_channel = 0;
   1684  1.1  skrll 	chan->chan_ntargets = MIN(PVSCSI_MAX_TARGET, 16);	/* cap reasonably */
   1685  1.1  skrll 	chan->chan_nluns = MIN(PVSCSI_MAX_LUN, 1024);		/* cap reasonably */
   1686  1.1  skrll 	chan->chan_id = PVSCSI_MAX_TARGET;
   1687  1.1  skrll 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
   1688  1.1  skrll 
   1689  1.1  skrll 	pvscsi_setup_rings(sc);
   1690  1.1  skrll 	if (sc->use_msg) {
   1691  1.1  skrll 		pvscsi_setup_msg_ring(sc);
   1692  1.1  skrll 	}
   1693  1.1  skrll 
   1694  1.1  skrll 	sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1);
   1695  1.1  skrll 
   1696  1.1  skrll 	pvscsi_intr_enable(sc);
   1697  1.1  skrll 
   1698  1.1  skrll 	sc->sc_scsibus_dv = config_found(sc->dev, &sc->sc_channel, scsiprint,
   1699  1.1  skrll 	    CFARGS_NONE);
   1700  1.1  skrll 
   1701  1.1  skrll 	return;
   1702  1.1  skrll }
   1703  1.1  skrll 
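                     /*
                      * Autoconfiguration detach: quiesce the adapter and release all
                      * resources.
                      */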
   1704  1.1  skrll static int
   1705  1.1  skrll pvscsi_detach(device_t dev, int flags)
   1706  1.1  skrll {
    1707  1.1  skrll 	struct pvscsi_softc *sc;
                     	int error;
    1708  1.1  skrll 
    1709  1.1  skrll 	sc = device_private(dev);
    1710  1.1  skrll 
                     	error = config_detach_children(dev, flags);
                     	if (error)
                     		return error;
                     
    1711  1.1  skrll 	pvscsi_intr_disable(sc);
   1712  1.1  skrll 	pvscsi_adapter_reset(sc);
   1713  1.1  skrll 
   1714  1.1  skrll 	pvscsi_free_all(sc);
   1715  1.1  skrll 
   1716  1.1  skrll 	mutex_destroy(&sc->lock);
   1717  1.1  skrll 
   1718  1.1  skrll 	return (0);
   1719  1.1  skrll }
   1720