pvscsi.c revision 1.3 1 1.1 skrll /*-
2 1.1 skrll * Copyright (c) 2018 VMware, Inc.
3 1.1 skrll *
4 1.1 skrll * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
5 1.1 skrll */
6 1.1 skrll
7 1.1 skrll /*
8 1.1 skrll
9 1.1 skrll These files are provided under a dual BSD-2 Clause/GPLv2 license. When
10 1.1 skrll using or redistributing this file, you may do so under either license.
11 1.1 skrll
12 1.1 skrll BSD-2 Clause License
13 1.1 skrll
14 1.1 skrll Copyright (c) 2018 VMware, Inc.
15 1.1 skrll
16 1.1 skrll Redistribution and use in source and binary forms, with or without
17 1.1 skrll modification, are permitted provided that the following conditions
18 1.1 skrll are met:
19 1.1 skrll
20 1.1 skrll * Redistributions of source code must retain the above copyright
21 1.1 skrll notice, this list of conditions and the following disclaimer.
22 1.1 skrll
23 1.1 skrll * Redistributions in binary form must reproduce the above copyright
24 1.1 skrll notice, this list of conditions and the following disclaimer in
25 1.1 skrll the documentation and/or other materials provided with the
26 1.1 skrll distribution.
27 1.1 skrll
28 1.1 skrll THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 1.1 skrll "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 1.1 skrll LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 1.1 skrll A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 1.1 skrll OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 1.1 skrll SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 1.1 skrll LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 1.1 skrll DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 1.1 skrll THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 1.1 skrll (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 1.1 skrll OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 1.1 skrll
40 1.1 skrll GPL License Summary
41 1.1 skrll
42 1.1 skrll Copyright (c) 2018 VMware, Inc.
43 1.1 skrll
44 1.1 skrll This program is free software; you can redistribute it and/or modify
45 1.1 skrll it under the terms of version 2 of the GNU General Public License as
46 1.1 skrll published by the Free Software Foundation.
47 1.1 skrll
48 1.1 skrll This program is distributed in the hope that it will be useful, but
49 1.1 skrll WITHOUT ANY WARRANTY; without even the implied warranty of
50 1.1 skrll MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
51 1.1 skrll General Public License for more details.
52 1.1 skrll
53 1.1 skrll You should have received a copy of the GNU General Public License
54 1.1 skrll along with this program; if not, write to the Free Software
55 1.1 skrll Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
56 1.1 skrll The full GNU General Public License is included in this distribution
57 1.1 skrll in the file called LICENSE.GPL.
58 1.1 skrll
59 1.1 skrll */
60 1.1 skrll
61 1.1 skrll #include <sys/cdefs.h>
62 1.3 riastrad __KERNEL_RCSID(0, "$NetBSD: pvscsi.c,v 1.3 2025/09/06 02:56:30 riastradh Exp $");
63 1.1 skrll
64 1.1 skrll #include <sys/param.h>
65 1.1 skrll
66 1.1 skrll #include <sys/atomic.h>
67 1.1 skrll #include <sys/buf.h>
68 1.1 skrll #include <sys/bus.h>
69 1.1 skrll #include <sys/cpu.h>
70 1.1 skrll #include <sys/device.h>
71 1.1 skrll #include <sys/kernel.h>
72 1.1 skrll #include <sys/kmem.h>
73 1.3 riastrad #include <sys/paravirt_membar.h>
74 1.1 skrll #include <sys/queue.h>
75 1.1 skrll #include <sys/sysctl.h>
76 1.1 skrll #include <sys/systm.h>
77 1.1 skrll
78 1.1 skrll #include <dev/pci/pcireg.h>
79 1.1 skrll #include <dev/pci/pcivar.h>
80 1.1 skrll #include <dev/pci/pcidevs.h>
81 1.1 skrll
82 1.1 skrll #include <dev/scsipi/scsi_all.h>
83 1.1 skrll #include <dev/scsipi/scsi_message.h>
84 1.1 skrll #include <dev/scsipi/scsiconf.h>
85 1.1 skrll #include <dev/scsipi/scsipi_disk.h>
86 1.1 skrll #include <dev/scsipi/scsi_disk.h>
87 1.1 skrll
88 1.1 skrll #include "pvscsi.h"
89 1.1 skrll
90 1.1 skrll #define PVSCSI_DEFAULT_NUM_PAGES_REQ_RING 8
91 1.1 skrll #define PVSCSI_SENSE_LENGTH 256
92 1.1 skrll
93 1.1 skrll #define PVSCSI_MAXPHYS MAXPHYS
94 1.1 skrll #define PVSCSI_MAXPHYS_SEGS ((PVSCSI_MAXPHYS / PAGE_SIZE) + 1)
95 1.1 skrll
96 1.1 skrll #define PVSCSI_CMD_PER_LUN 64
97 1.1 skrll #define PVSCSI_MAX_LUN 8
98 1.1 skrll #define PVSCSI_MAX_TARGET 16
99 1.1 skrll
100 1.1 skrll //#define PVSCSI_DEBUG_LOGGING
101 1.1 skrll
102 1.1 skrll #ifdef PVSCSI_DEBUG_LOGGING
103 1.1 skrll #define DEBUG_PRINTF(level, dev, fmt, ...) \
104 1.1 skrll do { \
105 1.1 skrll if (pvscsi_log_level >= (level)) { \
106 1.1 skrll aprint_normal_dev((dev), (fmt), ##__VA_ARGS__); \
107 1.1 skrll } \
108 1.1 skrll } while(0)
109 1.1 skrll #else
110 1.1 skrll #define DEBUG_PRINTF(level, dev, fmt, ...)
111 1.1 skrll #endif /* PVSCSI_DEBUG_LOGGING */
112 1.1 skrll
113 1.1 skrll struct pvscsi_softc;
114 1.1 skrll struct pvscsi_hcb;
115 1.1 skrll struct pvscsi_dma;
116 1.1 skrll
117 1.1 skrll #define VMWARE_PVSCSI_DEVSTR "VMware Paravirtual SCSI Controller"
118 1.1 skrll
119 1.1 skrll static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc,
120 1.1 skrll uint32_t offset);
121 1.1 skrll static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset,
122 1.1 skrll uint32_t val);
123 1.1 skrll static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc);
124 1.1 skrll static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc,
125 1.1 skrll uint32_t val);
126 1.1 skrll static inline void pvscsi_intr_enable(struct pvscsi_softc *sc);
127 1.1 skrll static inline void pvscsi_intr_disable(struct pvscsi_softc *sc);
128 1.1 skrll static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0);
129 1.1 skrll static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
130 1.1 skrll uint32_t len);
131 1.1 skrll static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc);
132 1.1 skrll static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable);
133 1.1 skrll static void pvscsi_setup_rings(struct pvscsi_softc *sc);
134 1.1 skrll static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc);
135 1.1 skrll static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc);
136 1.1 skrll
137 1.1 skrll static void pvscsi_timeout(void *arg);
138 1.1 skrll static void pvscsi_adapter_reset(struct pvscsi_softc *sc);
139 1.1 skrll static void pvscsi_bus_reset(struct pvscsi_softc *sc);
140 1.1 skrll static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target);
141 1.1 skrll static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target,
142 1.1 skrll struct pvscsi_hcb *hcb);
143 1.1 skrll
144 1.1 skrll static void pvscsi_process_completion(struct pvscsi_softc *sc,
145 1.1 skrll struct pvscsi_ring_cmp_desc *e);
146 1.1 skrll static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc);
147 1.1 skrll static void pvscsi_process_msg(struct pvscsi_softc *sc,
148 1.1 skrll struct pvscsi_ring_msg_desc *e);
149 1.1 skrll static void pvscsi_process_msg_ring(struct pvscsi_softc *sc);
150 1.1 skrll
151 1.1 skrll static void pvscsi_intr_locked(struct pvscsi_softc *sc);
152 1.1 skrll static int pvscsi_intr(void *xsc);
153 1.1 skrll
154 1.1 skrll static void pvscsi_scsipi_request(struct scsipi_channel *,
155 1.1 skrll scsipi_adapter_req_t, void *);
156 1.1 skrll
157 1.1 skrll static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
158 1.1 skrll struct pvscsi_hcb *hcb);
159 1.1 skrll static inline struct pvscsi_hcb *pvscsi_context_to_hcb(struct pvscsi_softc *sc,
160 1.1 skrll uint64_t context);
161 1.1 skrll static struct pvscsi_hcb * pvscsi_hcb_get(struct pvscsi_softc *sc);
162 1.1 skrll static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb);
163 1.1 skrll
164 1.1 skrll static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma);
165 1.1 skrll static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
166 1.1 skrll bus_size_t size, bus_size_t alignment);
167 1.1 skrll static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc,
168 1.1 skrll struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages);
169 1.1 skrll static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc,
170 1.1 skrll uint32_t hcbs_allocated);
171 1.1 skrll static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc);
172 1.1 skrll static void pvscsi_free_rings(struct pvscsi_softc *sc);
173 1.1 skrll static int pvscsi_allocate_rings(struct pvscsi_softc *sc);
174 1.1 skrll static void pvscsi_free_interrupts(struct pvscsi_softc *sc);
175 1.1 skrll static int pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *);
176 1.1 skrll static void pvscsi_free_all(struct pvscsi_softc *sc);
177 1.1 skrll
178 1.1 skrll static void pvscsi_attach(device_t, device_t, void *);
179 1.1 skrll static int pvscsi_detach(device_t, int);
180 1.1 skrll static int pvscsi_probe(device_t, cfdata_t, void *);
181 1.1 skrll
182 1.1 skrll #define pvscsi_get_tunable(_sc, _name, _value) (_value)
183 1.1 skrll
184 1.1 skrll #ifdef PVSCSI_DEBUG_LOGGING
185 1.1 skrll static int pvscsi_log_level = 1;
186 1.1 skrll #endif
187 1.1 skrll
188 1.1 skrll #define TUNABLE_INT(__x, __d) \
189 1.1 skrll err = sysctl_createv(clog, 0, &rnode, &cnode, \
190 1.1 skrll CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, \
191 1.1 skrll #__x, SYSCTL_DESCR(__d), \
192 1.1 skrll NULL, 0, &(pvscsi_ ## __x), sizeof(pvscsi_ ## __x), \
193 1.1 skrll CTL_CREATE, CTL_EOL); \
194 1.1 skrll if (err) \
195 1.1 skrll goto fail;
196 1.1 skrll
197 1.1 skrll static int pvscsi_request_ring_pages = 0;
198 1.1 skrll static int pvscsi_use_msg = 1;
199 1.1 skrll static int pvscsi_use_msi = 1;
200 1.1 skrll static int pvscsi_use_msix = 1;
201 1.1 skrll static int pvscsi_use_req_call_threshold = 0;
202 1.1 skrll static int pvscsi_max_queue_depth = 0;
203 1.1 skrll
/*
 * Create the hw.pvscsi sysctl subtree and attach one read-write integer
 * node per driver tunable.  TUNABLE_INT expands in terms of the locals
 * err/rnode/cnode (and the SYSCTL_SETUP-provided clog) and jumps to
 * "fail" on error, so those names must not be changed.
 */
SYSCTL_SETUP(sysctl_hw_pvscsi_setup, "sysctl hw.pvscsi setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	/* Parent node: hw.pvscsi */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "pvscsi",
	    SYSCTL_DESCR("pvscsi global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

#ifdef PVSCSI_DEBUG_LOGGING
	TUNABLE_INT(log_level, "Enable debugging output");
#endif

	/* One child node per pvscsi_* tunable declared above. */
	TUNABLE_INT(request_ring_pages, "No. of pages for the request ring");
	TUNABLE_INT(use_msg, "Use message passing");
	TUNABLE_INT(use_msi, "Use MSI interrupt");
	TUNABLE_INT(use_msix, "Use MSXI interrupt");
	TUNABLE_INT(use_req_call_threshold, "Use request limit");
	TUNABLE_INT(max_queue_depth, "Maximum size of request queue");

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
233 1.1 skrll
234 1.1 skrll struct pvscsi_sg_list {
235 1.1 skrll struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
236 1.1 skrll };
237 1.1 skrll
238 1.1 skrll #define PVSCSI_ABORT_TIMEOUT 2
239 1.1 skrll #define PVSCSI_RESET_TIMEOUT 10
240 1.1 skrll
241 1.1 skrll #define PVSCSI_HCB_NONE 0
242 1.1 skrll #define PVSCSI_HCB_ABORT 1
243 1.1 skrll #define PVSCSI_HCB_DEVICE_RESET 2
244 1.1 skrll #define PVSCSI_HCB_BUS_RESET 3
245 1.1 skrll
246 1.1 skrll struct pvscsi_hcb {
247 1.1 skrll struct scsipi_xfer *xs;
248 1.1 skrll struct pvscsi_softc *sc;
249 1.1 skrll
250 1.1 skrll struct pvscsi_ring_req_desc *e;
251 1.1 skrll int recovery;
252 1.1 skrll SLIST_ENTRY(pvscsi_hcb) links;
253 1.1 skrll
254 1.1 skrll bus_dmamap_t dma_map;
255 1.1 skrll bus_addr_t dma_map_offset;
256 1.1 skrll bus_size_t dma_map_size;
257 1.1 skrll void *sense_buffer;
258 1.1 skrll bus_addr_t sense_buffer_paddr;
259 1.1 skrll struct pvscsi_sg_list *sg_list;
260 1.1 skrll bus_addr_t sg_list_paddr;
261 1.1 skrll bus_addr_t sg_list_offset;
262 1.1 skrll };
263 1.1 skrll
264 1.1 skrll struct pvscsi_dma {
265 1.1 skrll bus_dmamap_t map;
266 1.1 skrll void *vaddr;
267 1.1 skrll bus_addr_t paddr;
268 1.1 skrll bus_size_t size;
269 1.1 skrll bus_dma_segment_t seg[1];
270 1.1 skrll };
271 1.1 skrll
272 1.1 skrll struct pvscsi_softc {
273 1.1 skrll device_t dev;
274 1.1 skrll kmutex_t lock;
275 1.1 skrll
276 1.1 skrll device_t sc_scsibus_dv;
277 1.1 skrll struct scsipi_adapter sc_adapter;
278 1.1 skrll struct scsipi_channel sc_channel;
279 1.1 skrll
280 1.1 skrll struct pvscsi_rings_state *rings_state;
281 1.1 skrll struct pvscsi_ring_req_desc *req_ring;
282 1.1 skrll struct pvscsi_ring_cmp_desc *cmp_ring;
283 1.1 skrll struct pvscsi_ring_msg_desc *msg_ring;
284 1.1 skrll uint32_t hcb_cnt;
285 1.1 skrll struct pvscsi_hcb *hcbs;
286 1.1 skrll SLIST_HEAD(, pvscsi_hcb) free_list;
287 1.1 skrll
288 1.1 skrll bus_dma_tag_t sc_dmat;
289 1.1 skrll bus_space_tag_t sc_memt;
290 1.1 skrll bus_space_handle_t sc_memh;
291 1.1 skrll bus_size_t sc_mems;
292 1.1 skrll
293 1.1 skrll bool use_msg;
294 1.1 skrll uint32_t max_targets;
295 1.1 skrll int mm_rid;
296 1.1 skrll int irq_id;
297 1.1 skrll int use_req_call_threshold;
298 1.1 skrll
299 1.1 skrll pci_chipset_tag_t sc_pc;
300 1.1 skrll pci_intr_handle_t * sc_pihp;
301 1.1 skrll void *sc_ih;
302 1.1 skrll
303 1.1 skrll uint64_t rings_state_ppn;
304 1.1 skrll uint32_t req_ring_num_pages;
305 1.1 skrll uint64_t req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING];
306 1.1 skrll uint32_t cmp_ring_num_pages;
307 1.1 skrll uint64_t cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING];
308 1.1 skrll uint32_t msg_ring_num_pages;
309 1.1 skrll uint64_t msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING];
310 1.1 skrll
311 1.1 skrll struct pvscsi_dma rings_state_dma;
312 1.1 skrll struct pvscsi_dma req_ring_dma;
313 1.1 skrll struct pvscsi_dma cmp_ring_dma;
314 1.1 skrll struct pvscsi_dma msg_ring_dma;
315 1.1 skrll
316 1.1 skrll struct pvscsi_dma sg_list_dma;
317 1.1 skrll struct pvscsi_dma sense_buffer_dma;
318 1.1 skrll };
319 1.1 skrll
320 1.1 skrll CFATTACH_DECL3_NEW(pvscsi, sizeof(struct pvscsi_softc),
321 1.1 skrll pvscsi_probe, pvscsi_attach, pvscsi_detach, NULL, NULL, NULL,
322 1.1 skrll DVF_DETACH_SHUTDOWN);
323 1.1 skrll
324 1.1 skrll static inline uint32_t
325 1.1 skrll pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset)
326 1.1 skrll {
327 1.1 skrll
328 1.1 skrll return (bus_space_read_4(sc->sc_memt, sc->sc_memh, offset));
329 1.1 skrll }
330 1.1 skrll
331 1.1 skrll static inline void
332 1.1 skrll pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val)
333 1.1 skrll {
334 1.1 skrll
335 1.1 skrll bus_space_write_4(sc->sc_memt, sc->sc_memh, offset, val);
336 1.1 skrll }
337 1.1 skrll
338 1.1 skrll static inline uint32_t
339 1.1 skrll pvscsi_read_intr_status(struct pvscsi_softc *sc)
340 1.1 skrll {
341 1.1 skrll
342 1.1 skrll return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS));
343 1.1 skrll }
344 1.1 skrll
345 1.1 skrll static inline void
346 1.1 skrll pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val)
347 1.1 skrll {
348 1.1 skrll
349 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val);
350 1.1 skrll }
351 1.1 skrll
352 1.1 skrll static inline void
353 1.1 skrll pvscsi_intr_enable(struct pvscsi_softc *sc)
354 1.1 skrll {
355 1.1 skrll uint32_t mask;
356 1.1 skrll
357 1.1 skrll mask = PVSCSI_INTR_CMPL_MASK;
358 1.1 skrll if (sc->use_msg) {
359 1.1 skrll mask |= PVSCSI_INTR_MSG_MASK;
360 1.1 skrll }
361 1.1 skrll
362 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask);
363 1.1 skrll }
364 1.1 skrll
365 1.1 skrll static inline void
366 1.1 skrll pvscsi_intr_disable(struct pvscsi_softc *sc)
367 1.1 skrll {
368 1.1 skrll
369 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0);
370 1.1 skrll }
371 1.1 skrll
/*
 * pvscsi_kick_io: notify the host that new request(s) are on the ring.
 *
 * For plain READ/WRITE CDBs the doorbell write may be skipped when
 * request-call throttling is enabled and the ring backlog is below the
 * device-supplied threshold; all other commands always ring the
 * non-RW doorbell.
 */
static void
pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0)
{
	struct pvscsi_rings_state *s;

	DEBUG_PRINTF(2, sc->dev, "%s: cdb0 %#x\n", __func__, cdb0);
	if (cdb0 == SCSI_READ_6_COMMAND || cdb0 == READ_10 ||
	    cdb0 == READ_12 || cdb0 == READ_16 ||
	    cdb0 == SCSI_WRITE_6_COMMAND || cdb0 == WRITE_10 ||
	    cdb0 == WRITE_12 || cdb0 == WRITE_16) {
		s = sc->rings_state;

		/*
		 * Ensure the command has been published before we test
		 * whether we need to kick the host.
		 */
		paravirt_membar_sync();

		DEBUG_PRINTF(2, sc->dev, "%s req prod %d cons %d\n", __func__,
		    s->req_prod_idx, s->req_cons_idx);
		/*
		 * Kick when throttling is off, or when the number of
		 * outstanding requests (prod - cons) has reached the
		 * device's request-call threshold.
		 */
		if (!sc->use_req_call_threshold ||
		    (s->req_prod_idx - s->req_cons_idx) >=
		    s->req_call_threshold) {
			pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
			DEBUG_PRINTF(2, sc->dev, "kicked\n");
		} else {
			DEBUG_PRINTF(2, sc->dev, "wtf\n");
		}
	} else {
		/* Non-read/write command: always notify the host. */
		s = sc->rings_state;
		DEBUG_PRINTF(1, sc->dev, "%s req prod %d cons %d not checked\n", __func__,
		    s->req_prod_idx, s->req_cons_idx);

		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
	}
}
408 1.1 skrll
409 1.1 skrll static void
410 1.1 skrll pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
411 1.1 skrll uint32_t len)
412 1.1 skrll {
413 1.1 skrll uint32_t *data_ptr;
414 1.1 skrll int i;
415 1.1 skrll
416 1.1 skrll KASSERTMSG(len % sizeof(uint32_t) == 0,
417 1.1 skrll "command size not a multiple of 4");
418 1.1 skrll
419 1.1 skrll data_ptr = data;
420 1.1 skrll len /= sizeof(uint32_t);
421 1.1 skrll
422 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd);
423 1.1 skrll for (i = 0; i < len; ++i) {
424 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA,
425 1.1 skrll data_ptr[i]);
426 1.1 skrll }
427 1.1 skrll }
428 1.1 skrll
429 1.1 skrll static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
430 1.1 skrll struct pvscsi_hcb *hcb)
431 1.1 skrll {
432 1.1 skrll
433 1.1 skrll /* Offset by 1 because context must not be 0 */
434 1.1 skrll return (hcb - sc->hcbs + 1);
435 1.1 skrll }
436 1.1 skrll
437 1.1 skrll static inline struct pvscsi_hcb* pvscsi_context_to_hcb(struct pvscsi_softc *sc,
438 1.1 skrll uint64_t context)
439 1.1 skrll {
440 1.1 skrll
441 1.1 skrll return (sc->hcbs + (context - 1));
442 1.1 skrll }
443 1.1 skrll
444 1.1 skrll static struct pvscsi_hcb *
445 1.1 skrll pvscsi_hcb_get(struct pvscsi_softc *sc)
446 1.1 skrll {
447 1.1 skrll struct pvscsi_hcb *hcb;
448 1.1 skrll
449 1.1 skrll KASSERT(mutex_owned(&sc->lock));
450 1.1 skrll
451 1.1 skrll hcb = SLIST_FIRST(&sc->free_list);
452 1.1 skrll if (hcb) {
453 1.1 skrll SLIST_REMOVE_HEAD(&sc->free_list, links);
454 1.1 skrll }
455 1.1 skrll
456 1.1 skrll return (hcb);
457 1.1 skrll }
458 1.1 skrll
459 1.1 skrll static void
460 1.1 skrll pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
461 1.1 skrll {
462 1.1 skrll
463 1.1 skrll KASSERT(mutex_owned(&sc->lock));
464 1.1 skrll hcb->xs = NULL;
465 1.1 skrll hcb->e = NULL;
466 1.1 skrll hcb->recovery = PVSCSI_HCB_NONE;
467 1.1 skrll SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
468 1.1 skrll }
469 1.1 skrll
470 1.1 skrll static uint32_t
471 1.1 skrll pvscsi_get_max_targets(struct pvscsi_softc *sc)
472 1.1 skrll {
473 1.1 skrll uint32_t max_targets;
474 1.1 skrll
475 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0);
476 1.1 skrll
477 1.1 skrll max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
478 1.1 skrll
479 1.1 skrll if (max_targets == ~0) {
480 1.1 skrll max_targets = 16;
481 1.1 skrll }
482 1.1 skrll
483 1.1 skrll return (max_targets);
484 1.1 skrll }
485 1.1 skrll
486 1.1 skrll static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable)
487 1.1 skrll {
488 1.1 skrll uint32_t status;
489 1.1 skrll struct pvscsi_cmd_desc_setup_req_call cmd;
490 1.1 skrll
491 1.1 skrll if (!pvscsi_get_tunable(sc, "pvscsi_use_req_call_threshold",
492 1.1 skrll pvscsi_use_req_call_threshold)) {
493 1.1 skrll return (0);
494 1.1 skrll }
495 1.1 skrll
496 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
497 1.1 skrll PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
498 1.1 skrll status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
499 1.1 skrll
500 1.1 skrll if (status != -1) {
501 1.1 skrll memset(&cmd, 0, sizeof(cmd));
502 1.1 skrll cmd.enable = enable;
503 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
504 1.1 skrll &cmd, sizeof(cmd));
505 1.1 skrll status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
506 1.1 skrll
507 1.1 skrll return (status != 0);
508 1.1 skrll } else {
509 1.1 skrll return (0);
510 1.1 skrll }
511 1.1 skrll }
512 1.1 skrll
/*
 * pvscsi_dma_free: tear down a buffer set up by pvscsi_dma_alloc().
 *
 * Reverses the allocation steps in order (unload, unmap, destroy map,
 * free pages) and zeroes *dma so stale handles cannot be reused.
 *
 * NOTE(review): this assumes *dma was fully allocated; passing a
 * zeroed/never-allocated pvscsi_dma hands NULL handles to bus_dma(9) —
 * confirm error-path callers never do that.
 */
static void
pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma)
{

	bus_dmamap_unload(sc->sc_dmat, dma->map);
	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
	bus_dmamap_destroy(sc->sc_dmat, dma->map);
	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));

	memset(dma, 0, sizeof(*dma));
}
524 1.1 skrll
525 1.1 skrll static int
526 1.1 skrll pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
527 1.1 skrll bus_size_t size, bus_size_t alignment)
528 1.1 skrll {
529 1.1 skrll int error;
530 1.1 skrll int nsegs;
531 1.1 skrll
532 1.1 skrll memset(dma, 0, sizeof(*dma));
533 1.1 skrll
534 1.1 skrll error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, 0, dma->seg,
535 1.1 skrll __arraycount(dma->seg), &nsegs, BUS_DMA_WAITOK);
536 1.1 skrll if (error) {
537 1.1 skrll aprint_normal_dev(sc->dev, "error allocating dma mem, error %d\n",
538 1.1 skrll error);
539 1.1 skrll goto fail;
540 1.1 skrll }
541 1.1 skrll
542 1.1 skrll error = bus_dmamem_map(sc->sc_dmat, dma->seg, nsegs, size,
543 1.1 skrll &dma->vaddr, BUS_DMA_WAITOK);
544 1.1 skrll if (error != 0) {
545 1.1 skrll device_printf(sc->dev, "Failed to map DMA memory\n");
546 1.1 skrll goto dmamemmap_fail;
547 1.1 skrll }
548 1.1 skrll
549 1.1 skrll error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
550 1.1 skrll BUS_DMA_WAITOK, &dma->map);
551 1.1 skrll if (error != 0) {
552 1.1 skrll device_printf(sc->dev, "Failed to create DMA map\n");
553 1.1 skrll goto dmamapcreate_fail;
554 1.1 skrll }
555 1.1 skrll
556 1.1 skrll error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->vaddr, size,
557 1.1 skrll NULL, BUS_DMA_WAITOK);
558 1.1 skrll if (error) {
559 1.1 skrll aprint_normal_dev(sc->dev, "error mapping dma mam, error %d\n",
560 1.1 skrll error);
561 1.1 skrll goto dmamapload_fail;
562 1.1 skrll }
563 1.1 skrll
564 1.1 skrll dma->paddr = dma->map->dm_segs[0].ds_addr;
565 1.1 skrll dma->size = size;
566 1.1 skrll
567 1.1 skrll return 0;
568 1.1 skrll
569 1.1 skrll dmamapload_fail:
570 1.1 skrll bus_dmamap_destroy(sc->sc_dmat, dma->map);
571 1.1 skrll dmamapcreate_fail:
572 1.1 skrll bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
573 1.1 skrll dmamemmap_fail:
574 1.1 skrll bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
575 1.1 skrll fail:
576 1.1 skrll
577 1.1 skrll return (error);
578 1.1 skrll }
579 1.1 skrll
580 1.1 skrll static int
581 1.1 skrll pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
582 1.1 skrll uint64_t *ppn_list, uint32_t num_pages)
583 1.1 skrll {
584 1.1 skrll int error;
585 1.1 skrll uint32_t i;
586 1.1 skrll uint64_t ppn;
587 1.1 skrll
588 1.1 skrll error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE);
589 1.1 skrll if (error) {
590 1.1 skrll aprint_normal_dev(sc->dev, "Error allocating pages, error %d\n",
591 1.1 skrll error);
592 1.1 skrll return (error);
593 1.1 skrll }
594 1.1 skrll
595 1.1 skrll ppn = dma->paddr >> PAGE_SHIFT;
596 1.1 skrll for (i = 0; i < num_pages; i++) {
597 1.1 skrll ppn_list[i] = ppn + i;
598 1.1 skrll }
599 1.1 skrll
600 1.1 skrll return (0);
601 1.1 skrll }
602 1.1 skrll
603 1.1 skrll static void
604 1.1 skrll pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated)
605 1.1 skrll {
606 1.1 skrll int i;
607 1.1 skrll struct pvscsi_hcb *hcb;
608 1.1 skrll
609 1.1 skrll for (i = 0; i < hcbs_allocated; ++i) {
610 1.1 skrll hcb = sc->hcbs + i;
611 1.1 skrll bus_dmamap_destroy(sc->sc_dmat, hcb->dma_map);
612 1.1 skrll };
613 1.1 skrll
614 1.1 skrll pvscsi_dma_free(sc, &sc->sense_buffer_dma);
615 1.1 skrll pvscsi_dma_free(sc, &sc->sg_list_dma);
616 1.1 skrll }
617 1.1 skrll
/*
 * pvscsi_dma_alloc_per_hcb: allocate per-command resources.
 *
 * Allocates one large sg-list DMA area and one large sense-buffer DMA
 * area, carves them into a per-hcb slice each, creates a data DMA map
 * per hcb, and finally threads all hcbs onto the free list (in reverse
 * so hcb 0 ends up at the head).
 *
 * Returns 0 on success.  On failure the maps created so far ('i' of
 * them) are destroyed via pvscsi_dma_free_per_hcb().
 */
static int
pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc)
{
	int i;
	int error;
	struct pvscsi_hcb *hcb;

	i = 0;

	error = pvscsi_dma_alloc(sc, &sc->sg_list_dma,
	    sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocation sg list DMA memory, error %d\n", error);
		/*
		 * NOTE(review): jumping to fail here makes
		 * pvscsi_dma_free_per_hcb() free DMA areas that were never
		 * allocated — confirm pvscsi_dma_free() tolerates a zeroed
		 * pvscsi_dma on this path.
		 */
		goto fail;
	}

	error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma,
	    PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocation buffer DMA memory, error %d\n", error);
		goto fail;
	}

	for (i = 0; i < sc->hcb_cnt; ++i) {
		hcb = sc->hcbs + i;

		/* Data map for the actual I/O transfer of this command. */
		error = bus_dmamap_create(sc->sc_dmat, PVSCSI_MAXPHYS,
		    PVSCSI_MAXPHYS_SEGS, PVSCSI_MAXPHYS, 0,
		    BUS_DMA_WAITOK, &hcb->dma_map);
		if (error) {
			aprint_normal_dev(sc->dev,
			    "Error creating dma map for hcb %d, error %d\n",
			    i, error);
			goto fail;
		}

		hcb->sc = sc;
		/* Slice i of the shared sense buffer area. */
		hcb->dma_map_offset = PVSCSI_SENSE_LENGTH * i;
		hcb->dma_map_size = PVSCSI_SENSE_LENGTH;
		hcb->sense_buffer =
		    (void *)((char *)sc->sense_buffer_dma.vaddr +
		    PVSCSI_SENSE_LENGTH * i);
		hcb->sense_buffer_paddr = sc->sense_buffer_dma.paddr +
		    PVSCSI_SENSE_LENGTH * i;

		/* Slice i of the shared scatter/gather list area. */
		hcb->sg_list =
		    (struct pvscsi_sg_list *)((char *)sc->sg_list_dma.vaddr +
		    sizeof(struct pvscsi_sg_list) * i);
		hcb->sg_list_paddr =
		    sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i;
		hcb->sg_list_offset = sizeof(struct pvscsi_sg_list) * i;
	}

	SLIST_INIT(&sc->free_list);
	for (i = (sc->hcb_cnt - 1); i >= 0; --i) {
		hcb = sc->hcbs + i;
		SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
	}

fail:
	if (error) {
		pvscsi_dma_free_per_hcb(sc, i);
	}

	return (error);
}
686 1.1 skrll
687 1.1 skrll static void
688 1.1 skrll pvscsi_free_rings(struct pvscsi_softc *sc)
689 1.1 skrll {
690 1.1 skrll
691 1.1 skrll pvscsi_dma_free(sc, &sc->rings_state_dma);
692 1.1 skrll pvscsi_dma_free(sc, &sc->req_ring_dma);
693 1.1 skrll pvscsi_dma_free(sc, &sc->cmp_ring_dma);
694 1.1 skrll if (sc->use_msg) {
695 1.1 skrll pvscsi_dma_free(sc, &sc->msg_ring_dma);
696 1.1 skrll }
697 1.1 skrll }
698 1.1 skrll
699 1.1 skrll static int
700 1.1 skrll pvscsi_allocate_rings(struct pvscsi_softc *sc)
701 1.1 skrll {
702 1.1 skrll int error;
703 1.1 skrll
704 1.1 skrll error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma,
705 1.1 skrll &sc->rings_state_ppn, 1);
706 1.1 skrll if (error) {
707 1.1 skrll aprint_normal_dev(sc->dev,
708 1.1 skrll "Error allocating rings state, error = %d\n", error);
709 1.1 skrll goto fail;
710 1.1 skrll }
711 1.1 skrll sc->rings_state = sc->rings_state_dma.vaddr;
712 1.1 skrll
713 1.1 skrll error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn,
714 1.1 skrll sc->req_ring_num_pages);
715 1.1 skrll if (error) {
716 1.1 skrll aprint_normal_dev(sc->dev,
717 1.1 skrll "Error allocating req ring pages, error = %d\n", error);
718 1.1 skrll goto fail;
719 1.1 skrll }
720 1.1 skrll sc->req_ring = sc->req_ring_dma.vaddr;
721 1.1 skrll
722 1.1 skrll error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn,
723 1.1 skrll sc->cmp_ring_num_pages);
724 1.1 skrll if (error) {
725 1.1 skrll aprint_normal_dev(sc->dev,
726 1.1 skrll "Error allocating cmp ring pages, error = %d\n", error);
727 1.1 skrll goto fail;
728 1.1 skrll }
729 1.1 skrll sc->cmp_ring = sc->cmp_ring_dma.vaddr;
730 1.1 skrll
731 1.1 skrll sc->msg_ring = NULL;
732 1.1 skrll if (sc->use_msg) {
733 1.1 skrll error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma,
734 1.1 skrll sc->msg_ring_ppn, sc->msg_ring_num_pages);
735 1.1 skrll if (error) {
736 1.1 skrll aprint_normal_dev(sc->dev,
737 1.1 skrll "Error allocating cmp ring pages, error = %d\n",
738 1.1 skrll error);
739 1.1 skrll goto fail;
740 1.1 skrll }
741 1.1 skrll sc->msg_ring = sc->msg_ring_dma.vaddr;
742 1.1 skrll }
743 1.1 skrll
744 1.1 skrll fail:
745 1.1 skrll if (error) {
746 1.1 skrll pvscsi_free_rings(sc);
747 1.1 skrll }
748 1.1 skrll return (error);
749 1.1 skrll }
750 1.1 skrll
751 1.1 skrll static void
752 1.1 skrll pvscsi_setup_rings(struct pvscsi_softc *sc)
753 1.1 skrll {
754 1.1 skrll struct pvscsi_cmd_desc_setup_rings cmd;
755 1.1 skrll uint32_t i;
756 1.1 skrll
757 1.1 skrll memset(&cmd, 0, sizeof(cmd));
758 1.1 skrll
759 1.1 skrll cmd.rings_state_ppn = sc->rings_state_ppn;
760 1.1 skrll
761 1.1 skrll cmd.req_ring_num_pages = sc->req_ring_num_pages;
762 1.1 skrll for (i = 0; i < sc->req_ring_num_pages; ++i) {
763 1.1 skrll cmd.req_ring_ppns[i] = sc->req_ring_ppn[i];
764 1.1 skrll }
765 1.1 skrll
766 1.1 skrll cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages;
767 1.1 skrll for (i = 0; i < sc->cmp_ring_num_pages; ++i) {
768 1.1 skrll cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i];
769 1.1 skrll }
770 1.1 skrll
771 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
772 1.1 skrll }
773 1.1 skrll
774 1.1 skrll static int
775 1.1 skrll pvscsi_hw_supports_msg(struct pvscsi_softc *sc)
776 1.1 skrll {
777 1.1 skrll uint32_t status;
778 1.1 skrll
779 1.1 skrll pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
780 1.1 skrll PVSCSI_CMD_SETUP_MSG_RING);
781 1.1 skrll status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);
782 1.1 skrll
783 1.1 skrll return (status != -1);
784 1.1 skrll }
785 1.1 skrll
786 1.1 skrll static void
787 1.1 skrll pvscsi_setup_msg_ring(struct pvscsi_softc *sc)
788 1.1 skrll {
789 1.1 skrll struct pvscsi_cmd_desc_setup_msg_ring cmd;
790 1.1 skrll uint32_t i;
791 1.1 skrll
792 1.1 skrll KASSERTMSG(sc->use_msg, "msg is not being used");
793 1.1 skrll
794 1.1 skrll memset(&cmd, 0, sizeof(cmd));
795 1.1 skrll
796 1.1 skrll cmd.num_pages = sc->msg_ring_num_pages;
797 1.1 skrll for (i = 0; i < sc->msg_ring_num_pages; ++i) {
798 1.1 skrll cmd.ring_ppns[i] = sc->msg_ring_ppn[i];
799 1.1 skrll }
800 1.1 skrll
801 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
802 1.1 skrll }
803 1.1 skrll
/*
 * pvscsi_adapter_reset: hard-reset the adapter, then read (and thereby
 * acknowledge) the interrupt status.  The status value is consumed only
 * by DEBUG_PRINTF, so the variable is declared only when debug logging
 * is compiled in; otherwise the read is performed for its side effect.
 */
static void
pvscsi_adapter_reset(struct pvscsi_softc *sc)
{
	aprint_normal_dev(sc->dev, "Adapter Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
#ifdef PVSCSI_DEBUG_LOGGING
	uint32_t val =
#endif
	pvscsi_read_intr_status(sc);

	DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val);
}
817 1.1 skrll
818 1.1 skrll static void
819 1.1 skrll pvscsi_bus_reset(struct pvscsi_softc *sc)
820 1.1 skrll {
821 1.1 skrll
822 1.1 skrll aprint_normal_dev(sc->dev, "Bus Reset\n");
823 1.1 skrll
824 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0);
825 1.1 skrll pvscsi_process_cmp_ring(sc);
826 1.1 skrll
827 1.1 skrll DEBUG_PRINTF(2, sc->dev, "bus reset done\n");
828 1.1 skrll }
829 1.1 skrll
830 1.1 skrll static void
831 1.1 skrll pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target)
832 1.1 skrll {
833 1.1 skrll struct pvscsi_cmd_desc_reset_device cmd;
834 1.1 skrll
835 1.1 skrll memset(&cmd, 0, sizeof(cmd));
836 1.1 skrll
837 1.1 skrll cmd.target = target;
838 1.1 skrll
839 1.1 skrll aprint_normal_dev(sc->dev, "Device reset for target %u\n", target);
840 1.1 skrll
841 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof cmd);
842 1.1 skrll pvscsi_process_cmp_ring(sc);
843 1.1 skrll
844 1.1 skrll DEBUG_PRINTF(2, sc->dev, "device reset done\n");
845 1.1 skrll }
846 1.1 skrll
847 1.1 skrll static void
848 1.1 skrll pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, struct pvscsi_hcb *hcb)
849 1.1 skrll {
850 1.1 skrll struct pvscsi_cmd_desc_abort_cmd cmd;
851 1.1 skrll uint64_t context;
852 1.1 skrll
853 1.1 skrll pvscsi_process_cmp_ring(sc);
854 1.1 skrll
855 1.1 skrll if (hcb != NULL) {
856 1.1 skrll context = pvscsi_hcb_to_context(sc, hcb);
857 1.1 skrll
858 1.1 skrll memset(&cmd, 0, sizeof cmd);
859 1.1 skrll cmd.target = target;
860 1.1 skrll cmd.context = context;
861 1.1 skrll
862 1.1 skrll aprint_normal_dev(sc->dev, "Abort for target %u context %llx\n",
863 1.1 skrll target, (unsigned long long)context);
864 1.1 skrll
865 1.1 skrll pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
866 1.1 skrll pvscsi_process_cmp_ring(sc);
867 1.1 skrll
868 1.1 skrll DEBUG_PRINTF(2, sc->dev, "abort done\n");
869 1.1 skrll } else {
870 1.1 skrll DEBUG_PRINTF(1, sc->dev,
871 1.1 skrll "Target %u hcb %p not found for abort\n", target, hcb);
872 1.1 skrll }
873 1.1 skrll }
874 1.1 skrll
875 1.1 skrll static int
876 1.1 skrll pvscsi_probe(device_t dev, cfdata_t cf, void *aux)
877 1.1 skrll {
878 1.1 skrll const struct pci_attach_args *pa = aux;
879 1.1 skrll
880 1.1 skrll if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
881 1.1 skrll PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI) {
882 1.1 skrll return 1;
883 1.1 skrll }
884 1.1 skrll return 0;
885 1.1 skrll }
886 1.1 skrll
887 1.1 skrll static void
888 1.1 skrll pvscsi_timeout(void *arg)
889 1.1 skrll {
890 1.1 skrll struct pvscsi_hcb *hcb = arg;
891 1.1 skrll struct scsipi_xfer *xs = hcb->xs;
892 1.1 skrll
893 1.1 skrll if (xs == NULL) {
894 1.1 skrll /* Already completed */
895 1.1 skrll return;
896 1.1 skrll }
897 1.1 skrll
898 1.1 skrll struct pvscsi_softc *sc = hcb->sc;
899 1.1 skrll
900 1.1 skrll mutex_enter(&sc->lock);
901 1.1 skrll
902 1.1 skrll scsipi_printaddr(xs->xs_periph);
903 1.1 skrll printf("command timeout, CDB: ");
904 1.1 skrll scsipi_print_cdb(xs->cmd);
905 1.1 skrll printf("\n");
906 1.1 skrll
907 1.1 skrll switch (hcb->recovery) {
908 1.1 skrll case PVSCSI_HCB_NONE:
909 1.1 skrll hcb->recovery = PVSCSI_HCB_ABORT;
910 1.1 skrll pvscsi_abort(sc, hcb->e->target, hcb);
911 1.1 skrll callout_reset(&xs->xs_callout,
912 1.1 skrll mstohz(PVSCSI_ABORT_TIMEOUT * 1000),
913 1.1 skrll pvscsi_timeout, hcb);
914 1.1 skrll break;
915 1.1 skrll case PVSCSI_HCB_ABORT:
916 1.1 skrll hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
917 1.1 skrll pvscsi_device_reset(sc, hcb->e->target);
918 1.1 skrll callout_reset(&xs->xs_callout,
919 1.1 skrll mstohz(PVSCSI_RESET_TIMEOUT * 1000),
920 1.1 skrll pvscsi_timeout, hcb);
921 1.1 skrll break;
922 1.1 skrll case PVSCSI_HCB_DEVICE_RESET:
923 1.1 skrll hcb->recovery = PVSCSI_HCB_BUS_RESET;
924 1.1 skrll pvscsi_bus_reset(sc);
925 1.1 skrll callout_reset(&xs->xs_callout,
926 1.1 skrll mstohz(PVSCSI_RESET_TIMEOUT * 1000),
927 1.1 skrll pvscsi_timeout, hcb);
928 1.1 skrll break;
929 1.1 skrll case PVSCSI_HCB_BUS_RESET:
930 1.1 skrll pvscsi_adapter_reset(sc);
931 1.1 skrll break;
932 1.1 skrll };
933 1.1 skrll mutex_exit(&sc->lock);
934 1.1 skrll }
935 1.1 skrll
936 1.1 skrll static void
937 1.1 skrll pvscsi_process_completion(struct pvscsi_softc *sc,
938 1.1 skrll struct pvscsi_ring_cmp_desc *e)
939 1.1 skrll {
940 1.1 skrll struct pvscsi_hcb *hcb;
941 1.1 skrll struct scsipi_xfer *xs;
942 1.1 skrll uint32_t error = XS_NOERROR;
943 1.1 skrll uint32_t btstat;
944 1.1 skrll uint32_t sdstat;
945 1.1 skrll int op;
946 1.1 skrll
947 1.1 skrll hcb = pvscsi_context_to_hcb(sc, e->context);
948 1.1 skrll xs = hcb->xs;
949 1.1 skrll
950 1.1 skrll callout_stop(&xs->xs_callout);
951 1.1 skrll
952 1.1 skrll btstat = e->host_status;
953 1.1 skrll sdstat = e->scsi_status;
954 1.1 skrll
955 1.1 skrll xs->status = sdstat;
956 1.1 skrll xs->resid = xs->datalen - e->data_len;
957 1.1 skrll
958 1.1 skrll DEBUG_PRINTF(3, sc->dev,
959 1.1 skrll "command context %llx btstat %d (%#x) sdstat %d (%#x)\n",
960 1.1 skrll (unsigned long long)e->context, btstat, btstat, sdstat, sdstat);
961 1.1 skrll
962 1.1 skrll if ((xs->xs_control & XS_CTL_DATA_IN) == XS_CTL_DATA_IN) {
963 1.1 skrll op = BUS_DMASYNC_POSTREAD;
964 1.1 skrll } else {
965 1.1 skrll op = BUS_DMASYNC_POSTWRITE;
966 1.1 skrll }
967 1.1 skrll bus_dmamap_sync(sc->sc_dmat, sc->sense_buffer_dma.map,
968 1.1 skrll hcb->dma_map_offset, hcb->dma_map_size, op);
969 1.1 skrll
970 1.1 skrll if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_OK) {
971 1.1 skrll DEBUG_PRINTF(3, sc->dev,
972 1.1 skrll "completing command context %llx success\n",
973 1.1 skrll (unsigned long long)e->context);
974 1.1 skrll xs->resid = 0;
975 1.1 skrll } else {
976 1.1 skrll switch (btstat) {
977 1.1 skrll case BTSTAT_SUCCESS:
978 1.1 skrll case BTSTAT_LINKED_COMMAND_COMPLETED:
979 1.1 skrll case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
980 1.1 skrll switch (sdstat) {
981 1.1 skrll case SCSI_OK:
982 1.1 skrll xs->resid = 0;
983 1.1 skrll error = XS_NOERROR;
984 1.1 skrll break;
985 1.1 skrll case SCSI_CHECK:
986 1.1 skrll error = XS_SENSE;
987 1.1 skrll xs->resid = 0;
988 1.1 skrll
989 1.1 skrll memset(&xs->sense, 0, sizeof(xs->sense));
990 1.1 skrll memcpy(&xs->sense, hcb->sense_buffer,
991 1.1 skrll MIN(sizeof(xs->sense), e->sense_len));
992 1.1 skrll break;
993 1.1 skrll case SCSI_BUSY:
994 1.1 skrll case SCSI_QUEUE_FULL:
995 1.1 skrll error = XS_NOERROR;
996 1.1 skrll break;
997 1.1 skrll case SCSI_TERMINATED:
998 1.1 skrll // case SCSI_STATUS_TASK_ABORTED:
999 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1000 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1001 1.1 skrll error = XS_DRIVER_STUFFUP;
1002 1.1 skrll break;
1003 1.1 skrll default:
1004 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1005 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1006 1.1 skrll error = XS_DRIVER_STUFFUP;
1007 1.1 skrll break;
1008 1.1 skrll }
1009 1.1 skrll break;
1010 1.1 skrll case BTSTAT_SELTIMEO:
1011 1.1 skrll error = XS_SELTIMEOUT;
1012 1.1 skrll break;
1013 1.1 skrll case BTSTAT_DATARUN:
1014 1.1 skrll case BTSTAT_DATA_UNDERRUN:
1015 1.1 skrll // xs->resid = xs->datalen - c->data_len;
1016 1.1 skrll error = XS_NOERROR;
1017 1.1 skrll break;
1018 1.1 skrll case BTSTAT_ABORTQUEUE:
1019 1.1 skrll case BTSTAT_HATIMEOUT:
1020 1.1 skrll error = XS_NOERROR;
1021 1.1 skrll break;
1022 1.1 skrll case BTSTAT_NORESPONSE:
1023 1.1 skrll case BTSTAT_SENTRST:
1024 1.1 skrll case BTSTAT_RECVRST:
1025 1.1 skrll case BTSTAT_BUSRESET:
1026 1.1 skrll error = XS_RESET;
1027 1.1 skrll break;
1028 1.1 skrll case BTSTAT_SCSIPARITY:
1029 1.1 skrll error = XS_DRIVER_STUFFUP;
1030 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1031 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1032 1.1 skrll break;
1033 1.1 skrll case BTSTAT_BUSFREE:
1034 1.1 skrll error = XS_DRIVER_STUFFUP;
1035 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1036 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1037 1.1 skrll break;
1038 1.1 skrll case BTSTAT_INVPHASE:
1039 1.1 skrll error = XS_DRIVER_STUFFUP;
1040 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1041 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1042 1.1 skrll break;
1043 1.1 skrll case BTSTAT_SENSFAILED:
1044 1.1 skrll error = XS_DRIVER_STUFFUP;
1045 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1046 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1047 1.1 skrll break;
1048 1.1 skrll case BTSTAT_LUNMISMATCH:
1049 1.1 skrll case BTSTAT_TAGREJECT:
1050 1.1 skrll case BTSTAT_DISCONNECT:
1051 1.1 skrll case BTSTAT_BADMSG:
1052 1.1 skrll case BTSTAT_INVPARAM:
1053 1.1 skrll error = XS_DRIVER_STUFFUP;
1054 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1055 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1056 1.1 skrll break;
1057 1.1 skrll case BTSTAT_HASOFTWARE:
1058 1.1 skrll case BTSTAT_HAHARDWARE:
1059 1.1 skrll error = XS_DRIVER_STUFFUP;
1060 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1061 1.1 skrll "xs: %p sdstat=0x%x\n", xs, sdstat);
1062 1.1 skrll break;
1063 1.1 skrll default:
1064 1.1 skrll aprint_normal_dev(sc->dev, "unknown hba status: 0x%x\n",
1065 1.1 skrll btstat);
1066 1.1 skrll error = XS_DRIVER_STUFFUP;
1067 1.1 skrll break;
1068 1.1 skrll }
1069 1.1 skrll
1070 1.1 skrll DEBUG_PRINTF(3, sc->dev,
1071 1.1 skrll "completing command context %llx btstat %x sdstat %x - error %x\n",
1072 1.1 skrll (unsigned long long)e->context, btstat, sdstat, error);
1073 1.1 skrll }
1074 1.1 skrll
1075 1.1 skrll xs->error = error;
1076 1.1 skrll pvscsi_hcb_put(sc, hcb);
1077 1.1 skrll
1078 1.1 skrll mutex_exit(&sc->lock);
1079 1.1 skrll
1080 1.1 skrll scsipi_done(xs);
1081 1.1 skrll
1082 1.1 skrll mutex_enter(&sc->lock);
1083 1.1 skrll }
1084 1.1 skrll
1085 1.1 skrll static void
1086 1.1 skrll pvscsi_process_cmp_ring(struct pvscsi_softc *sc)
1087 1.1 skrll {
1088 1.1 skrll struct pvscsi_ring_cmp_desc *ring;
1089 1.1 skrll struct pvscsi_rings_state *s;
1090 1.1 skrll struct pvscsi_ring_cmp_desc *e;
1091 1.1 skrll uint32_t mask;
1092 1.1 skrll
1093 1.1 skrll KASSERT(mutex_owned(&sc->lock));
1094 1.1 skrll
1095 1.1 skrll s = sc->rings_state;
1096 1.1 skrll ring = sc->cmp_ring;
1097 1.1 skrll mask = MASK(s->cmp_num_entries_log2);
1098 1.1 skrll
1099 1.1 skrll while (true) {
1100 1.1 skrll size_t crpidx = s->cmp_prod_idx;
1101 1.1 skrll membar_acquire();
1102 1.1 skrll
1103 1.1 skrll if (s->cmp_cons_idx == crpidx)
1104 1.1 skrll break;
1105 1.1 skrll
1106 1.1 skrll size_t crcidx = s->cmp_cons_idx & mask;
1107 1.1 skrll
1108 1.1 skrll e = ring + crcidx;
1109 1.1 skrll
1110 1.1 skrll pvscsi_process_completion(sc, e);
1111 1.1 skrll
1112 1.1 skrll /*
1113 1.1 skrll * ensure completion processing reads happen before write to
1114 1.1 skrll * (increment of) cmp_cons_idx
1115 1.1 skrll */
1116 1.1 skrll membar_release();
1117 1.1 skrll s->cmp_cons_idx++;
1118 1.1 skrll }
1119 1.1 skrll }
1120 1.1 skrll
1121 1.1 skrll static void
1122 1.1 skrll pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e)
1123 1.1 skrll {
1124 1.1 skrll struct pvscsi_ring_msg_dev_status_changed *desc;
1125 1.1 skrll
1126 1.1 skrll switch (e->type) {
1127 1.1 skrll case PVSCSI_MSG_DEV_ADDED:
1128 1.1 skrll case PVSCSI_MSG_DEV_REMOVED: {
1129 1.1 skrll desc = (struct pvscsi_ring_msg_dev_status_changed *)e;
1130 1.1 skrll struct scsibus_softc *ssc = device_private(sc->sc_scsibus_dv);
1131 1.1 skrll
1132 1.1 skrll aprint_normal_dev(sc->dev, "MSG: device %s at scsi%u:%u:%u\n",
1133 1.1 skrll desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal",
1134 1.1 skrll desc->bus, desc->target, desc->lun[1]);
1135 1.1 skrll
1136 1.1 skrll if (desc->type == PVSCSI_MSG_DEV_ADDED) {
1137 1.1 skrll if (scsi_probe_bus(ssc,
1138 1.1 skrll desc->target, desc->lun[1]) != 0) {
1139 1.1 skrll aprint_normal_dev(sc->dev,
1140 1.1 skrll "Error creating path for dev change.\n");
1141 1.1 skrll break;
1142 1.1 skrll }
1143 1.1 skrll } else {
1144 1.1 skrll if (scsipi_target_detach(ssc->sc_channel,
1145 1.1 skrll desc->target, desc->lun[1],
1146 1.1 skrll DETACH_FORCE) != 0) {
1147 1.1 skrll aprint_normal_dev(sc->dev,
1148 1.1 skrll "Error detaching target %d lun %d\n",
1149 1.1 skrll desc->target, desc->lun[1]);
1150 1.1 skrll };
1151 1.1 skrll
1152 1.1 skrll }
1153 1.1 skrll } break;
1154 1.1 skrll default:
1155 1.1 skrll aprint_normal_dev(sc->dev, "Unknown msg type 0x%x\n", e->type);
1156 1.1 skrll };
1157 1.1 skrll }
1158 1.1 skrll
1159 1.1 skrll static void
1160 1.1 skrll pvscsi_process_msg_ring(struct pvscsi_softc *sc)
1161 1.1 skrll {
1162 1.1 skrll struct pvscsi_ring_msg_desc *ring;
1163 1.1 skrll struct pvscsi_rings_state *s;
1164 1.1 skrll struct pvscsi_ring_msg_desc *e;
1165 1.1 skrll uint32_t mask;
1166 1.1 skrll
1167 1.1 skrll KASSERT(mutex_owned(&sc->lock));
1168 1.1 skrll
1169 1.1 skrll s = sc->rings_state;
1170 1.1 skrll ring = sc->msg_ring;
1171 1.1 skrll mask = MASK(s->msg_num_entries_log2);
1172 1.1 skrll
1173 1.1 skrll while (true) {
1174 1.1 skrll size_t mpidx = s->msg_prod_idx; // dma read (device -> cpu)
1175 1.1 skrll membar_acquire();
1176 1.1 skrll
1177 1.1 skrll if (s->msg_cons_idx == mpidx)
1178 1.1 skrll break;
1179 1.1 skrll
1180 1.1 skrll size_t mcidx = s->msg_cons_idx & mask;
1181 1.1 skrll
1182 1.1 skrll e = ring + mcidx;
1183 1.1 skrll
1184 1.1 skrll pvscsi_process_msg(sc, e);
1185 1.1 skrll
1186 1.1 skrll /*
1187 1.1 skrll * ensure message processing reads happen before write to
1188 1.1 skrll * (increment of) msg_cons_idx
1189 1.1 skrll */
1190 1.1 skrll membar_release();
1191 1.1 skrll s->msg_cons_idx++;
1192 1.1 skrll }
1193 1.1 skrll }
1194 1.1 skrll
1195 1.1 skrll static void
1196 1.1 skrll pvscsi_intr_locked(struct pvscsi_softc *sc)
1197 1.1 skrll {
1198 1.1 skrll uint32_t val;
1199 1.1 skrll
1200 1.1 skrll KASSERT(mutex_owned(&sc->lock));
1201 1.1 skrll
1202 1.1 skrll val = pvscsi_read_intr_status(sc);
1203 1.1 skrll
1204 1.1 skrll if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
1205 1.1 skrll pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED);
1206 1.1 skrll pvscsi_process_cmp_ring(sc);
1207 1.1 skrll if (sc->use_msg) {
1208 1.1 skrll pvscsi_process_msg_ring(sc);
1209 1.1 skrll }
1210 1.1 skrll }
1211 1.1 skrll }
1212 1.1 skrll
1213 1.1 skrll static int
1214 1.1 skrll pvscsi_intr(void *xsc)
1215 1.1 skrll {
1216 1.1 skrll struct pvscsi_softc *sc;
1217 1.1 skrll
1218 1.1 skrll sc = xsc;
1219 1.1 skrll
1220 1.1 skrll mutex_enter(&sc->lock);
1221 1.1 skrll pvscsi_intr_locked(xsc);
1222 1.1 skrll mutex_exit(&sc->lock);
1223 1.1 skrll
1224 1.1 skrll return 1;
1225 1.1 skrll }
1226 1.1 skrll
1227 1.1 skrll static void
1228 1.1 skrll pvscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
1229 1.1 skrll request, void *arg)
1230 1.1 skrll {
1231 1.1 skrll struct pvscsi_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1232 1.1 skrll
1233 1.1 skrll if (request == ADAPTER_REQ_SET_XFER_MODE) {
1234 1.1 skrll struct scsipi_xfer_mode *xm = arg;
1235 1.1 skrll
1236 1.1 skrll xm->xm_mode = PERIPH_CAP_TQING;
1237 1.1 skrll xm->xm_period = 0;
1238 1.1 skrll xm->xm_offset = 0;
1239 1.1 skrll scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
1240 1.1 skrll return;
1241 1.1 skrll } else if (request != ADAPTER_REQ_RUN_XFER) {
1242 1.1 skrll DEBUG_PRINTF(1, sc->dev, "unhandled %d\n", request);
1243 1.1 skrll return;
1244 1.1 skrll }
1245 1.1 skrll
1246 1.1 skrll /* request is ADAPTER_REQ_RUN_XFER */
1247 1.1 skrll struct scsipi_xfer *xs = arg;
1248 1.1 skrll struct scsipi_periph *periph = xs->xs_periph;
1249 1.1 skrll #ifdef SCSIPI_DEBUG
1250 1.1 skrll periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
1251 1.1 skrll #endif
1252 1.1 skrll
1253 1.1 skrll uint32_t req_num_entries_log2;
1254 1.1 skrll struct pvscsi_ring_req_desc *ring;
1255 1.1 skrll struct pvscsi_ring_req_desc *e;
1256 1.1 skrll struct pvscsi_rings_state *s;
1257 1.1 skrll struct pvscsi_hcb *hcb;
1258 1.1 skrll
1259 1.1 skrll if (xs->cmdlen < 0 || xs->cmdlen > sizeof(e->cdb)) {
1260 1.1 skrll DEBUG_PRINTF(1, sc->dev, "bad cmdlen %zu > %zu\n",
1261 1.1 skrll (size_t)xs->cmdlen, sizeof(e->cdb));
1262 1.1 skrll /* not a temporary condition */
1263 1.1 skrll xs->error = XS_DRIVER_STUFFUP;
1264 1.1 skrll scsipi_done(xs);
1265 1.1 skrll return;
1266 1.1 skrll }
1267 1.1 skrll
1268 1.1 skrll ring = sc->req_ring;
1269 1.1 skrll s = sc->rings_state;
1270 1.1 skrll
1271 1.1 skrll hcb = NULL;
1272 1.1 skrll req_num_entries_log2 = s->req_num_entries_log2;
1273 1.1 skrll
1274 1.1 skrll /* Protect against multiple senders */
1275 1.1 skrll mutex_enter(&sc->lock);
1276 1.1 skrll
1277 1.1 skrll if (s->req_prod_idx - s->cmp_cons_idx >=
1278 1.1 skrll (1 << req_num_entries_log2)) {
1279 1.1 skrll aprint_normal_dev(sc->dev,
1280 1.1 skrll "Not enough room on completion ring.\n");
1281 1.1 skrll xs->error = XS_RESOURCE_SHORTAGE;
1282 1.1 skrll goto finish_xs;
1283 1.1 skrll }
1284 1.1 skrll
1285 1.1 skrll if (xs->cmdlen > sizeof(e->cdb)) {
1286 1.1 skrll DEBUG_PRINTF(1, sc->dev, "cdb length %u too large\n",
1287 1.1 skrll xs->cmdlen);
1288 1.1 skrll xs->error = XS_DRIVER_STUFFUP;
1289 1.1 skrll goto finish_xs;
1290 1.1 skrll }
1291 1.1 skrll
1292 1.1 skrll hcb = pvscsi_hcb_get(sc);
1293 1.1 skrll if (hcb == NULL) {
1294 1.1 skrll aprint_normal_dev(sc->dev, "No free hcbs.\n");
1295 1.1 skrll xs->error = XS_RESOURCE_SHORTAGE;
1296 1.1 skrll goto finish_xs;
1297 1.1 skrll }
1298 1.1 skrll
1299 1.1 skrll hcb->xs = xs;
1300 1.1 skrll
1301 1.1 skrll const size_t rridx = s->req_prod_idx & MASK(req_num_entries_log2);
1302 1.1 skrll e = ring + rridx;
1303 1.1 skrll
1304 1.1 skrll memset(e, 0, sizeof(*e));
1305 1.1 skrll e->bus = 0;
1306 1.1 skrll e->target = periph->periph_target;
1307 1.1 skrll e->lun[1] = periph->periph_lun;
1308 1.1 skrll e->data_addr = 0;
1309 1.1 skrll e->data_len = xs->datalen;
1310 1.1 skrll e->vcpu_hint = cpu_index(curcpu());
1311 1.1 skrll e->flags = 0;
1312 1.1 skrll
1313 1.1 skrll e->cdb_len = xs->cmdlen;
1314 1.1 skrll memcpy(e->cdb, xs->cmd, xs->cmdlen);
1315 1.1 skrll
1316 1.1 skrll e->sense_addr = 0;
1317 1.1 skrll e->sense_len = sizeof(xs->sense);
1318 1.1 skrll if (e->sense_len > 0) {
1319 1.1 skrll e->sense_addr = hcb->sense_buffer_paddr;
1320 1.1 skrll }
1321 1.1 skrll //e->tag = xs->xs_tag_type;
1322 1.1 skrll e->tag = MSG_SIMPLE_Q_TAG;
1323 1.1 skrll
1324 1.1 skrll switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1325 1.1 skrll case XS_CTL_DATA_IN:
1326 1.1 skrll e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST;
1327 1.1 skrll break;
1328 1.1 skrll case XS_CTL_DATA_OUT:
1329 1.1 skrll e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE;
1330 1.1 skrll break;
1331 1.1 skrll default:
1332 1.1 skrll e->flags |= PVSCSI_FLAG_CMD_DIR_NONE;
1333 1.1 skrll break;
1334 1.1 skrll }
1335 1.1 skrll
1336 1.1 skrll e->context = pvscsi_hcb_to_context(sc, hcb);
1337 1.1 skrll hcb->e = e;
1338 1.1 skrll
1339 1.1 skrll DEBUG_PRINTF(3, sc->dev,
1340 1.1 skrll " queuing command %02x context %llx\n", e->cdb[0],
1341 1.1 skrll (unsigned long long)e->context);
1342 1.1 skrll
1343 1.1 skrll int flags;
1344 1.1 skrll flags = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE;
1345 1.1 skrll flags |= (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1346 1.1 skrll
1347 1.1 skrll int error = bus_dmamap_load(sc->sc_dmat, hcb->dma_map,
1348 1.1 skrll xs->data, xs->datalen, NULL, flags);
1349 1.1 skrll
1350 1.1 skrll if (error) {
1351 1.1 skrll if (error == ENOMEM || error == EAGAIN) {
1352 1.1 skrll xs->error = XS_RESOURCE_SHORTAGE;
1353 1.1 skrll } else {
1354 1.1 skrll xs->error = XS_DRIVER_STUFFUP;
1355 1.1 skrll }
1356 1.1 skrll DEBUG_PRINTF(1, sc->dev,
1357 1.1 skrll "xs: %p load error %d data %p len %d",
1358 1.1 skrll xs, error, xs->data, xs->datalen);
1359 1.1 skrll goto error_load;
1360 1.1 skrll }
1361 1.1 skrll
1362 1.1 skrll int op = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
1363 1.1 skrll BUS_DMASYNC_PREWRITE;
1364 1.1 skrll int nseg = hcb->dma_map->dm_nsegs;
1365 1.1 skrll bus_dma_segment_t *segs = hcb->dma_map->dm_segs;
1366 1.1 skrll if (nseg != 0) {
1367 1.1 skrll if (nseg > 1) {
1368 1.1 skrll struct pvscsi_sg_element *sge;
1369 1.1 skrll
1370 1.1 skrll KASSERTMSG(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT,
1371 1.1 skrll "too many sg segments");
1372 1.1 skrll
1373 1.1 skrll sge = hcb->sg_list->sge;
1374 1.1 skrll e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
1375 1.1 skrll
1376 1.1 skrll for (size_t i = 0; i < nseg; ++i) {
1377 1.1 skrll sge[i].addr = segs[i].ds_addr;
1378 1.1 skrll sge[i].length = segs[i].ds_len;
1379 1.1 skrll sge[i].flags = 0;
1380 1.1 skrll }
1381 1.1 skrll
1382 1.1 skrll e->data_addr = hcb->sg_list_paddr;
1383 1.1 skrll
1384 1.1 skrll bus_dmamap_sync(sc->sc_dmat,
1385 1.1 skrll sc->sg_list_dma.map, hcb->sg_list_offset,
1386 1.1 skrll sizeof(*sge) * nseg, BUS_DMASYNC_PREWRITE);
1387 1.1 skrll } else {
1388 1.1 skrll e->data_addr = segs->ds_addr;
1389 1.1 skrll }
1390 1.1 skrll
1391 1.1 skrll bus_dmamap_sync(sc->sc_dmat, hcb->dma_map, 0,
1392 1.1 skrll xs->datalen, op);
1393 1.1 skrll } else {
1394 1.1 skrll e->data_addr = 0;
1395 1.1 skrll }
1396 1.1 skrll
1397 1.1 skrll /*
1398 1.1 skrll * Ensure request record writes happen before write to (increment of)
1399 1.1 skrll * req_prod_idx.
1400 1.1 skrll */
1401 1.1 skrll membar_producer();
1402 1.1 skrll
1403 1.1 skrll uint8_t cdb0 = e->cdb[0];
1404 1.1 skrll
1405 1.1 skrll /* handle timeout */
1406 1.1 skrll if ((xs->xs_control & XS_CTL_POLL) == 0) {
1407 1.1 skrll int timeout = mstohz(xs->timeout);
1408 1.1 skrll /* start expire timer */
1409 1.1 skrll if (timeout == 0)
1410 1.1 skrll timeout = 1;
1411 1.1 skrll callout_reset(&xs->xs_callout, timeout, pvscsi_timeout, hcb);
1412 1.1 skrll }
1413 1.1 skrll
1414 1.1 skrll s->req_prod_idx++;
1415 1.1 skrll
1416 1.1 skrll /*
1417 1.1 skrll * Ensure req_prod_idx write (increment) happens before
1418 1.1 skrll * IO is kicked (via a write).
1419 1.2 skrll */
1420 1.2 skrll membar_producer();
1421 1.2 skrll
1422 1.1 skrll pvscsi_kick_io(sc, cdb0);
1423 1.1 skrll mutex_exit(&sc->lock);
1424 1.1 skrll
1425 1.1 skrll return;
1426 1.1 skrll
1427 1.1 skrll error_load:
1428 1.1 skrll pvscsi_hcb_put(sc, hcb);
1429 1.1 skrll
1430 1.1 skrll finish_xs:
1431 1.1 skrll mutex_exit(&sc->lock);
1432 1.1 skrll scsipi_done(xs);
1433 1.1 skrll }
1434 1.1 skrll
1435 1.1 skrll static void
1436 1.1 skrll pvscsi_free_interrupts(struct pvscsi_softc *sc)
1437 1.1 skrll {
1438 1.1 skrll
1439 1.1 skrll if (sc->sc_ih != NULL) {
1440 1.1 skrll pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
1441 1.1 skrll sc->sc_ih = NULL;
1442 1.1 skrll }
1443 1.1 skrll if (sc->sc_pihp != NULL) {
1444 1.1 skrll pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
1445 1.1 skrll sc->sc_pihp = NULL;
1446 1.1 skrll }
1447 1.1 skrll }
1448 1.1 skrll
1449 1.1 skrll static int
1450 1.1 skrll pvscsi_setup_interrupts(struct pvscsi_softc *sc, const struct pci_attach_args *pa)
1451 1.1 skrll {
1452 1.1 skrll int use_msix;
1453 1.1 skrll int use_msi;
1454 1.1 skrll int counts[PCI_INTR_TYPE_SIZE];
1455 1.1 skrll
1456 1.1 skrll for (size_t i = 0; i < PCI_INTR_TYPE_SIZE; i++) {
1457 1.1 skrll counts[i] = 1;
1458 1.1 skrll }
1459 1.1 skrll
1460 1.1 skrll use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix);
1461 1.1 skrll use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi);
1462 1.1 skrll
1463 1.1 skrll if (!use_msix) {
1464 1.1 skrll counts[PCI_INTR_TYPE_MSIX] = 0;
1465 1.1 skrll }
1466 1.1 skrll if (!use_msi) {
1467 1.1 skrll counts[PCI_INTR_TYPE_MSI] = 0;
1468 1.1 skrll }
1469 1.1 skrll
1470 1.1 skrll /* Allocate and establish the interrupt. */
1471 1.1 skrll if (pci_intr_alloc(pa, &sc->sc_pihp, counts, PCI_INTR_TYPE_MSIX)) {
1472 1.1 skrll aprint_error_dev(sc->dev, "can't allocate handler\n");
1473 1.1 skrll goto fail;
1474 1.1 skrll }
1475 1.1 skrll
1476 1.1 skrll char intrbuf[PCI_INTRSTR_LEN];
1477 1.1 skrll const pci_chipset_tag_t pc = pa->pa_pc;
1478 1.1 skrll char const *intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
1479 1.1 skrll sizeof(intrbuf));
1480 1.1 skrll
1481 1.1 skrll sc->sc_ih = pci_intr_establish_xname(pc, sc->sc_pihp[0], IPL_BIO,
1482 1.1 skrll pvscsi_intr, sc, device_xname(sc->dev));
1483 1.1 skrll if (sc->sc_ih == NULL) {
1484 1.1 skrll pci_intr_release(pc, sc->sc_pihp, 1);
1485 1.1 skrll sc->sc_pihp = NULL;
1486 1.1 skrll aprint_error_dev(sc->dev, "couldn't establish interrupt");
1487 1.1 skrll if (intrstr != NULL)
1488 1.1 skrll aprint_error(" at %s", intrstr);
1489 1.1 skrll aprint_error("\n");
1490 1.1 skrll goto fail;
1491 1.1 skrll }
1492 1.1 skrll pci_intr_setattr(pc, sc->sc_pihp, PCI_INTR_MPSAFE, true);
1493 1.1 skrll
1494 1.1 skrll aprint_normal_dev(sc->dev, "interrupting at %s\n", intrstr);
1495 1.1 skrll
1496 1.1 skrll return (0);
1497 1.1 skrll
1498 1.1 skrll fail:
1499 1.1 skrll if (sc->sc_ih != NULL) {
1500 1.1 skrll pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
1501 1.1 skrll sc->sc_ih = NULL;
1502 1.1 skrll }
1503 1.1 skrll if (sc->sc_pihp != NULL) {
1504 1.1 skrll pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
1505 1.1 skrll sc->sc_pihp = NULL;
1506 1.1 skrll }
1507 1.1 skrll if (sc->sc_mems) {
1508 1.1 skrll bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1509 1.1 skrll sc->sc_mems = 0;
1510 1.1 skrll }
1511 1.1 skrll
1512 1.1 skrll return 1;
1513 1.1 skrll }
1514 1.1 skrll
1515 1.1 skrll static void
1516 1.1 skrll pvscsi_free_all(struct pvscsi_softc *sc)
1517 1.1 skrll {
1518 1.1 skrll
1519 1.1 skrll pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt);
1520 1.1 skrll
1521 1.1 skrll if (sc->hcbs) {
1522 1.1 skrll kmem_free(sc->hcbs, sc->hcb_cnt * sizeof(*sc->hcbs));
1523 1.1 skrll }
1524 1.1 skrll
1525 1.1 skrll pvscsi_free_rings(sc);
1526 1.1 skrll
1527 1.1 skrll pvscsi_free_interrupts(sc);
1528 1.1 skrll
1529 1.1 skrll if (sc->sc_mems) {
1530 1.1 skrll bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1531 1.1 skrll sc->sc_mems = 0;
1532 1.1 skrll }
1533 1.1 skrll }
1534 1.1 skrll
1535 1.1 skrll static inline void
1536 1.1 skrll pci_enable_busmaster(device_t dev, const pci_chipset_tag_t pc,
1537 1.1 skrll const pcitag_t tag)
1538 1.1 skrll {
1539 1.1 skrll pcireg_t pci_cmd_word;
1540 1.1 skrll
1541 1.1 skrll pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1542 1.1 skrll if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
1543 1.1 skrll pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
1544 1.1 skrll pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
1545 1.1 skrll }
1546 1.1 skrll }
1547 1.1 skrll
1548 1.1 skrll static void
1549 1.1 skrll pvscsi_attach(device_t parent, device_t dev, void *aux)
1550 1.1 skrll {
1551 1.1 skrll const struct pci_attach_args *pa = aux;
1552 1.1 skrll struct pvscsi_softc *sc;
1553 1.1 skrll int rid;
1554 1.1 skrll int error;
1555 1.1 skrll int max_queue_depth;
1556 1.1 skrll int adapter_queue_size;
1557 1.1 skrll
1558 1.1 skrll sc = device_private(dev);
1559 1.1 skrll sc->dev = dev;
1560 1.1 skrll
1561 1.1 skrll struct scsipi_adapter *adapt = &sc->sc_adapter;
1562 1.1 skrll struct scsipi_channel *chan = &sc->sc_channel;
1563 1.1 skrll
1564 1.1 skrll mutex_init(&sc->lock, MUTEX_DEFAULT, IPL_BIO);
1565 1.1 skrll
1566 1.1 skrll sc->sc_pc = pa->pa_pc;
1567 1.1 skrll pci_enable_busmaster(dev, pa->pa_pc, pa->pa_tag);
1568 1.1 skrll
1569 1.1 skrll pci_aprint_devinfo_fancy(pa, "virtual disk controller",
1570 1.1 skrll VMWARE_PVSCSI_DEVSTR, true);
1571 1.1 skrll
1572 1.1 skrll /*
1573 1.1 skrll * Map the device. All devices support memory-mapped acccess.
1574 1.1 skrll */
1575 1.1 skrll bool memh_valid;
1576 1.1 skrll bus_space_tag_t memt;
1577 1.1 skrll bus_space_handle_t memh;
1578 1.1 skrll bus_size_t mems;
1579 1.1 skrll pcireg_t regt;
1580 1.1 skrll
1581 1.1 skrll for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END; rid += sizeof(regt)) {
1582 1.1 skrll regt = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rid);
1583 1.1 skrll if (PCI_MAPREG_TYPE(regt) == PCI_MAPREG_TYPE_MEM)
1584 1.1 skrll break;
1585 1.1 skrll }
1586 1.1 skrll
1587 1.1 skrll if (rid >= PCI_MAPREG_END) {
1588 1.1 skrll aprint_error_dev(dev,
1589 1.1 skrll "unable to locate device registers\n");
1590 1.1 skrll }
1591 1.1 skrll
1592 1.1 skrll memh_valid = (pci_mapreg_map(pa, rid, regt, 0, &memt, &memh,
1593 1.1 skrll NULL, &mems) == 0);
1594 1.1 skrll if (!memh_valid) {
1595 1.1 skrll aprint_error_dev(dev,
1596 1.1 skrll "unable to map device registers\n");
1597 1.1 skrll return;
1598 1.1 skrll }
1599 1.1 skrll sc->sc_memt = memt;
1600 1.1 skrll sc->sc_memh = memh;
1601 1.1 skrll sc->sc_mems = mems;
1602 1.1 skrll
1603 1.1 skrll if (pci_dma64_available(pa)) {
1604 1.1 skrll sc->sc_dmat = pa->pa_dmat64;
1605 1.1 skrll aprint_verbose_dev(sc->dev, "64-bit DMA\n");
1606 1.1 skrll } else {
1607 1.1 skrll aprint_verbose_dev(sc->dev, "32-bit DMA\n");
1608 1.1 skrll sc->sc_dmat = pa->pa_dmat;
1609 1.1 skrll }
1610 1.1 skrll
1611 1.1 skrll error = pvscsi_setup_interrupts(sc, pa);
1612 1.1 skrll if (error) {
1613 1.1 skrll aprint_normal_dev(dev, "Interrupt setup failed\n");
1614 1.1 skrll pvscsi_free_all(sc);
1615 1.1 skrll return;
1616 1.1 skrll }
1617 1.1 skrll
1618 1.1 skrll sc->max_targets = pvscsi_get_max_targets(sc);
1619 1.1 skrll
1620 1.1 skrll sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) &&
1621 1.1 skrll pvscsi_hw_supports_msg(sc);
1622 1.1 skrll sc->msg_ring_num_pages = sc->use_msg ? 1 : 0;
1623 1.1 skrll
1624 1.1 skrll sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages",
1625 1.1 skrll pvscsi_request_ring_pages);
1626 1.1 skrll if (sc->req_ring_num_pages <= 0) {
1627 1.1 skrll if (sc->max_targets <= 16) {
1628 1.1 skrll sc->req_ring_num_pages =
1629 1.1 skrll PVSCSI_DEFAULT_NUM_PAGES_REQ_RING;
1630 1.1 skrll } else {
1631 1.1 skrll sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
1632 1.1 skrll }
1633 1.1 skrll } else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) {
1634 1.1 skrll sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
1635 1.1 skrll }
1636 1.1 skrll sc->cmp_ring_num_pages = sc->req_ring_num_pages;
1637 1.1 skrll
1638 1.1 skrll max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth",
1639 1.1 skrll pvscsi_max_queue_depth);
1640 1.1 skrll
1641 1.1 skrll adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) /
1642 1.1 skrll sizeof(struct pvscsi_ring_req_desc);
1643 1.1 skrll if (max_queue_depth > 0) {
1644 1.1 skrll adapter_queue_size = MIN(adapter_queue_size, max_queue_depth);
1645 1.1 skrll }
1646 1.1 skrll adapter_queue_size = MIN(adapter_queue_size,
1647 1.1 skrll PVSCSI_MAX_REQ_QUEUE_DEPTH);
1648 1.1 skrll
1649 1.1 skrll aprint_normal_dev(sc->dev, "Use Msg: %d\n", sc->use_msg);
1650 1.1 skrll aprint_normal_dev(sc->dev, "Max targets: %d\n", sc->max_targets);
1651 1.1 skrll aprint_normal_dev(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages);
1652 1.1 skrll aprint_normal_dev(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages);
1653 1.1 skrll aprint_normal_dev(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages);
1654 1.1 skrll aprint_normal_dev(sc->dev, "Queue size: %d\n", adapter_queue_size);
1655 1.1 skrll
1656 1.1 skrll if (pvscsi_allocate_rings(sc)) {
1657 1.1 skrll aprint_normal_dev(dev, "ring allocation failed\n");
1658 1.1 skrll pvscsi_free_all(sc);
1659 1.1 skrll return;
1660 1.1 skrll }
1661 1.1 skrll
1662 1.1 skrll sc->hcb_cnt = adapter_queue_size;
1663 1.1 skrll sc->hcbs = kmem_zalloc(sc->hcb_cnt * sizeof(*sc->hcbs), KM_SLEEP);
1664 1.1 skrll
1665 1.1 skrll if (pvscsi_dma_alloc_per_hcb(sc)) {
1666 1.1 skrll aprint_normal_dev(dev, "error allocating per hcb dma memory\n");
1667 1.1 skrll pvscsi_free_all(sc);
1668 1.1 skrll return;
1669 1.1 skrll }
1670 1.1 skrll
1671 1.1 skrll pvscsi_adapter_reset(sc);
1672 1.1 skrll
1673 1.1 skrll /*
1674 1.1 skrll * Fill in the scsipi_adapter.
1675 1.1 skrll */
1676 1.1 skrll memset(adapt, 0, sizeof(*adapt));
1677 1.1 skrll adapt->adapt_dev = sc->dev;
1678 1.1 skrll adapt->adapt_nchannels = 1;
1679 1.1 skrll adapt->adapt_openings = MIN(adapter_queue_size, PVSCSI_CMD_PER_LUN);
1680 1.1 skrll adapt->adapt_max_periph = adapt->adapt_openings;
1681 1.1 skrll adapt->adapt_request = pvscsi_scsipi_request;
1682 1.1 skrll adapt->adapt_minphys = minphys;
1683 1.1 skrll
1684 1.1 skrll /*
1685 1.1 skrll * Fill in the scsipi_channel.
1686 1.1 skrll */
1687 1.1 skrll memset(chan, 0, sizeof(*chan));
1688 1.1 skrll chan->chan_adapter = adapt;
1689 1.1 skrll chan->chan_bustype = &scsi_bustype;
1690 1.1 skrll chan->chan_channel = 0;
1691 1.1 skrll chan->chan_ntargets = MIN(PVSCSI_MAX_TARGET, 16); /* cap reasonably */
1692 1.1 skrll chan->chan_nluns = MIN(PVSCSI_MAX_LUN, 1024); /* cap reasonably */
1693 1.1 skrll chan->chan_id = PVSCSI_MAX_TARGET;
1694 1.1 skrll chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
1695 1.1 skrll
1696 1.1 skrll pvscsi_setup_rings(sc);
1697 1.1 skrll if (sc->use_msg) {
1698 1.1 skrll pvscsi_setup_msg_ring(sc);
1699 1.1 skrll }
1700 1.1 skrll
1701 1.1 skrll sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1);
1702 1.1 skrll
1703 1.1 skrll pvscsi_intr_enable(sc);
1704 1.1 skrll
1705 1.1 skrll sc->sc_scsibus_dv = config_found(sc->dev, &sc->sc_channel, scsiprint,
1706 1.1 skrll CFARGS_NONE);
1707 1.1 skrll
1708 1.1 skrll return;
1709 1.1 skrll }
1710 1.1 skrll
1711 1.1 skrll static int
1712 1.1 skrll pvscsi_detach(device_t dev, int flags)
1713 1.1 skrll {
1714 1.1 skrll struct pvscsi_softc *sc;
1715 1.1 skrll
1716 1.1 skrll sc = device_private(dev);
1717 1.1 skrll
1718 1.1 skrll pvscsi_intr_disable(sc);
1719 1.1 skrll pvscsi_adapter_reset(sc);
1720 1.1 skrll
1721 1.1 skrll pvscsi_free_all(sc);
1722 1.1 skrll
1723 1.1 skrll mutex_destroy(&sc->lock);
1724 1.1 skrll
1725 1.1 skrll return (0);
1726 1.1 skrll }
1727