/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/*

These files are provided under a dual BSD-2 Clause/GPLv2 license. When
using or redistributing this file, you may do so under either license.

BSD-2 Clause License

Copyright (c) 2018 VMware, Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

GPL License Summary

Copyright (c) 2018 VMware, Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.

*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pvscsi.c,v 1.5 2025/09/06 02:56:52 riastradh Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/paravirt_membar.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>

#include "pvscsi.h"

#define PVSCSI_DEFAULT_NUM_PAGES_REQ_RING	8
#define PVSCSI_SENSE_LENGTH			256

#define PVSCSI_MAXPHYS		MAXPHYS
#define PVSCSI_MAXPHYS_SEGS	((PVSCSI_MAXPHYS / PAGE_SIZE) + 1)
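
/*
 * Note: a MAXPHYS-sized transfer that does not start on a page
 * boundary touches one more page than MAXPHYS / PAGE_SIZE, hence
 * the "+ 1" in PVSCSI_MAXPHYS_SEGS above.
 */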

#define PVSCSI_CMD_PER_LUN	64
#define PVSCSI_MAX_LUN		8
#define PVSCSI_MAX_TARGET	16

//#define PVSCSI_DEBUG_LOGGING

#ifdef PVSCSI_DEBUG_LOGGING
#define DEBUG_PRINTF(level, dev, fmt, ...) \
	do { \
		if (pvscsi_log_level >= (level)) { \
			aprint_normal_dev((dev), (fmt), ##__VA_ARGS__); \
		} \
	} while (0)
#else
#define DEBUG_PRINTF(level, dev, fmt, ...)
#endif /* PVSCSI_DEBUG_LOGGING */

struct pvscsi_softc;
struct pvscsi_hcb;
struct pvscsi_dma;

#define VMWARE_PVSCSI_DEVSTR	"VMware Paravirtual SCSI Controller"

static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc,
    uint32_t offset);
static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset,
    uint32_t val);
static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc);
static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc,
    uint32_t val);
static inline void pvscsi_intr_enable(struct pvscsi_softc *sc);
static inline void pvscsi_intr_disable(struct pvscsi_softc *sc);
static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0);
static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    uint32_t len);
static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc);
static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable);
static void pvscsi_setup_rings(struct pvscsi_softc *sc);
static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc);
static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc);

static void pvscsi_timeout(void *arg);
static void pvscsi_adapter_reset(struct pvscsi_softc *sc);
static void pvscsi_bus_reset(struct pvscsi_softc *sc);
static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target);
static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target,
    struct pvscsi_hcb *hcb);

static void pvscsi_process_completion(struct pvscsi_softc *sc,
    struct pvscsi_ring_cmp_desc *e);
static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc);
static void pvscsi_process_msg(struct pvscsi_softc *sc,
    struct pvscsi_ring_msg_desc *e);
static void pvscsi_process_msg_ring(struct pvscsi_softc *sc);

static void pvscsi_intr_locked(struct pvscsi_softc *sc);
static int pvscsi_intr(void *xsc);

static void pvscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);

static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    struct pvscsi_hcb *hcb);
static inline struct pvscsi_hcb *pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    uint64_t context);
static struct pvscsi_hcb *pvscsi_hcb_get(struct pvscsi_softc *sc);
static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb);

static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma);
static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    bus_size_t size, bus_size_t alignment);
static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc,
    struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages);
static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc,
    uint32_t hcbs_allocated);
static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc);
static void pvscsi_free_rings(struct pvscsi_softc *sc);
static int pvscsi_allocate_rings(struct pvscsi_softc *sc);
static void pvscsi_free_interrupts(struct pvscsi_softc *sc);
static int pvscsi_setup_interrupts(struct pvscsi_softc *sc,
    const struct pci_attach_args *);
static void pvscsi_free_all(struct pvscsi_softc *sc);

static void pvscsi_attach(device_t, device_t, void *);
static int pvscsi_detach(device_t, int);
static int pvscsi_probe(device_t, cfdata_t, void *);

#define pvscsi_get_tunable(_sc, _name, _value)	(_value)

#ifdef PVSCSI_DEBUG_LOGGING
static int pvscsi_log_level = 1;
#endif

#define TUNABLE_INT(__x, __d) \
	err = sysctl_createv(clog, 0, &rnode, &cnode, \
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, \
	    #__x, SYSCTL_DESCR(__d), \
	    NULL, 0, &(pvscsi_ ## __x), sizeof(pvscsi_ ## __x), \
	    CTL_CREATE, CTL_EOL); \
	if (err) \
		goto fail;

static int pvscsi_request_ring_pages = 0;
static int pvscsi_use_msg = 1;
static int pvscsi_use_msi = 1;
static int pvscsi_use_msix = 1;
static int pvscsi_use_req_call_threshold = 0;
static int pvscsi_max_queue_depth = 0;
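
/*
 * These knobs appear under the hw.pvscsi sysctl node created below,
 * e.g. "sysctl -w hw.pvscsi.use_msg=0".  Most of them are only
 * consulted when the device attaches, so they must be set before
 * attach to have any effect; log_level is read at run time.
 */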

SYSCTL_SETUP(sysctl_hw_pvscsi_setup, "sysctl hw.pvscsi setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "pvscsi",
	    SYSCTL_DESCR("pvscsi global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

#ifdef PVSCSI_DEBUG_LOGGING
	TUNABLE_INT(log_level, "Enable debugging output");
#endif

	TUNABLE_INT(request_ring_pages, "No. of pages for the request ring");
	TUNABLE_INT(use_msg, "Use message passing");
	TUNABLE_INT(use_msi, "Use MSI interrupt");
	TUNABLE_INT(use_msix, "Use MSI-X interrupt");
	TUNABLE_INT(use_req_call_threshold, "Use request limit");
	TUNABLE_INT(max_queue_depth, "Maximum size of request queue");

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

struct pvscsi_sg_list {
	struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
};

#define PVSCSI_ABORT_TIMEOUT	2
#define PVSCSI_RESET_TIMEOUT	10

#define PVSCSI_HCB_NONE		0
#define PVSCSI_HCB_ABORT	1
#define PVSCSI_HCB_DEVICE_RESET	2
#define PVSCSI_HCB_BUS_RESET	3
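
/*
 * Timeout recovery escalates one step per callout expiry in
 * pvscsi_timeout(): NONE -> ABORT (abort the command), then
 * DEVICE_RESET, then BUS_RESET, and finally a full adapter reset.
 */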

struct pvscsi_hcb {
	struct scsipi_xfer *xs;
	struct pvscsi_softc *sc;

	struct pvscsi_ring_req_desc *e;
	int recovery;
	SLIST_ENTRY(pvscsi_hcb) links;

	bus_dmamap_t dma_map;
	bus_addr_t dma_map_offset;
	bus_size_t dma_map_size;
	void *sense_buffer;
	bus_addr_t sense_buffer_paddr;
	struct pvscsi_sg_list *sg_list;
	bus_addr_t sg_list_paddr;
	bus_addr_t sg_list_offset;
};

struct pvscsi_dma {
	bus_dmamap_t map;
	void *vaddr;
	bus_addr_t paddr;
	bus_size_t size;
	bus_dma_segment_t seg[1];
};

struct pvscsi_softc {
	device_t dev;
	kmutex_t lock;

	device_t sc_scsibus_dv;
	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	struct pvscsi_rings_state *rings_state;
	struct pvscsi_ring_req_desc *req_ring;
	struct pvscsi_ring_cmp_desc *cmp_ring;
	struct pvscsi_ring_msg_desc *msg_ring;
	uint32_t hcb_cnt;
	struct pvscsi_hcb *hcbs;
	SLIST_HEAD(, pvscsi_hcb) free_list;

	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_memh;
	bus_size_t sc_mems;

	bool use_msg;
	uint32_t max_targets;
	int mm_rid;
	int irq_id;
	int use_req_call_threshold;

	pci_chipset_tag_t sc_pc;
	pci_intr_handle_t *sc_pihp;
	void *sc_ih;

	uint64_t rings_state_ppn;
	uint32_t req_ring_num_pages;
	uint64_t req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING];
	uint32_t cmp_ring_num_pages;
	uint64_t cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING];
	uint32_t msg_ring_num_pages;
	uint64_t msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING];

	struct pvscsi_dma rings_state_dma;
	struct pvscsi_dma req_ring_dma;
	struct pvscsi_dma cmp_ring_dma;
	struct pvscsi_dma msg_ring_dma;

	struct pvscsi_dma sg_list_dma;
	struct pvscsi_dma sense_buffer_dma;
};

CFATTACH_DECL3_NEW(pvscsi, sizeof(struct pvscsi_softc),
    pvscsi_probe, pvscsi_attach, pvscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#define PVSCSI_DMA_SYNC_STATE(sc, dma, structptr, member, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (dma)->map, \
	    /*offset*/offsetof(__typeof__(*(structptr)), member), \
	    /*length*/sizeof((structptr)->member), \
	    (ops))

#define PVSCSI_DMA_SYNC_RING(sc, dma, ring, idx, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (dma)->map, \
	    /*offset*/sizeof(*(ring)) * (idx), \
	    /*length*/sizeof(*(ring)), \
	    (ops))
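
/*
 * Both macros sync only the bytes actually touched: a single field of
 * the shared rings state, or a single descriptor slot of a ring.  This
 * keeps the sync granularity small on the hot path.  Callers pair
 * POSTREAD/PREREAD around device-written fields and POSTWRITE/PREWRITE
 * around driver-written fields, as in the ring loops below.
 */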

static inline uint32_t
pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset)
{

	return (bus_space_read_4(sc->sc_memt, sc->sc_memh, offset));
}

static inline void
pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val)
{

	bus_space_write_4(sc->sc_memt, sc->sc_memh, offset, val);
}

static inline uint32_t
pvscsi_read_intr_status(struct pvscsi_softc *sc)
{

	return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS));
}

static inline void
pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val)
{

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static inline void
pvscsi_intr_enable(struct pvscsi_softc *sc)
{
	uint32_t mask;

	mask = PVSCSI_INTR_CMPL_MASK;
	if (sc->use_msg) {
		mask |= PVSCSI_INTR_MSG_MASK;
	}

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask);
}

static inline void
pvscsi_intr_disable(struct pvscsi_softc *sc)
{

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

static void
pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0)
{
	struct pvscsi_dma *s_dma;
	struct pvscsi_rings_state *s;

	DEBUG_PRINTF(2, sc->dev, "%s: cdb0 %#x\n", __func__, cdb0);
	if (cdb0 == SCSI_READ_6_COMMAND || cdb0 == READ_10 ||
	    cdb0 == READ_12 || cdb0 == READ_16 ||
	    cdb0 == SCSI_WRITE_6_COMMAND || cdb0 == WRITE_10 ||
	    cdb0 == WRITE_12 || cdb0 == WRITE_16) {
		s_dma = &sc->rings_state_dma;
		s = sc->rings_state;

		/*
		 * Ensure the command has been published before we read
		 * req_cons_idx to test whether we need to kick the
		 * host.
		 */
		paravirt_membar_sync();

		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_cons_idx,
		    BUS_DMASYNC_POSTREAD);
		DEBUG_PRINTF(2, sc->dev, "%s req prod %d cons %d\n", __func__,
		    s->req_prod_idx, s->req_cons_idx);
		if (!sc->use_req_call_threshold ||
		    (s->req_prod_idx - s->req_cons_idx) >=
		    s->req_call_threshold) {
			pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
			DEBUG_PRINTF(2, sc->dev, "kicked\n");
		} else {
			DEBUG_PRINTF(2, sc->dev,
			    "kick skipped, below req call threshold\n");
		}
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_cons_idx,
		    BUS_DMASYNC_PREREAD);
	} else {
		s = sc->rings_state;
		/*
		 * XXX req_cons_idx in debug log might be stale, but no
		 * need for DMA sync otherwise in this branch
		 */
		DEBUG_PRINTF(1, sc->dev,
		    "%s req prod %d cons %d not checked\n", __func__,
		    s->req_prod_idx, s->req_cons_idx);

		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
	}
}

static void
pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    uint32_t len)
{
	uint32_t *data_ptr;
	int i;

	KASSERTMSG(len % sizeof(uint32_t) == 0,
	    "command size not a multiple of 4");

	data_ptr = data;
	len /= sizeof(uint32_t);

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; ++i) {
		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA,
		    data_ptr[i]);
	}
}

static inline uint64_t
pvscsi_hcb_to_context(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
{

	/* Offset by 1 because context must not be 0 */
	return (hcb - sc->hcbs + 1);
}

static inline struct pvscsi_hcb *
pvscsi_context_to_hcb(struct pvscsi_softc *sc, uint64_t context)
{

	return (sc->hcbs + (context - 1));
}

static struct pvscsi_hcb *
pvscsi_hcb_get(struct pvscsi_softc *sc)
{
	struct pvscsi_hcb *hcb;

	KASSERT(mutex_owned(&sc->lock));

	hcb = SLIST_FIRST(&sc->free_list);
	if (hcb) {
		SLIST_REMOVE_HEAD(&sc->free_list, links);
	}

	return (hcb);
}

static void
pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
{

	KASSERT(mutex_owned(&sc->lock));
	hcb->xs = NULL;
	hcb->e = NULL;
	hcb->recovery = PVSCSI_HCB_NONE;
	SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
}

static uint32_t
pvscsi_get_max_targets(struct pvscsi_softc *sc)
{
	uint32_t max_targets;

	pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0);

	max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	if (max_targets == ~0) {
		max_targets = 16;
	}

	return (max_targets);
}

static int
pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable)
{
	uint32_t status;
	struct pvscsi_cmd_desc_setup_req_call cmd;

	if (!pvscsi_get_tunable(sc, "pvscsi_use_req_call_threshold",
	    pvscsi_use_req_call_threshold)) {
		return (0);
	}

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
	    PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	if (status != -1) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.enable = enable;
		pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
		    &cmd, sizeof(cmd));
		status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

		/*
		 * After setup, sync req_call_threshold before use.
		 * After this point it should be stable, so no need to
		 * sync again during use.
		 */
		PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
		    sc->rings_state, req_call_threshold,
		    BUS_DMASYNC_POSTREAD);

		return (status != 0);
	} else {
		return (0);
	}
}

static void
pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma)
{

	bus_dmamap_unload(sc->sc_dmat, dma->map);
	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
	bus_dmamap_destroy(sc->sc_dmat, dma->map);
	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));

	memset(dma, 0, sizeof(*dma));
}

static int
pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;
	int nsegs;

	memset(dma, 0, sizeof(*dma));

	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, 0, dma->seg,
	    __arraycount(dma->seg), &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "error allocating dma mem, error %d\n", error);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, dma->seg, nsegs, size,
	    &dma->vaddr, BUS_DMA_WAITOK);
	if (error != 0) {
		device_printf(sc->dev, "Failed to map DMA memory\n");
		goto dmamemmap_fail;
	}

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, &dma->map);
	if (error != 0) {
		device_printf(sc->dev, "Failed to create DMA map\n");
		goto dmamapcreate_fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->vaddr, size,
	    NULL, BUS_DMA_WAITOK);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "error loading dma map, error %d\n", error);
		goto dmamapload_fail;
	}

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	dma->size = size;

	return 0;

dmamapload_fail:
	bus_dmamap_destroy(sc->sc_dmat, dma->map);
dmamapcreate_fail:
	bus_dmamem_unmap(sc->sc_dmat, dma->vaddr, dma->size);
dmamemmap_fail:
	bus_dmamem_free(sc->sc_dmat, dma->seg, __arraycount(dma->seg));
fail:

	return (error);
}

static int
pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    uint64_t *ppn_list, uint32_t num_pages)
{
	int error;
	uint32_t i;
	uint64_t ppn;

	error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating pages, error %d\n", error);
		return (error);
	}

	memset(dma->vaddr, 0, num_pages * PAGE_SIZE);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, num_pages * PAGE_SIZE,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ppn = dma->paddr >> PAGE_SHIFT;
	for (i = 0; i < num_pages; i++) {
		ppn_list[i] = ppn + i;
	}

	return (0);
}

static void
pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated)
{
	int i;
	struct pvscsi_hcb *hcb;

	for (i = 0; i < hcbs_allocated; ++i) {
		hcb = sc->hcbs + i;
		bus_dmamap_destroy(sc->sc_dmat, hcb->dma_map);
	}

	pvscsi_dma_free(sc, &sc->sense_buffer_dma);
	pvscsi_dma_free(sc, &sc->sg_list_dma);
}

static int
pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc)
{
	int i;
	int error;
	struct pvscsi_hcb *hcb;

	i = 0;

	error = pvscsi_dma_alloc(sc, &sc->sg_list_dma,
	    sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating sg list DMA memory, error %d\n", error);
		goto fail;
	}

	error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma,
	    PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating sense buffer DMA memory, error %d\n",
		    error);
		goto fail;
	}

	for (i = 0; i < sc->hcb_cnt; ++i) {
		hcb = sc->hcbs + i;

		error = bus_dmamap_create(sc->sc_dmat, PVSCSI_MAXPHYS,
		    PVSCSI_MAXPHYS_SEGS, PVSCSI_MAXPHYS, 0,
		    BUS_DMA_WAITOK, &hcb->dma_map);
		if (error) {
			aprint_normal_dev(sc->dev,
			    "Error creating dma map for hcb %d, error %d\n",
			    i, error);
			goto fail;
		}

		hcb->sc = sc;
		hcb->dma_map_offset = PVSCSI_SENSE_LENGTH * i;
		hcb->dma_map_size = PVSCSI_SENSE_LENGTH;
		hcb->sense_buffer =
		    (void *)((char *)sc->sense_buffer_dma.vaddr +
		    PVSCSI_SENSE_LENGTH * i);
		hcb->sense_buffer_paddr = sc->sense_buffer_dma.paddr +
		    PVSCSI_SENSE_LENGTH * i;

		hcb->sg_list =
		    (struct pvscsi_sg_list *)((char *)sc->sg_list_dma.vaddr +
		    sizeof(struct pvscsi_sg_list) * i);
		hcb->sg_list_paddr =
		    sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i;
		hcb->sg_list_offset = sizeof(struct pvscsi_sg_list) * i;
	}

	SLIST_INIT(&sc->free_list);
	for (i = (sc->hcb_cnt - 1); i >= 0; --i) {
		hcb = sc->hcbs + i;
		SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
	}

fail:
	if (error) {
		pvscsi_dma_free_per_hcb(sc, i);
	}

	return (error);
}

static void
pvscsi_free_rings(struct pvscsi_softc *sc)
{

	bus_dmamap_sync(sc->sc_dmat, sc->rings_state_dma.map,
	    0, sc->rings_state_dma.size,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->req_ring_dma.map,
	    0, sc->req_ring_dma.size,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->cmp_ring_dma.map,
	    0, sc->cmp_ring_dma.size,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	pvscsi_dma_free(sc, &sc->rings_state_dma);
	pvscsi_dma_free(sc, &sc->req_ring_dma);
	pvscsi_dma_free(sc, &sc->cmp_ring_dma);
	if (sc->use_msg) {
		pvscsi_dma_free(sc, &sc->msg_ring_dma);
	}
}

static int
pvscsi_allocate_rings(struct pvscsi_softc *sc)
{
	int error;

	error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma,
	    &sc->rings_state_ppn, 1);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating rings state, error = %d\n", error);
		goto fail;
	}
	sc->rings_state = sc->rings_state_dma.vaddr;

	error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn,
	    sc->req_ring_num_pages);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating req ring pages, error = %d\n", error);
		goto fail;
	}
	sc->req_ring = sc->req_ring_dma.vaddr;

	error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn,
	    sc->cmp_ring_num_pages);
	if (error) {
		aprint_normal_dev(sc->dev,
		    "Error allocating cmp ring pages, error = %d\n", error);
		goto fail;
	}
	sc->cmp_ring = sc->cmp_ring_dma.vaddr;

	sc->msg_ring = NULL;
	if (sc->use_msg) {
		error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma,
		    sc->msg_ring_ppn, sc->msg_ring_num_pages);
		if (error) {
			aprint_normal_dev(sc->dev,
			    "Error allocating msg ring pages, error = %d\n",
			    error);
			goto fail;
		}
		sc->msg_ring = sc->msg_ring_dma.vaddr;
	}

fail:
	if (error) {
		pvscsi_free_rings(sc);
	}
	return (error);
}

static void
pvscsi_setup_rings(struct pvscsi_softc *sc)
{
	struct pvscsi_cmd_desc_setup_rings cmd;
	uint32_t i;

	memset(&cmd, 0, sizeof(cmd));

	cmd.rings_state_ppn = sc->rings_state_ppn;

	cmd.req_ring_num_pages = sc->req_ring_num_pages;
	for (i = 0; i < sc->req_ring_num_pages; ++i) {
		cmd.req_ring_ppns[i] = sc->req_ring_ppn[i];
	}

	cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages;
	for (i = 0; i < sc->cmp_ring_num_pages; ++i) {
		cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i];
	}

	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd));

	/*
	 * After setup, sync *_num_entries_log2 before use.  After this
	 * point they should be stable, so no need to sync again during
	 * use.
	 */
	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
	    sc->rings_state, req_num_entries_log2,
	    BUS_DMASYNC_POSTREAD);
	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
	    sc->rings_state, cmp_num_entries_log2,
	    BUS_DMASYNC_POSTREAD);
}

static int
pvscsi_hw_supports_msg(struct pvscsi_softc *sc)
{
	uint32_t status;

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
	    PVSCSI_CMD_SETUP_MSG_RING);
	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	return (status != -1);
}

static void
pvscsi_setup_msg_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_cmd_desc_setup_msg_ring cmd;
	uint32_t i;

	KASSERTMSG(sc->use_msg, "msg is not being used");

	memset(&cmd, 0, sizeof(cmd));

	cmd.num_pages = sc->msg_ring_num_pages;
	for (i = 0; i < sc->msg_ring_num_pages; ++i) {
		cmd.ring_ppns[i] = sc->msg_ring_ppn[i];
	}

	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));

	/*
	 * After setup, sync msg_num_entries_log2 before use.  After
	 * this point it should be stable, so no need to sync again
	 * during use.
	 */
	PVSCSI_DMA_SYNC_STATE(sc, &sc->rings_state_dma,
	    sc->rings_state, msg_num_entries_log2,
	    BUS_DMASYNC_POSTREAD);
}

static void
pvscsi_adapter_reset(struct pvscsi_softc *sc)
{
	aprint_normal_dev(sc->dev, "Adapter Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
#ifdef PVSCSI_DEBUG_LOGGING
	uint32_t val =
#endif
	pvscsi_read_intr_status(sc);

	DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val);
}

static void
pvscsi_bus_reset(struct pvscsi_softc *sc)
{

	aprint_normal_dev(sc->dev, "Bus Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0);
	pvscsi_process_cmp_ring(sc);

	DEBUG_PRINTF(2, sc->dev, "bus reset done\n");
}

static void
pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target)
{
	struct pvscsi_cmd_desc_reset_device cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.target = target;

	aprint_normal_dev(sc->dev, "Device reset for target %u\n", target);

	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof cmd);
	pvscsi_process_cmp_ring(sc);

	DEBUG_PRINTF(2, sc->dev, "device reset done\n");
}

static void
pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, struct pvscsi_hcb *hcb)
{
	struct pvscsi_cmd_desc_abort_cmd cmd;
	uint64_t context;

	pvscsi_process_cmp_ring(sc);

	if (hcb != NULL) {
		context = pvscsi_hcb_to_context(sc, hcb);

		memset(&cmd, 0, sizeof cmd);
		cmd.target = target;
		cmd.context = context;

		aprint_normal_dev(sc->dev,
		    "Abort for target %u context %llx\n",
		    target, (unsigned long long)context);

		pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
		pvscsi_process_cmp_ring(sc);

		DEBUG_PRINTF(2, sc->dev, "abort done\n");
	} else {
		DEBUG_PRINTF(1, sc->dev,
		    "Target %u hcb %p not found for abort\n", target, hcb);
	}
}

static int
pvscsi_probe(device_t dev, cfdata_t cf, void *aux)
{
	const struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI) {
		return 1;
	}
	return 0;
}

static void
pvscsi_timeout(void *arg)
{
	struct pvscsi_hcb *hcb = arg;
	struct scsipi_xfer *xs = hcb->xs;

	if (xs == NULL) {
		/* Already completed */
		return;
	}

	struct pvscsi_softc *sc = hcb->sc;

	mutex_enter(&sc->lock);

	scsipi_printaddr(xs->xs_periph);
	printf("command timeout, CDB: ");
	scsipi_print_cdb(xs->cmd);
	printf("\n");

	switch (hcb->recovery) {
	case PVSCSI_HCB_NONE:
		hcb->recovery = PVSCSI_HCB_ABORT;
		pvscsi_abort(sc, hcb->e->target, hcb);
		callout_reset(&xs->xs_callout,
		    mstohz(PVSCSI_ABORT_TIMEOUT * 1000),
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_ABORT:
		hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
		pvscsi_device_reset(sc, hcb->e->target);
		callout_reset(&xs->xs_callout,
		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_DEVICE_RESET:
		hcb->recovery = PVSCSI_HCB_BUS_RESET;
		pvscsi_bus_reset(sc);
		callout_reset(&xs->xs_callout,
		    mstohz(PVSCSI_RESET_TIMEOUT * 1000),
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_BUS_RESET:
		pvscsi_adapter_reset(sc);
		break;
	}
	mutex_exit(&sc->lock);
}

static void
pvscsi_process_completion(struct pvscsi_softc *sc,
    struct pvscsi_ring_cmp_desc *e)
{
	struct pvscsi_hcb *hcb;
	struct scsipi_xfer *xs;
	uint32_t error = XS_NOERROR;
	uint32_t btstat;
	uint32_t sdstat;
	int op;

	hcb = pvscsi_context_to_hcb(sc, e->context);
	xs = hcb->xs;

	callout_stop(&xs->xs_callout);

	btstat = e->host_status;
	sdstat = e->scsi_status;

	xs->status = sdstat;
	xs->resid = xs->datalen - e->data_len;

	DEBUG_PRINTF(3, sc->dev,
	    "command context %llx btstat %d (%#x) sdstat %d (%#x)\n",
	    (unsigned long long)e->context, btstat, btstat, sdstat, sdstat);

	if ((xs->xs_control & XS_CTL_DATA_IN) == XS_CTL_DATA_IN) {
		op = BUS_DMASYNC_POSTREAD;
	} else {
		op = BUS_DMASYNC_POSTWRITE;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sense_buffer_dma.map,
	    hcb->dma_map_offset, hcb->dma_map_size, op);

	if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_OK) {
		DEBUG_PRINTF(3, sc->dev,
		    "completing command context %llx success\n",
		    (unsigned long long)e->context);
		xs->resid = 0;
	} else {
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			switch (sdstat) {
			case SCSI_OK:
				xs->resid = 0;
				error = XS_NOERROR;
				break;
			case SCSI_CHECK:
				error = XS_SENSE;
				xs->resid = 0;

				memset(&xs->sense, 0, sizeof(xs->sense));
				memcpy(&xs->sense, hcb->sense_buffer,
				    MIN(sizeof(xs->sense), e->sense_len));
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				error = XS_NOERROR;
				break;
			case SCSI_TERMINATED:
			// case SCSI_STATUS_TASK_ABORTED:
				DEBUG_PRINTF(1, sc->dev,
				    "xs: %p sdstat=0x%x\n", xs, sdstat);
				error = XS_DRIVER_STUFFUP;
				break;
			default:
				DEBUG_PRINTF(1, sc->dev,
				    "xs: %p sdstat=0x%x\n", xs, sdstat);
				error = XS_DRIVER_STUFFUP;
				break;
			}
			break;
		case BTSTAT_SELTIMEO:
			error = XS_SELTIMEOUT;
			break;
		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			// xs->resid = xs->datalen - c->data_len;
			error = XS_NOERROR;
			break;
		case BTSTAT_ABORTQUEUE:
		case BTSTAT_HATIMEOUT:
			error = XS_NOERROR;
			break;
		case BTSTAT_NORESPONSE:
		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			error = XS_RESET;
			break;
		case BTSTAT_SCSIPARITY:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_BUSFREE:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_INVPHASE:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_SENSFAILED:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_DISCONNECT:
		case BTSTAT_BADMSG:
		case BTSTAT_INVPARAM:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		case BTSTAT_HASOFTWARE:
		case BTSTAT_HAHARDWARE:
			error = XS_DRIVER_STUFFUP;
			DEBUG_PRINTF(1, sc->dev,
			    "xs: %p sdstat=0x%x\n", xs, sdstat);
			break;
		default:
			aprint_normal_dev(sc->dev,
			    "unknown hba status: 0x%x\n", btstat);
			error = XS_DRIVER_STUFFUP;
			break;
		}

		DEBUG_PRINTF(3, sc->dev,
		    "completing command context %llx btstat %x sdstat %x - error %x\n",
		    (unsigned long long)e->context, btstat, sdstat, error);
	}

	xs->error = error;
	pvscsi_hcb_put(sc, hcb);

	mutex_exit(&sc->lock);

	scsipi_done(xs);

	mutex_enter(&sc->lock);
}

static void
pvscsi_process_cmp_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_dma *ring_dma;
	struct pvscsi_ring_cmp_desc *ring;
	struct pvscsi_dma *s_dma;
	struct pvscsi_rings_state *s;
	struct pvscsi_ring_cmp_desc *e;
	uint32_t mask;

	KASSERT(mutex_owned(&sc->lock));

	s_dma = &sc->rings_state_dma;
	s = sc->rings_state;
	ring_dma = &sc->cmp_ring_dma;
	ring = sc->cmp_ring;
	mask = MASK(s->cmp_num_entries_log2);
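
	/*
	 * The prod/cons counters are free-running; only the low bits
	 * select a ring slot.  E.g. with cmp_num_entries_log2 == 7,
	 * mask == 127 and index 130 maps to slot 2.
	 */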

	for (;;) {
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_prod_idx,
		    BUS_DMASYNC_POSTREAD);
		size_t crpidx = s->cmp_prod_idx;
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_prod_idx,
		    BUS_DMASYNC_PREREAD);

		if (s->cmp_cons_idx == crpidx)
			break;

		size_t crcidx = s->cmp_cons_idx & mask;

		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, crcidx,
		    BUS_DMASYNC_POSTREAD);

		e = ring + crcidx;

		pvscsi_process_completion(sc, e);

		/*
		 * ensure completion processing reads happen before write to
		 * (increment of) cmp_cons_idx
		 */
		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, crcidx,
		    BUS_DMASYNC_PREREAD);

		/*
		 * XXX Not actually sure the `device' does DMA for
		 * s->cmp_cons_idx at all -- qemu doesn't.  If not, we
		 * can skip these DMA syncs.
		 */
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_cons_idx,
		    BUS_DMASYNC_POSTWRITE);
		s->cmp_cons_idx++;
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, cmp_cons_idx,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e)
{
	struct pvscsi_ring_msg_dev_status_changed *desc;

	switch (e->type) {
	case PVSCSI_MSG_DEV_ADDED:
	case PVSCSI_MSG_DEV_REMOVED: {
		desc = (struct pvscsi_ring_msg_dev_status_changed *)e;
		struct scsibus_softc *ssc = device_private(sc->sc_scsibus_dv);

		aprint_normal_dev(sc->dev, "MSG: device %s at scsi%u:%u:%u\n",
		    desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal",
		    desc->bus, desc->target, desc->lun[1]);

		if (desc->type == PVSCSI_MSG_DEV_ADDED) {
			if (scsi_probe_bus(ssc,
			    desc->target, desc->lun[1]) != 0) {
				aprint_normal_dev(sc->dev,
				    "Error creating path for dev change.\n");
				break;
			}
		} else {
			if (scsipi_target_detach(ssc->sc_channel,
			    desc->target, desc->lun[1],
			    DETACH_FORCE) != 0) {
				aprint_normal_dev(sc->dev,
				    "Error detaching target %d lun %d\n",
				    desc->target, desc->lun[1]);
			}
		}
	}	break;
	default:
		aprint_normal_dev(sc->dev, "Unknown msg type 0x%x\n", e->type);
	}
}

static void
pvscsi_process_msg_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_dma *ring_dma;
	struct pvscsi_ring_msg_desc *ring;
	struct pvscsi_dma *s_dma;
	struct pvscsi_rings_state *s;
	struct pvscsi_ring_msg_desc *e;
	uint32_t mask;

	KASSERT(mutex_owned(&sc->lock));

	s_dma = &sc->rings_state_dma;
	s = sc->rings_state;
	ring_dma = &sc->msg_ring_dma;
	ring = sc->msg_ring;
	mask = MASK(s->msg_num_entries_log2);

	for (;;) {
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_prod_idx,
		    BUS_DMASYNC_POSTREAD);
		size_t mpidx = s->msg_prod_idx;	/* dma read (device -> cpu) */
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_prod_idx,
		    BUS_DMASYNC_PREREAD);

		if (s->msg_cons_idx == mpidx)
			break;

		size_t mcidx = s->msg_cons_idx & mask;

		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, mcidx,
		    BUS_DMASYNC_POSTREAD);

		e = ring + mcidx;

		pvscsi_process_msg(sc, e);

		/*
		 * ensure message processing reads happen before write to
		 * (increment of) msg_cons_idx
		 */
		PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, mcidx,
		    BUS_DMASYNC_PREREAD);

		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_cons_idx,
		    BUS_DMASYNC_POSTWRITE);
		s->msg_cons_idx++;
		PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, msg_cons_idx,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
pvscsi_intr_locked(struct pvscsi_softc *sc)
{
	uint32_t val;

	KASSERT(mutex_owned(&sc->lock));

	val = pvscsi_read_intr_status(sc);

	if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
		pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED);
		pvscsi_process_cmp_ring(sc);
		if (sc->use_msg) {
			pvscsi_process_msg_ring(sc);
		}
	}
}

static int
pvscsi_intr(void *xsc)
{
	struct pvscsi_softc *sc;

	sc = xsc;

	mutex_enter(&sc->lock);
	pvscsi_intr_locked(xsc);
	mutex_exit(&sc->lock);

	return 1;
}

static void
pvscsi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t request, void *arg)
{
	struct pvscsi_softc *sc = device_private(chan->chan_adapter->adapt_dev);

	if (request == ADAPTER_REQ_SET_XFER_MODE) {
		struct scsipi_xfer_mode *xm = arg;

		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	} else if (request != ADAPTER_REQ_RUN_XFER) {
		DEBUG_PRINTF(1, sc->dev, "unhandled %d\n", request);
		return;
	}

	/* request is ADAPTER_REQ_RUN_XFER */
	struct scsipi_xfer *xs = arg;
	struct scsipi_periph *periph = xs->xs_periph;
#ifdef SCSIPI_DEBUG
	periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
#endif

	uint32_t req_num_entries_log2;
	struct pvscsi_dma *ring_dma;
	struct pvscsi_ring_req_desc *ring;
	struct pvscsi_ring_req_desc *e;
	struct pvscsi_dma *s_dma;
	struct pvscsi_rings_state *s;
	struct pvscsi_hcb *hcb;

	if (xs->cmdlen < 0 || xs->cmdlen > sizeof(e->cdb)) {
		DEBUG_PRINTF(1, sc->dev, "bad cmdlen %zu > %zu\n",
		    (size_t)xs->cmdlen, sizeof(e->cdb));
		/* not a temporary condition */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_done(xs);
		return;
	}

	ring_dma = &sc->req_ring_dma;
	ring = sc->req_ring;
	s_dma = &sc->rings_state_dma;
	s = sc->rings_state;

	hcb = NULL;
	req_num_entries_log2 = s->req_num_entries_log2;

	/* Protect against multiple senders */
	mutex_enter(&sc->lock);

	if (s->req_prod_idx - s->cmp_cons_idx >=
	    (1 << req_num_entries_log2)) {
		aprint_normal_dev(sc->dev,
		    "Not enough room on completion ring.\n");
		xs->error = XS_RESOURCE_SHORTAGE;
		goto finish_xs;
	}

	if (xs->cmdlen > sizeof(e->cdb)) {
		DEBUG_PRINTF(1, sc->dev, "cdb length %u too large\n",
		    xs->cmdlen);
		xs->error = XS_DRIVER_STUFFUP;
		goto finish_xs;
	}

	hcb = pvscsi_hcb_get(sc);
	if (hcb == NULL) {
		aprint_normal_dev(sc->dev, "No free hcbs.\n");
		xs->error = XS_RESOURCE_SHORTAGE;
		goto finish_xs;
	}

	hcb->xs = xs;

	const size_t rridx = s->req_prod_idx & MASK(req_num_entries_log2);
	PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, rridx, BUS_DMASYNC_POSTWRITE);
	e = ring + rridx;

	memset(e, 0, sizeof(*e));
	e->bus = 0;
	e->target = periph->periph_target;
	e->lun[1] = periph->periph_lun;
	e->data_addr = 0;
	e->data_len = xs->datalen;
	e->vcpu_hint = cpu_index(curcpu());
	e->flags = 0;

	e->cdb_len = xs->cmdlen;
	memcpy(e->cdb, xs->cmd, xs->cmdlen);

	e->sense_addr = 0;
	e->sense_len = sizeof(xs->sense);
	if (e->sense_len > 0) {
		e->sense_addr = hcb->sense_buffer_paddr;
	}
	//e->tag = xs->xs_tag_type;
	e->tag = MSG_SIMPLE_Q_TAG;

	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST;
		break;
	case XS_CTL_DATA_OUT:
		e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE;
		break;
	default:
		e->flags |= PVSCSI_FLAG_CMD_DIR_NONE;
		break;
	}

	e->context = pvscsi_hcb_to_context(sc, hcb);
	hcb->e = e;

	DEBUG_PRINTF(3, sc->dev,
	    " queuing command %02x context %llx\n", e->cdb[0],
	    (unsigned long long)e->context);

	int flags;
	flags = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE;
	flags |= (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;

	int error = bus_dmamap_load(sc->sc_dmat, hcb->dma_map,
	    xs->data, xs->datalen, NULL, flags);

	if (error) {
		if (error == ENOMEM || error == EAGAIN) {
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
			xs->error = XS_DRIVER_STUFFUP;
		}
		DEBUG_PRINTF(1, sc->dev,
		    "xs: %p load error %d data %p len %d\n",
		    xs, error, xs->data, xs->datalen);
		goto error_load;
	}

	int op = (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE;
	int nseg = hcb->dma_map->dm_nsegs;
	bus_dma_segment_t *segs = hcb->dma_map->dm_segs;
	if (nseg != 0) {
		if (nseg > 1) {
			struct pvscsi_sg_element *sge;

			KASSERTMSG(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT,
			    "too many sg segments");

			sge = hcb->sg_list->sge;
			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;

			for (size_t i = 0; i < nseg; ++i) {
				sge[i].addr = segs[i].ds_addr;
				sge[i].length = segs[i].ds_len;
				sge[i].flags = 0;
			}

			e->data_addr = hcb->sg_list_paddr;

			bus_dmamap_sync(sc->sc_dmat,
			    sc->sg_list_dma.map, hcb->sg_list_offset,
			    sizeof(*sge) * nseg, BUS_DMASYNC_PREWRITE);
		} else {
			e->data_addr = segs->ds_addr;
		}

		bus_dmamap_sync(sc->sc_dmat, hcb->dma_map, 0,
		    xs->datalen, op);
	} else {
		e->data_addr = 0;
	}

	/*
	 * Ensure request record writes happen before write to (increment of)
	 * req_prod_idx.
	 */
	PVSCSI_DMA_SYNC_RING(sc, ring_dma, ring, rridx, BUS_DMASYNC_PREWRITE);

	uint8_t cdb0 = e->cdb[0];

	/* handle timeout */
	if ((xs->xs_control & XS_CTL_POLL) == 0) {
		int timeout = mstohz(xs->timeout);
		/* start expire timer */
		if (timeout == 0)
			timeout = 1;
		callout_reset(&xs->xs_callout, timeout, pvscsi_timeout, hcb);
	}

	PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_prod_idx,
	    BUS_DMASYNC_POSTWRITE);
	s->req_prod_idx++;

	/*
	 * Ensure req_prod_idx write (increment) happens before
	 * IO is kicked (via a write).
	 */
	PVSCSI_DMA_SYNC_STATE(sc, s_dma, s, req_prod_idx,
	    BUS_DMASYNC_PREWRITE);

	pvscsi_kick_io(sc, cdb0);
	mutex_exit(&sc->lock);

	return;

error_load:
	pvscsi_hcb_put(sc, hcb);

finish_xs:
	mutex_exit(&sc->lock);
	scsipi_done(xs);
}

static void
pvscsi_free_interrupts(struct pvscsi_softc *sc)
{

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_pihp != NULL) {
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
		sc->sc_pihp = NULL;
	}
}

static int
pvscsi_setup_interrupts(struct pvscsi_softc *sc,
    const struct pci_attach_args *pa)
{
	int use_msix;
	int use_msi;
	int counts[PCI_INTR_TYPE_SIZE];

	for (size_t i = 0; i < PCI_INTR_TYPE_SIZE; i++) {
		counts[i] = 1;
	}

	use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix);
	use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi);

	if (!use_msix) {
		counts[PCI_INTR_TYPE_MSIX] = 0;
	}
	if (!use_msi) {
		counts[PCI_INTR_TYPE_MSI] = 0;
	}

	/* Allocate and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, counts, PCI_INTR_TYPE_MSIX)) {
		aprint_error_dev(sc->dev, "can't allocate handler\n");
		goto fail;
	}

	char intrbuf[PCI_INTRSTR_LEN];
	const pci_chipset_tag_t pc = pa->pa_pc;
	char const *intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish_xname(pc, sc->sc_pihp[0], IPL_BIO,
	    pvscsi_intr, sc, device_xname(sc->dev));
	if (sc->sc_ih == NULL) {
		pci_intr_release(pc, sc->sc_pihp, 1);
		sc->sc_pihp = NULL;
		aprint_error_dev(sc->dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	pci_intr_setattr(pc, sc->sc_pihp, PCI_INTR_MPSAFE, true);

	aprint_normal_dev(sc->dev, "interrupting at %s\n", intrstr);

	return (0);

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_pihp != NULL) {
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
		sc->sc_pihp = NULL;
	}
	if (sc->sc_mems) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	return 1;
}

static void
pvscsi_free_all(struct pvscsi_softc *sc)
{

	pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt);

	if (sc->hcbs) {
		kmem_free(sc->hcbs, sc->hcb_cnt * sizeof(*sc->hcbs));
	}

	pvscsi_free_rings(sc);

	pvscsi_free_interrupts(sc);

	if (sc->sc_mems) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}
}

static inline void
pci_enable_busmaster(device_t dev, const pci_chipset_tag_t pc,
    const pcitag_t tag)
{
	pcireg_t pci_cmd_word;

	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
	}
}

static void
pvscsi_attach(device_t parent, device_t dev, void *aux)
{
	const struct pci_attach_args *pa = aux;
	struct pvscsi_softc *sc;
	int rid;
	int error;
	int max_queue_depth;
	int adapter_queue_size;

	sc = device_private(dev);
	sc->dev = dev;

	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;

	mutex_init(&sc->lock, MUTEX_DEFAULT, IPL_BIO);

	sc->sc_pc = pa->pa_pc;
	pci_enable_busmaster(dev, pa->pa_pc, pa->pa_tag);

	pci_aprint_devinfo_fancy(pa, "virtual disk controller",
	    VMWARE_PVSCSI_DEVSTR, true);

	/*
	 * Map the device.  All devices support memory-mapped access.
	 */
	bool memh_valid;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t mems;
	pcireg_t regt;

	for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;
	    rid += sizeof(regt)) {
		regt = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rid);
		if (PCI_MAPREG_TYPE(regt) == PCI_MAPREG_TYPE_MEM)
			break;
	}

	if (rid >= PCI_MAPREG_END) {
		aprint_error_dev(dev,
		    "unable to locate device registers\n");
		return;
	}

	memh_valid = (pci_mapreg_map(pa, rid, regt, 0, &memt, &memh,
	    NULL, &mems) == 0);
	if (!memh_valid) {
		aprint_error_dev(dev,
		    "unable to map device registers\n");
		return;
	}
	sc->sc_memt = memt;
	sc->sc_memh = memh;
	sc->sc_mems = mems;

	if (pci_dma64_available(pa)) {
		sc->sc_dmat = pa->pa_dmat64;
		aprint_verbose_dev(sc->dev, "64-bit DMA\n");
	} else {
		aprint_verbose_dev(sc->dev, "32-bit DMA\n");
		sc->sc_dmat = pa->pa_dmat;
	}

	error = pvscsi_setup_interrupts(sc, pa);
	if (error) {
		aprint_normal_dev(dev, "Interrupt setup failed\n");
		pvscsi_free_all(sc);
		return;
	}

	sc->max_targets = pvscsi_get_max_targets(sc);

	sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) &&
	    pvscsi_hw_supports_msg(sc);
	sc->msg_ring_num_pages = sc->use_msg ? 1 : 0;

	sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages",
	    pvscsi_request_ring_pages);
	if (sc->req_ring_num_pages <= 0) {
		if (sc->max_targets <= 16) {
			sc->req_ring_num_pages =
			    PVSCSI_DEFAULT_NUM_PAGES_REQ_RING;
		} else {
			sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
		}
	} else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) {
		sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
	}
	sc->cmp_ring_num_pages = sc->req_ring_num_pages;

	max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth",
	    pvscsi_max_queue_depth);

	adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) /
	    sizeof(struct pvscsi_ring_req_desc);
	if (max_queue_depth > 0) {
		adapter_queue_size = MIN(adapter_queue_size, max_queue_depth);
	}
	adapter_queue_size = MIN(adapter_queue_size,
	    PVSCSI_MAX_REQ_QUEUE_DEPTH);

	aprint_normal_dev(sc->dev, "Use Msg: %d\n", sc->use_msg);
	aprint_normal_dev(sc->dev, "Max targets: %d\n", sc->max_targets);
	aprint_normal_dev(sc->dev, "REQ num pages: %d\n",
	    sc->req_ring_num_pages);
	aprint_normal_dev(sc->dev, "CMP num pages: %d\n",
	    sc->cmp_ring_num_pages);
	aprint_normal_dev(sc->dev, "MSG num pages: %d\n",
	    sc->msg_ring_num_pages);
	aprint_normal_dev(sc->dev, "Queue size: %d\n", adapter_queue_size);

	if (pvscsi_allocate_rings(sc)) {
		aprint_normal_dev(dev, "ring allocation failed\n");
		pvscsi_free_all(sc);
		return;
	}

	sc->hcb_cnt = adapter_queue_size;
	sc->hcbs = kmem_zalloc(sc->hcb_cnt * sizeof(*sc->hcbs), KM_SLEEP);

	if (pvscsi_dma_alloc_per_hcb(sc)) {
		aprint_normal_dev(dev, "error allocating per hcb dma memory\n");
		pvscsi_free_all(sc);
		return;
	}

	pvscsi_adapter_reset(sc);

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(adapter_queue_size, PVSCSI_CMD_PER_LUN);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = pvscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(PVSCSI_MAX_TARGET, 16); /* cap reasonably */
	chan->chan_nluns = MIN(PVSCSI_MAX_LUN, 1024); /* cap reasonably */
	chan->chan_id = PVSCSI_MAX_TARGET;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	pvscsi_setup_rings(sc);
	if (sc->use_msg) {
		pvscsi_setup_msg_ring(sc);
	}

	sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1);

	pvscsi_intr_enable(sc);

	sc->sc_scsibus_dv = config_found(sc->dev, &sc->sc_channel, scsiprint,
	    CFARGS_NONE);

	return;
}

static int
pvscsi_detach(device_t dev, int flags)
{
	struct pvscsi_softc *sc;

	sc = device_private(dev);

	pvscsi_intr_disable(sc);
	pvscsi_adapter_reset(sc);

	pvscsi_free_all(sc);

	mutex_destroy(&sc->lock);

	return (0);
}