/*	$NetBSD: vdsk.c,v 1.8 2021/08/07 16:19:05 thorpej Exp $	*/
/*	$OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipi_cd.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsiconf.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

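/*
 * DPRINTF takes a double-parenthesised argument list so that a plain
 * single-argument macro can forward a variable-length printf argument
 * list, e.g. DPRINTF(("lba = %lu\n", lba));.  Unless the kernel is
 * built with VDSK_DEBUG these calls compile away to nothing.
 */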
#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

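/*
 * Payload of the VIO ATTR_INFO message exchanged during the handshake:
 * the guest proposes a transfer mode and block size, and the vDisk
 * server fills in the disk type, media type, size and supported
 * operations.
 */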
struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1 */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

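/*
 * A single descriptor-ring entry.  Each request carries up to
 * MAXPHYS / PAGE_SIZE LDC cookies, one per page of the data buffer,
 * so a maximum-sized transfer always fits in one descriptor.
 */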
struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

#if OPENBSD_BUSDMA
struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);
#else
struct vdsk_dring *vdsk_dring_alloc(int);
void	vdsk_dring_free(struct vdsk_dring *);
#endif

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR	1
#define VDSK_MINOR	1

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsipi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	device_t	sc_dv;

	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff
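/*
 * The VIO handshake proceeds VER_INFO -> ATTR_INFO -> DRING_REG -> RDX,
 * with a SND bit set when we send each message and the matching ACK bit
 * set when the peer acknowledges it; VIO_ESTABLISHED is the OR of all
 * eight bits, so the session is up only once every step has completed.
 */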

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(device_t, cfdata_t, void *);
void	vdsk_attach(device_t, device_t, void *);
void	vdsk_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
	    void *);

CFATTACH_DECL_NEW(vdsk, sizeof(struct vdsk_softc),
    vdsk_match, vdsk_attach, NULL, NULL);

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

void	vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
int	vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *, int);
void	vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_done(struct scsipi_xfer *, int);

int
vdsk_match(device_t parent, cfdata_t match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}
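
/*
 * Attach: establish the LDC channel interrupts, program the transmit
 * and receive queues and the map table into the hypervisor, allocate
 * the descriptor ring, then poll for the VIO handshake to complete
 * before attaching the SCSI channel.
 */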
void
vdsk_attach(device_t parent, device_t self, void *aux)
{
	struct vdsk_softc *sc = device_private(self);
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	int err, s;
	int timeout;
	vaddr_t va;
	paddr_t pa;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx",
	    (long long unsigned int)ca->ca_tx_ino,
	    (long long unsigned int)ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, vdsk_tx_intr, sc);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, vdsk_rx_intr, sc);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

#if OPENBSD_BUSDMA
	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
#else
	lc->lc_txq = ldc_queue_alloc(VDSK_TX_ENTRIES);
	lc->lc_rxq = ldc_queue_alloc(VDSK_RX_ENTRIES);
	sc->sc_lm = ldc_map_alloc(2048);
#endif
#if OPENBSD_BUSDMA
	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
#else
	va = (vaddr_t)sc->sc_lm->lm_slot;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
#endif
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}
#if OPENBSD_BUSDMA
	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
#else
	sc->sc_vd = vdsk_dring_alloc(32);
#endif
	sc->sc_vsd = kmem_zalloc(32 * sizeof(*sc->sc_vsd), KM_SLEEP);

#if OPENBSD_BUSDMA
	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
#else
	va = (vaddr_t)sc->sc_vd->vd_desc;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	sc->sc_lm->lm_slot[0].entry = pa;
#endif
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	va = (vaddr_t)lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 10 * 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		printf("vio not established: %d\n", sc->sc_vio_state);
		return;
	}

	sc->sc_dv = self;

	sc->sc_adapter.adapt_dev = sc->sc_dv;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_adapter.adapt_max_periph = sc->sc_vd->vd_nentries - 1;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = vdsk_scsipi_request;

	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 2;	/* XXX why not 1? */
	sc->sc_channel.chan_nluns = 1;	/* XXX slices should be presented as luns? */
	sc->sc_channel.chan_id = 0;
	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);

	return;

free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
#if OPENBSD_BUSDMA
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
#else
	ldc_map_free(sc->sc_lm);
#endif
}

void
vdsk_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct vdsk_softc *sc;
	struct scsipi_xfer *xs;

	sc = device_private(chan->chan_adapter->adapt_dev);
	xs = arg;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		vdsk_scsi_cmd(sc, xs);
		break;
	case ADAPTER_REQ_GROW_RESOURCES:
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Ignored */
		break;
	default:
		panic("req unhandled: %x", req);
	}
}

int
vdsk_tx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	panic("%s: not verified yet", __func__);

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Tx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Tx link up\n"));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Tx link reset\n"));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}
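
/*
 * Receive interrupt: pick up the LDC queue state, restart the VIO
 * handshake on any link state change, and otherwise dequeue one
 * packet, dispatch it by type and advance the queue head past it.
 */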
int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL) {
		printf("hv_ldc_rx_get_state H_EINVAL\n");
		return (0);
	}
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Rx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Rx link up\n"));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Rx link reset\n"));
			ldc_send_vers(lc);
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
	case LDC_CTRL:
		ldc_rx_ctrl(lc, lp);
		break;

	case LDC_DATA:
		ldc_rx_data(lc, lp);
		break;

	default:
		DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
		    lp->ctrl));
		ldc_reset(lc);
		break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
	case VIO_TYPE_CTRL:
		if ((lp->env & LDC_FRAG_START) == 0 &&
		    (lp->env & LDC_FRAG_STOP) == 0)
			return;
		vdsk_rx_vio_ctrl(lc->lc_sc, vm);
		break;

	case VIO_TYPE_DATA:
		if ((lp->env & LDC_FRAG_START) == 0)
			return;
		vdsk_rx_vio_data(lc->lc_sc, vm);
		break;

	default:
		DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
		ldc_reset(lc);
		break;
	}
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		vdsk_rx_vio_ver_info(sc, tag);
		break;
	case VIO_ATTR_INFO:
		vdsk_rx_vio_attr_info(sc, tag);
		break;
	case VIO_DRING_REG:
		vdsk_rx_vio_dring_reg(sc, tag);
		break;
	case VIO_RDX:
		vdsk_rx_vio_rdx(sc, tag);
		break;
	default:
		DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/VER_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/VER_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_major = vi->major;
		sc->sc_minor = vi->minor;
		sc->sc_vio_state |= VIO_ACK_VER_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_vdisk_block_size = ai->vdisk_block_size;
		sc->sc_vdisk_size = ai->vdisk_size;
		if (sc->sc_major > 1 || sc->sc_minor >= 1)
			sc->sc_vd_mtype = ai->vd_mtype;
		else
			sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

		sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);
}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/DRING_REG\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/DRING_REG\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_dring_ident = dr->dring_ident;
		sc->sc_seq_no = 1;

		sc->sc_vio_state |= VIO_ACK_DRING_REG;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}
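
/*
 * RDX ("ready to exchange") is the final handshake step; once the
 * peer ACKs it the session is VIO_ESTABLISHED.  An ACK seen after a
 * channel reset means we reconnected, so any commands still held in
 * the soft descriptors are resubmitted onto the rebuilt ring.
 */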
void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		int prod;

		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;

		/*
		 * If this ACK is the result of a reconnect, we may
		 * have pending I/O that we need to resubmit.  We need
		 * to rebuild the ring descriptors though since the
		 * vDisk server on the other side may have touched
		 * them already.  So we just clean up the ring and the
		 * LDC map and resubmit the SCSI commands based on our
		 * soft descriptors.
		 */
		prod = sc->sc_tx_prod;
		sc->sc_tx_prod = sc->sc_tx_cons;
		sc->sc_tx_cnt = 0;
		sc->sc_lm->lm_next = 1;
		sc->sc_lm->lm_count = 1;
		while (sc->sc_tx_prod != prod)
			vdsk_submit_cmd(sc, sc->sc_vsd[sc->sc_tx_prod].vsd_xs);
		break;
	}

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
	case VIO_DRING_DATA:
		vdsk_rx_vio_dring_data(sc, tag);
		break;

	default:
		DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		struct scsipi_xfer *xs;
		int cons;

		cons = sc->sc_tx_cons;
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			xs = sc->sc_vsd[cons].vsd_xs;
			if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
				vdsk_complete_cmd(sc, xs, cons);
			cons++;
			cons &= (sc->sc_vd->vd_nentries - 1);
		}
		sc->sc_tx_cons = cons;
		break;
	}

	case VIO_SUBTYPE_NACK:
	{
		struct ldc_conn *lc = &sc->sc_lc;

		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		ldc_send_vers(lc);
		break;
	}

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = gettick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

#if OPENBSD_BUSDMA
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
#else
struct vdsk_dring *
vdsk_dring_alloc(int nentries)
#endif
{
	struct vdsk_dring *vd;
	bus_size_t size;
	vaddr_t va;
#if OPENBSD_BUSDMA
	int nsegs;
#endif
	int i;

	vd = kmem_zalloc(sizeof(struct vdsk_dring), KM_SLEEP);

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

#if OPENBSD_BUSDMA
	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0) {
		kmem_free(vd, sizeof(struct vdsk_dring));
		return (NULL);
	}

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, (void *)&va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, (void *)va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;
#else
	va = (vaddr_t)kmem_zalloc(size, KM_SLEEP);
#endif
	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

#if OPENBSD_BUSDMA
unmap:
	bus_dmamem_unmap(t, (void *)va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
	kmem_free(vd, sizeof(struct vdsk_dring));
#endif
	return (NULL);
}

#if OPENBSD_BUSDMA
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
#else
void
vdsk_dring_free(struct vdsk_dring *vd)
#endif
{
	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

#if OPENBSD_BUSDMA
	bus_dmamap_unload(t, vd->vd_map);
	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
#else
	kmem_free(vd->vd_desc, size);
#endif
	kmem_free(vd, sizeof(struct vdsk_dring));
}

void *
vdsk_io_get(void *xsc)
{
	struct vdsk_softc *sc = xsc;
	void *rv = sc;	/* just has to be !NULL */
	int s;

	panic("%s: not verified yet", __func__);

	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{
	struct vdsk_softc *sc = xsc;
	int s;

	panic("%s: not verified yet", __func__);

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}
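
/*
 * Translate an incoming scsipi command: READ/WRITE/SYNCHRONIZE CACHE
 * are queued on the descriptor ring as vDisk operations, INQUIRY and
 * READ CAPACITY are emulated locally from the attribute data, and the
 * remaining opcodes are completed as no-ops or errors.
 */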
void
vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	int timeout, s;
	int desc;

	DPRINTF(("vdsk_scsi_cmd() opcode %x\n", xs->cmd->opcode));

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case SCSI_SYNCHRONIZE_CACHE_10:
		break;

	case INQUIRY:
		vdsk_scsi_inq(sc, xs);
		return;

	case READ_CAPACITY_10:
		vdsk_scsi_capacity(sc, xs);
		return;

	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(sc, xs);
		return;

	case SCSI_TEST_UNIT_READY:
	case START_STOP:
	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
	case SCSI_MODE_SENSE_6:
	case SCSI_MAINTENANCE_IN:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	case SCSI_MODE_SENSE_10:
	case READ_TOC:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;

	default:
		panic("%s unhandled cmd 0x%02x\n",
		    __func__, xs->cmd->opcode);
	}

	s = splbio();
	desc = vdsk_submit_cmd(sc, xs);

	if (!ISSET(xs->xs_control, XS_CTL_POLL)) {
		splx(s);
		return;
	}

	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(sc, xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}

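/*
 * Queue one READ/WRITE/FLUSH on the descriptor ring: map each page of
 * the data buffer into a free LDC map-table slot, record one cookie
 * per page in the descriptor, mark it VIO_DESC_READY and tell the
 * server with a VIO_DRING_DATA message.  Returns the ring index used.
 */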
int
vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw_6 *rw6;
	struct scsipi_rw_10 *rw10;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	u_int64_t lba = 0;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		operation = VD_OP_BREAD;
		break;

	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		operation = VD_OP_BWRITE;
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
		operation = VD_OP_FLUSH;
		break;

	default:
		panic("%s unhandled cmd opcode 0x%x",
		    __func__, xs->cmd->opcode);
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has the
	 * same layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw6 = (struct scsi_rw_6 *)xs->cmd;
		lba = _3btol(rw6->addr) & (SRW_TOPADDR << 16 | 0xffff);
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsipi_rw_10 *)xs->cmd;
		lba = _4btol(rw10->addr);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
	}

	DPRINTF(("lba = %lu\n", lba));

	desc = sc->sc_tx_prod;
	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
	while (len > 0) {
		DPRINTF(("len = %d\n", len));
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pa = 0;
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;
		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;

		va += nbytes;
		len -= nbytes;
		ncookies++;
	}

	if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;

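	/*
	 * Make sure the descriptor contents are globally visible
	 * before it is marked ready for the vDisk server to process.
	 */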
	membar_Sync();

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

void
vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs, int desc)
{
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen -
	    sc->sc_vd->vd_desc[desc].size;

	/*
	 * scsi_done() called by vdsk_scsi_done() requires
	 * the kernel to be locked.
	 */
	KERNEL_LOCK(1, curlwp);
	vdsk_scsi_done(xs, error);
	KERNEL_UNLOCK_ONE(curlwp);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	vdsk_scsi_inquiry(sc, xs);
}

void
vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
	case VD_MEDIA_TYPE_CD:
	case VD_MEDIA_TYPE_DVD:
		inq.device = T_CDROM;
		break;

	case VD_MEDIA_TYPE_FIXED:
	default:
		inq.device = T_DIRECT;
		break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags3 |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

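/*
 * READ CAPACITY(10) can only report a 32-bit block address, so larger
 * disks are clamped to 0xffffffff, which tells the caller to try
 * READ CAPACITY(16) instead.
 */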
void
vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_read_capacity_10_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu block size %u\n",
	    __func__, capacity, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_read_capacity_16_data rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_vdisk_size - 1, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu block size %u\n",
	    __func__, sc->sc_vdisk_size - 1, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsipi_xfer *xs, int error)
{
	xs->error = error;

	scsipi_done(xs);
}