/*	$NetBSD: vdsk.c,v 1.19 2025/02/23 20:48:43 palle Exp $	*/
/*	$OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/kmem.h>
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipi_cd.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1. */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

#if OPENBSD_BUSDMA
struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);
#else
struct vdsk_dring *vdsk_dring_alloc(int);
void	vdsk_dring_free(struct vdsk_dring *);
#endif

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR	1
#define VDSK_MINOR	1

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsipi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	device_t	sc_dv;

	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(device_t, cfdata_t, void *);
void	vdsk_attach(device_t, device_t, void *);
void	vdsk_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
    void *);

CFATTACH_DECL_NEW(vdsk, sizeof(struct vdsk_softc),
    vdsk_match, vdsk_attach, NULL, NULL);

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

void	vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
int	vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *, int);
void	vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_report_luns(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_done(struct scsipi_xfer *, int);

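/*
 * Autoconf glue: match the "disk" virtual-device node on the channel bus.
 */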
int
vdsk_match(device_t parent, cfdata_t match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}

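/*
 * Attach: establish the LDC channel to the vDisk server, set up the
 * transmit/receive queues, map table and descriptor ring, then poll
 * until the VIO handshake completes before registering the scsipi
 * channel.
 */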
void
vdsk_attach(device_t parent, device_t self, void *aux)
{
	struct vdsk_softc *sc = device_private(self);
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	int err, s;
	int timeout;
	vaddr_t va;
	paddr_t pa;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx",
	    (long long unsigned int)ca->ca_tx_ino,
	    (long long unsigned int)ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, vdsk_tx_intr, sc);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, vdsk_rx_intr, sc);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

#if OPENBSD_BUSDMA
	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
#else
	lc->lc_txq = ldc_queue_alloc(VDSK_TX_ENTRIES);
#endif
#if OPENBSD_BUSDMA
	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
#else
	lc->lc_rxq = ldc_queue_alloc(VDSK_RX_ENTRIES);
#endif
#if OPENBSD_BUSDMA
	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
#else
	sc->sc_lm = ldc_map_alloc(2048);
#endif
#if OPENBSD_BUSDMA
	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
#else
	va = (vaddr_t)sc->sc_lm->lm_slot;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
#endif
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}
#if OPENBSD_BUSDMA
	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
#else
	sc->sc_vd = vdsk_dring_alloc(32);
#endif
	sc->sc_vsd = kmem_zalloc(32 * sizeof(*sc->sc_vsd), KM_SLEEP);

#if OPENBSD_BUSDMA
	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
#else
	va = (vaddr_t)sc->sc_vd->vd_desc;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);

	sc->sc_lm->lm_slot[0].entry = pa;
#endif
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;
	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);
	va = (vaddr_t)lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 10 * 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		printf("vio not established: %d\n", sc->sc_vio_state);
		return;
	}

	sc->sc_dv = self;

	sc->sc_adapter.adapt_dev = sc->sc_dv;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_adapter.adapt_max_periph = sc->sc_vd->vd_nentries - 1;

	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = vdsk_scsipi_request;

	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 2;	/* XXX why not 1? */
	sc->sc_channel.chan_nluns = 1;	/* XXX slices should be presented as luns? */
	sc->sc_channel.chan_id = 0;
	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);

	return;

free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
#if OPENBSD_BUSDMA
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
#else
	ldc_map_free(sc->sc_lm);
#endif
}

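/*
 * scsipi glue: dispatch transfer requests to the vDisk backend.
 * Resource growth and transfer-mode changes are meaningless for a
 * virtual disk and are ignored.
 */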
void
vdsk_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{

	struct vdsk_softc *sc;
	struct scsipi_xfer *xs;

	sc = device_private(chan->chan_adapter->adapt_dev);

	xs = arg;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		vdsk_scsi_cmd(sc, xs);
		break;
	case ADAPTER_REQ_GROW_RESOURCES:
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Ignored */
		break;
	default:
		panic("req unhandled: %x", req);
	}

}

int
vdsk_tx_intr(void *arg)
{
	panic("%s: not verified yet", __FUNCTION__);

	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Tx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Tx link up\n"));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Tx link reset\n"));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}

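/*
 * RX interrupt: track LDC link-state changes (restarting the version
 * handshake on link up or reset), then process one packet from the
 * receive queue and advance the queue head.
 */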
int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL) {
		printf("hv_ldc_rx_get_state H_EINVAL\n");
		return (0);
	}
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Rx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Rx link up\n"));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Rx link reset\n"));
			ldc_send_vers(lc);
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
	case LDC_CTRL:
		ldc_rx_ctrl(lc, lp);
		break;

	case LDC_DATA:
		ldc_rx_data(lc, lp);
		break;

	default:
		DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
		    lp->ctrl));
		ldc_reset(lc);
		break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

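/*
 * Demultiplex an incoming VIO message by type: control messages drive
 * the handshake state machine, data messages carry descriptor-ring
 * updates.
 */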
void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
	case VIO_TYPE_CTRL:
		if ((lp->env & LDC_FRAG_START) == 0 &&
		    (lp->env & LDC_FRAG_STOP) == 0)
			return;
		vdsk_rx_vio_ctrl(lc->lc_sc, vm);
		break;

	case VIO_TYPE_DATA:
		if ((lp->env & LDC_FRAG_START) == 0)
			return;
		vdsk_rx_vio_data(lc->lc_sc, vm);
		break;

	default:
		DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
		ldc_reset(lc);
		break;
	}
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		vdsk_rx_vio_ver_info(sc, tag);
		break;
	case VIO_ATTR_INFO:
		vdsk_rx_vio_attr_info(sc, tag);
		break;
	case VIO_DRING_REG:
		vdsk_rx_vio_dring_reg(sc, tag);
		break;
	case VIO_RDX:
		vdsk_rx_vio_rdx(sc, tag);
		break;
	default:
		DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/VER_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/VER_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_major = vi->major;
		sc->sc_minor = vi->minor;
		sc->sc_vio_state |= VIO_ACK_VER_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_vdisk_block_size = ai->vdisk_block_size;
		DPRINTF(("vdisk_block_size %u\n", sc->sc_vdisk_block_size));
		sc->sc_vdisk_size = ai->vdisk_size;
		DPRINTF(("vdisk_size %lu\n", sc->sc_vdisk_size));
		if (sc->sc_major > 1 || sc->sc_minor >= 1)
			sc->sc_vd_mtype = ai->vd_mtype;
		else
			sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

		sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);

}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/DRING_REG\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/DRING_REG\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_dring_ident = dr->dring_ident;
		sc->sc_seq_no = 1;

		sc->sc_vio_state |= VIO_ACK_DRING_REG;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}

void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		int prod;

		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;

		/*
		 * If this ACK is the result of a reconnect, we may
		 * have pending I/O that we need to resubmit.  We need
		 * to rebuild the ring descriptors though since the
		 * vDisk server on the other side may have touched
		 * them already.  So we just clean up the ring and the
		 * LDC map and resubmit the SCSI commands based on our
		 * soft descriptors.
		 */
		prod = sc->sc_tx_prod;
		sc->sc_tx_prod = sc->sc_tx_cons;
		sc->sc_tx_cnt = 0;
		sc->sc_lm->lm_next = 1;
		sc->sc_lm->lm_count = 1;
		for (int i = sc->sc_lm->lm_next; i < sc->sc_lm->lm_nentries; i++)
			sc->sc_lm->lm_slot[i].entry = 0;
		while (sc->sc_tx_prod != prod)
			vdsk_submit_cmd(sc, sc->sc_vsd[sc->sc_tx_prod].vsd_xs);
		break;
	}

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
	case VIO_DRING_DATA:
		vdsk_rx_vio_dring_data(sc, tag);
		break;

	default:
		DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		struct scsipi_xfer *xs;
		int cons;

		cons = sc->sc_tx_cons;
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			xs = sc->sc_vsd[cons].vsd_xs;
			if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
				vdsk_complete_cmd(sc, xs, cons);
			cons++;
			cons &= (sc->sc_vd->vd_nentries - 1);
		}
		sc->sc_tx_cons = cons;
		break;
	}

	case VIO_SUBTYPE_NACK:
	{
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		struct ldc_conn *lc = &sc->sc_lc;
		ldc_send_vers(lc);
		break;
	}

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

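/*
 * Handshake transmit side: VER_INFO, ATTR_INFO, DRING_REG and RDX are
 * sent in sequence, each one triggered by the ACK of its predecessor.
 */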
void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = gettick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	DPRINTF(("vdisk_block_size %u\n", ai.vdisk_block_size));
	DPRINTF(("max_xfer_sz %lu\n", ai.max_xfer_sz));
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

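/*
 * Allocate the descriptor ring shared with the vDisk server.  The ring
 * is page aligned and exported to the peer through map-table slot 0
 * (a single PAGE_SIZE cookie, see vdsk_attach() and
 * vdsk_send_dring_reg()).
 */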
#if OPENBSD_BUSDMA
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
#else
struct vdsk_dring *
vdsk_dring_alloc(int nentries)
#endif
{

	struct vdsk_dring *vd;
	bus_size_t size;
	vaddr_t va;
#if OPENBSD_BUSDMA
	int nsegs;
#endif
	int i;

	vd = kmem_zalloc(sizeof(struct vdsk_dring), KM_SLEEP);

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

#if OPENBSD_BUSDMA
	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		return (NULL);

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, (void *)&va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, (void *)va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;
#else
	va = (vaddr_t)kmem_zalloc(size, KM_SLEEP);
#endif
	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

#if OPENBSD_BUSDMA
unmap:
	bus_dmamem_unmap(t, (void *)va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
#endif
	return (NULL);
}

#if OPENBSD_BUSDMA
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
#else
void
vdsk_dring_free(struct vdsk_dring *vd)
#endif
{

	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

#if OPENBSD_BUSDMA
	bus_dmamap_unload(t, vd->vd_map);

	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
#else
	kmem_free(vd->vd_desc, size);
#endif
	/* vd itself was allocated with sizeof(struct vdsk_dring), not size. */
	kmem_free(vd, sizeof(struct vdsk_dring));
}

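/*
 * scsipi resource hooks: an "io" here is just a slot in the descriptor
 * ring, so only a counter is maintained; no real object changes hands.
 */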
void *
vdsk_io_get(void *xsc)
{

	panic("%s: not verified yet", __FUNCTION__);

	struct vdsk_softc *sc = xsc;
	void *rv = sc; /* just has to be !NULL */
	int s;

	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{

	panic("%s: not verified yet", __FUNCTION__);

	struct vdsk_softc *sc = xsc;
	int s;

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}

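/*
 * Dispatch a SCSI command: translate READ/WRITE/SYNCHRONIZE CACHE into
 * vDisk operations, answer locally the commands a virtual disk can
 * emulate (INQUIRY, READ CAPACITY, ...), and fail the rest.  Polled
 * transfers spin here until their descriptor completes.
 */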
void
vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	int timeout, s;
	int desc;

	DPRINTF(("vdsk_scsi_cmd() opcode %x\n", xs->cmd->opcode));

	switch (xs->cmd->opcode) {

	case SCSI_READ_6_COMMAND:
		DPRINTF(("SCSI_READ_6_COMMAND\n"));
		break;

	case READ_10:
		DPRINTF(("SCSI_READ_10\n"));
		break;

	case READ_12:
		DPRINTF(("SCSI_READ_12\n"));
		break;

	case READ_16:
		DPRINTF(("SCSI_READ_16\n"));
		break;

	case SCSI_WRITE_6_COMMAND:
		DPRINTF(("SCSI_WRITE_6\n"));
		break;

	case WRITE_10:
		DPRINTF(("SCSI_WRITE_10\n"));
		break;

	case WRITE_12:
		DPRINTF(("SCSI_WRITE_12\n"));
		break;

	case WRITE_16:
		DPRINTF(("SCSI_WRITE_16\n"));
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
		DPRINTF(("SCSI_SYNCHRONIZE_CACHE_10\n"));
		break;

	case INQUIRY:
		DPRINTF(("INQUIRY\n"));
		vdsk_scsi_inq(sc, xs);
		return;

	case READ_CAPACITY_10:
		DPRINTF(("READ_CAPACITY_10\n"));
		vdsk_scsi_capacity(sc, xs);
		return;

	case READ_CAPACITY_16:
		DPRINTF(("READ_CAPACITY_16\n"));
		vdsk_scsi_capacity16(sc, xs);
		return;

	case SCSI_REPORT_LUNS:
		DPRINTF(("REPORT_LUNS\n"));
		vdsk_scsi_report_luns(sc, xs);
		return;

	case SCSI_TEST_UNIT_READY:
		DPRINTF(("TEST_UNIT_READY\n"));
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	case START_STOP:
		DPRINTF(("START_STOP\n"));
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
		DPRINTF(("PREVENT_ALLOW_MEDIUM_REMOVAL\n"));
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	case SCSI_MODE_SENSE_6:
		DPRINTF(("SCSI_MODE_SENSE_6 (not implemented)\n"));
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;

	case SCSI_MODE_SELECT_6:
		DPRINTF(("MODE_SELECT_6 (not implemented)\n"));
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;

	case SCSI_MAINTENANCE_IN:
		DPRINTF(("MAINTENANCE_IN\n"));
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	case SCSI_MODE_SENSE_10:
		DPRINTF(("SCSI_MODE_SENSE_10 (not implemented)\n"));
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;

	case READ_TOC:
		DPRINTF(("READ_TOC (not implemented)\n"));
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;

	default:
		panic("%s unhandled cmd 0x%02x\n",
		    __func__, xs->cmd->opcode);
	}

	s = splbio();
	desc = vdsk_submit_cmd(sc, xs);

	if (!ISSET(xs->xs_control, XS_CTL_POLL)) {
		splx(s);
		return;
	}
	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(sc, xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}

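/*
 * Fill in the next free ring descriptor for a READ/WRITE/FLUSH: decode
 * the LBA from the CDB, export the data buffer page by page through
 * free LDC map-table slots, and notify the server with a DRING_DATA
 * message.  Returns the descriptor index for the caller to poll on.
 */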
int
vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw_6 *rw6;
	struct scsipi_rw_10 *rw10;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	u_int64_t lba = 0;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd->opcode) {

	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		DPRINTF(("VD_OP_BREAD\n"));
		operation = VD_OP_BREAD;
		break;

	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		DPRINTF(("VD_OP_BWRITE\n"));
		operation = VD_OP_BWRITE;
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
		DPRINTF(("VD_OP_FLUSH\n"));
		operation = VD_OP_FLUSH;
		break;

	default:
		panic("%s unhandled cmd opcode 0x%x",
		    __func__, xs->cmd->opcode);
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has the same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw6 = (struct scsi_rw_6 *)xs->cmd;
		lba = _3btol(rw6->addr) & (SRW_TOPADDR << 16 | 0xffff);
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsipi_rw_10 *)xs->cmd;
		lba = _4btol(rw10->addr);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
	}

	DPRINTF(("lba = %lu\n", lba));

	desc = sc->sc_tx_prod;
	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
	while (len > 0) {
		DPRINTF(("len = %u\n", len));
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pa = 0;
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
		va += nbytes;
		len -= nbytes;
		ncookies++;
	}
	if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;

	membar_Sync();

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

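/*
 * Tear down a completed descriptor: release its LDC map-table slots,
 * derive the scsipi status from the descriptor and hand the transfer
 * back to the midlayer.
 */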
void
vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs, int desc)
{
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen -
	    sc->sc_vd->vd_desc[desc].size;

	/*
	 * scsi_done() called by vdsk_scsi_done() requires
	 * the kernel to be locked.
	 */
	KERNEL_LOCK(1, curlwp);
	vdsk_scsi_done(xs, error);
	KERNEL_UNLOCK_ONE(curlwp);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;

}

void
vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	vdsk_scsi_inquiry(sc, xs);
}

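/*
 * Synthesize INQUIRY data locally: the vDisk protocol has no INQUIRY
 * equivalent, so fake a SUN direct-access device (or CD-ROM, depending
 * on the media type negotiated in ATTR_INFO).
 */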
void
vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
	case VD_MEDIA_TYPE_CD:
	case VD_MEDIA_TYPE_DVD:
		inq.device = T_CDROM;
		inq.dev_qual2 = SID_REMOVABLE;
		bcopy("Virtual CDROM   ", inq.product, sizeof(inq.product));
		break;
	case VD_MEDIA_TYPE_FIXED:
		inq.device = T_DIRECT;
		bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
		break;
	default:
		panic("Unhandled media type %d\n", sc->sc_vd_mtype);
	}
	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags3 |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

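/*
 * READ CAPACITY(10) can report at most 0xffffffff blocks; larger disks
 * are clamped so the initiator falls back to READ CAPACITY(16).
 */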
void
vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_read_capacity_10_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu block size %u\n",
	    __FUNCTION__, capacity, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_read_capacity_16_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_vdisk_size - 1;

	_lto8b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu block size %u\n",
	    __FUNCTION__, capacity, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_report_luns(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsipi_xfer *xs, int error)
{
	xs->error = error;

	scsipi_done(xs);
}