/*	$NetBSD: vdsk.c,v 1.18.2.1 2025/08/02 05:56:09 perseant Exp $	*/
/*	$OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/kmem.h>
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipi_cd.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES 32
#define VDSK_RX_ENTRIES 32

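/*
 * Payload of the VIO ATTR_INFO message exchanged during the handshake;
 * the layout follows the vDisk protocol as this driver uses it.
 */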
struct vd_attr_info {
        struct vio_msg_tag tag;
        uint8_t  xfer_mode;
        uint8_t  vd_type;
        uint8_t  vd_mtype;
        uint8_t  _reserved1;
        uint32_t vdisk_block_size;
        uint64_t operations;
        uint64_t vdisk_size;
        uint64_t max_xfer_sz;
        uint64_t _reserved2[2];
};

#define VD_DISK_TYPE_SLICE 0x01
#define VD_DISK_TYPE_DISK  0x02

#define VD_MEDIA_TYPE_FIXED 0x01
#define VD_MEDIA_TYPE_CD    0x02
#define VD_MEDIA_TYPE_DVD   0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD        0x01
#define VD_OP_BWRITE       0x02
#define VD_OP_FLUSH        0x03
#define VD_OP_GET_WCE      0x04
#define VD_OP_SET_WCE      0x05
#define VD_OP_GET_VTOC     0x06
#define VD_OP_SET_VTOC     0x07
#define VD_OP_GET_DISKGEOM 0x08
#define VD_OP_SET_DISKGEOM 0x09
#define VD_OP_GET_DEVID    0x0b
#define VD_OP_GET_EFI      0x0c
#define VD_OP_SET_EFI      0x0d

/* vDisk version 1.1. */
#define VD_OP_SCSICMD      0x0a
#define VD_OP_RESET        0x0e
#define VD_OP_GET_ACCESS   0x0f
#define VD_OP_SET_ACCESS   0x10
#define VD_OP_GET_CAPACITY 0x11

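/*
 * One descriptor-ring entry. A single request spans at most MAXPHYS
 * bytes, so MAXPHYS / PAGE_SIZE LDC cookies (one per page) always
 * suffice.
 */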
struct vd_desc {
        struct vio_dring_hdr hdr;
        uint64_t req_id;
        uint8_t  operation;
        uint8_t  slice;
        uint16_t _reserved1;
        uint32_t status;
        uint64_t offset;
        uint64_t size;
        uint32_t ncookies;
        uint32_t _reserved2;
        struct ldc_cookie cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE 0xff

struct vdsk_dring {
        bus_dmamap_t vd_map;
        bus_dma_segment_t vd_seg;
        struct vd_desc *vd_desc;
        int vd_nentries;
};

#if OPENBSD_BUSDMA
struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);
#else
struct vdsk_dring *vdsk_dring_alloc(int);
void vdsk_dring_free(struct vdsk_dring *);
#endif

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR 1
#define VDSK_MINOR 1

struct vdsk_soft_desc {
        int vsd_map_idx[MAXPHYS / PAGE_SIZE];
        struct scsipi_xfer *vsd_xs;
        int vsd_ncookies;
};

struct vdsk_softc {
        device_t sc_dv;

        struct scsipi_adapter sc_adapter;
        struct scsipi_channel sc_channel;

        bus_space_tag_t sc_bustag;
        bus_dma_tag_t sc_dmatag;

        void *sc_tx_ih;
        void *sc_rx_ih;

        struct ldc_conn sc_lc;

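        /*
         * VIO handshake progress. Each SND bit is set when we send the
         * corresponding message, each ACK bit when the peer acknowledges
         * it; VIO_ESTABLISHED is the union of all eight bits.
         */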
        uint16_t sc_vio_state;
#define VIO_SND_VER_INFO   0x0001
#define VIO_ACK_VER_INFO   0x0002
#define VIO_SND_ATTR_INFO  0x0004
#define VIO_ACK_ATTR_INFO  0x0008
#define VIO_SND_DRING_REG  0x0010
#define VIO_ACK_DRING_REG  0x0020
#define VIO_SND_RDX        0x0040
#define VIO_ACK_RDX        0x0080
#define VIO_ESTABLISHED    0x00ff

        uint16_t sc_major;
        uint16_t sc_minor;

        uint32_t sc_local_sid;
        uint64_t sc_dring_ident;
        uint64_t sc_seq_no;

        int sc_tx_cnt;
        int sc_tx_prod;
        int sc_tx_cons;

        struct ldc_map *sc_lm;
        struct vdsk_dring *sc_vd;
        struct vdsk_soft_desc *sc_vsd;

        uint32_t sc_vdisk_block_size;
        uint64_t sc_vdisk_size;
        uint8_t sc_vd_mtype;
};

int vdsk_match(device_t, cfdata_t, void *);
void vdsk_attach(device_t, device_t, void *);
void vdsk_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
    void *);

CFATTACH_DECL_NEW(vdsk, sizeof(struct vdsk_softc),
    vdsk_match, vdsk_attach, NULL, NULL);

int vdsk_tx_intr(void *);
int vdsk_rx_intr(void *);

void vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void vdsk_ldc_reset(struct ldc_conn *);
void vdsk_ldc_start(struct ldc_conn *);

void vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void vdsk_send_attr_info(struct vdsk_softc *);
void vdsk_send_dring_reg(struct vdsk_softc *);
void vdsk_send_rdx(struct vdsk_softc *);

void *vdsk_io_get(void *);
void vdsk_io_put(void *, void *);

void vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
int vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *, int);
void vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_mode_sense(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_mode_sense_10(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_mode_select(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_read_toc(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_test_unit_ready(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_start_stop(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_prevent_allow_medium_removal(struct vdsk_softc *sc,
    struct scsipi_xfer *);
void vdsk_scsi_maintenance_in(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_report_luns(struct vdsk_softc *sc, struct scsipi_xfer *);
void vdsk_scsi_done(struct scsipi_xfer *, int);

int
vdsk_match(device_t parent, cfdata_t match, void *aux)
{
        struct cbus_attach_args *ca = aux;

        if (strcmp(ca->ca_name, "disk") == 0)
                return (1);

        return (0);
}

void
vdsk_attach(device_t parent, device_t self, void *aux)
{
        struct vdsk_softc *sc = device_private(self);
        struct cbus_attach_args *ca = aux;
        struct ldc_conn *lc;
        int err, s;
        int timeout;
        vaddr_t va;
        paddr_t pa;

        sc->sc_bustag = ca->ca_bustag;
        sc->sc_dmatag = ca->ca_dmatag;

        printf(": ivec 0x%llx, 0x%llx",
            (long long unsigned int)ca->ca_tx_ino,
            (long long unsigned int)ca->ca_rx_ino);

        /*
         * Un-configure queues before registering interrupt handlers,
         * such that we don't get any stale LDC packets or events.
         */
        hv_ldc_tx_qconf(ca->ca_id, 0, 0);
        hv_ldc_rx_qconf(ca->ca_id, 0, 0);

        sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
            IPL_BIO, vdsk_tx_intr, sc);
        sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
            IPL_BIO, vdsk_rx_intr, sc);
        if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
                printf(", can't establish interrupt\n");
                return;
        }

        lc = &sc->sc_lc;
        lc->lc_id = ca->ca_id;
        lc->lc_sc = sc;
        lc->lc_reset = vdsk_ldc_reset;
        lc->lc_start = vdsk_ldc_start;
        lc->lc_rx_data = vdsk_rx_data;

#if OPENBSD_BUSDMA
        lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
#else
        lc->lc_txq = ldc_queue_alloc(VDSK_TX_ENTRIES);
#endif
#if OPENBSD_BUSDMA
        lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
#else
        lc->lc_rxq = ldc_queue_alloc(VDSK_RX_ENTRIES);
#endif
#if OPENBSD_BUSDMA
        sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
#else
        sc->sc_lm = ldc_map_alloc(2048);
#endif
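        /*
         * Hand the shared map table to the hypervisor; every page we
         * export to the vDisk server gets an entry in this table.
         */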
#if OPENBSD_BUSDMA
        err = hv_ldc_set_map_table(lc->lc_id,
            sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
#else
        va = (vaddr_t)sc->sc_lm->lm_slot;
        pa = 0;
        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                panic("pmap_extract failed %lx\n", va);
        err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
#endif
        if (err != H_EOK) {
                printf("hv_ldc_set_map_table %d\n", err);
                goto free_map;
        }
#if OPENBSD_BUSDMA
        sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
#else
        sc->sc_vd = vdsk_dring_alloc(32);
#endif
        sc->sc_vsd = kmem_zalloc(32 * sizeof(*sc->sc_vsd), KM_SLEEP);

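        /*
         * Export the descriptor ring through map-table slot 0 so the
         * vDisk server can read and write the descriptors directly.
         */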
#if OPENBSD_BUSDMA
        sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
#else
        va = (vaddr_t)sc->sc_vd->vd_desc;
        pa = 0;
        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                panic("pmap_extract failed %lx\n", va);

        sc->sc_lm->lm_slot[0].entry = pa;
#endif
        sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
        sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
        sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
        sc->sc_lm->lm_next = 1;
        sc->sc_lm->lm_count = 1;

        va = lc->lc_txq->lq_va;
        pa = 0;
        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
        err = hv_ldc_tx_qconf(lc->lc_id,
            lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
        err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
        if (err != H_EOK)
                printf("hv_ldc_tx_qconf %d\n", err);

        va = (vaddr_t)lc->lc_rxq->lq_va;
        pa = 0;
        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
        err = hv_ldc_rx_qconf(lc->lc_id,
            lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
        err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
        if (err != H_EOK)
                printf("hv_ldc_rx_qconf %d\n", err);

        cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
        cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

        ldc_send_vers(lc);

        printf("\n");

        /*
         * Interrupts aren't enabled during autoconf, so poll for VIO
         * peer-to-peer handshake completion.
         */
        s = splbio();
        timeout = 10 * 1000;
        do {
                if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
                        break;

                delay(1000);
        } while (--timeout > 0);
        splx(s);

        if (sc->sc_vio_state != VIO_ESTABLISHED) {
                printf("vio not established: %d\n", sc->sc_vio_state);
                return;
        }

        sc->sc_dv = self;

        sc->sc_adapter.adapt_dev = sc->sc_dv;
        sc->sc_adapter.adapt_nchannels = 1;
        sc->sc_adapter.adapt_openings = sc->sc_vd->vd_nentries - 1;
        sc->sc_adapter.adapt_max_periph = sc->sc_vd->vd_nentries - 1;

        sc->sc_adapter.adapt_minphys = minphys;
        sc->sc_adapter.adapt_request = vdsk_scsipi_request;

        sc->sc_channel.chan_adapter = &sc->sc_adapter;
        sc->sc_channel.chan_bustype = &scsi_bustype;
        sc->sc_channel.chan_channel = 0;
        sc->sc_channel.chan_ntargets = 2;	/* XXX why not 1? */
        sc->sc_channel.chan_nluns = 1;	/* XXX slices should be presented as luns? */
        sc->sc_channel.chan_id = 0;
        sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

        config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);

        return;

free_map:
        hv_ldc_set_map_table(lc->lc_id, 0, 0);
#if OPENBSD_BUSDMA
        ldc_map_free(sc->sc_dmatag, sc->sc_lm);
#else
        ldc_map_free(sc->sc_lm);
#endif
}

void
vdsk_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
        struct vdsk_softc *sc;
        struct scsipi_xfer *xs;

        sc = device_private(chan->chan_adapter->adapt_dev);
        xs = arg;

        switch (req) {
        case ADAPTER_REQ_RUN_XFER:
                vdsk_scsi_cmd(sc, xs);
                break;
        case ADAPTER_REQ_GROW_RESOURCES:
        case ADAPTER_REQ_SET_XFER_MODE:
                /* Ignored */
                break;
        default:
                panic("req unhandled: %x", req);
        }
}

int
vdsk_tx_intr(void *arg)
{
        panic("%s: not verified yet", __FUNCTION__);

        struct vdsk_softc *sc = arg;
        struct ldc_conn *lc = &sc->sc_lc;
        uint64_t tx_head, tx_tail, tx_state;

        hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
        if (tx_state != lc->lc_tx_state) {
                switch (tx_state) {
                case LDC_CHANNEL_DOWN:
                        DPRINTF(("Tx link down\n"));
                        break;
                case LDC_CHANNEL_UP:
                        DPRINTF(("Tx link up\n"));
                        break;
                case LDC_CHANNEL_RESET:
                        DPRINTF(("Tx link reset\n"));
                        break;
                }
                lc->lc_tx_state = tx_state;
        }

        return (1);
}

int
vdsk_rx_intr(void *arg)
{
        struct vdsk_softc *sc = arg;
        struct ldc_conn *lc = &sc->sc_lc;
        uint64_t rx_head, rx_tail, rx_state;
        struct ldc_pkt *lp;
        int err;

        err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
        if (err == H_EINVAL) {
                printf("hv_ldc_rx_get_state H_EINVAL\n");
                return (0);
        }
        if (err != H_EOK) {
                printf("hv_ldc_rx_get_state %d\n", err);
                return (0);
        }

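        /*
         * Any LDC channel state change invalidates the VIO session:
         * reset the handshake state and restart version negotiation
         * once the link is up again.
         */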
        if (rx_state != lc->lc_rx_state) {
                sc->sc_vio_state = 0;
                lc->lc_tx_seqid = 0;
                lc->lc_state = 0;
                switch (rx_state) {
                case LDC_CHANNEL_DOWN:
                        DPRINTF(("Rx link down\n"));
                        break;
                case LDC_CHANNEL_UP:
                        DPRINTF(("Rx link up\n"));
                        ldc_send_vers(lc);
                        break;
                case LDC_CHANNEL_RESET:
                        DPRINTF(("Rx link reset\n"));
                        ldc_send_vers(lc);
                        break;
                }
                lc->lc_rx_state = rx_state;
                hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
                return (1);
        }

        if (rx_head == rx_tail)
                return (0);

        lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
        switch (lp->type) {
        case LDC_CTRL:
                ldc_rx_ctrl(lc, lp);
                break;

        case LDC_DATA:
                ldc_rx_data(lc, lp);
                break;

        default:
                DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
                    lp->ctrl));
                ldc_reset(lc);
                break;
        }

        if (lc->lc_state == 0)
                return (1);

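        /*
         * Advance the queue head; the queue sizes are powers of two,
         * so masking with (nentries * packet size - 1) wraps around.
         */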
        rx_head += sizeof(*lp);
        rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
        err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
        if (err != H_EOK)
                printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

        return (1);
}

void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
        struct vio_msg *vm = (struct vio_msg *)lp;

        switch (vm->type) {
        case VIO_TYPE_CTRL:
                if ((lp->env & LDC_FRAG_START) == 0 &&
                    (lp->env & LDC_FRAG_STOP) == 0)
                        return;
                vdsk_rx_vio_ctrl(lc->lc_sc, vm);
                break;

        case VIO_TYPE_DATA:
                if ((lp->env & LDC_FRAG_START) == 0)
                        return;
                vdsk_rx_vio_data(lc->lc_sc, vm);
                break;

        default:
                DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
                ldc_reset(lc);
                break;
        }
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
        struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

        switch (tag->stype_env) {
        case VIO_VER_INFO:
                vdsk_rx_vio_ver_info(sc, tag);
                break;
        case VIO_ATTR_INFO:
                vdsk_rx_vio_attr_info(sc, tag);
                break;
        case VIO_DRING_REG:
                vdsk_rx_vio_dring_reg(sc, tag);
                break;
        case VIO_RDX:
                vdsk_rx_vio_rdx(sc, tag);
                break;
        default:
                DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
                break;
        }
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
        struct vio_ver_info *vi = (struct vio_ver_info *)tag;

        switch (vi->tag.stype) {
        case VIO_SUBTYPE_INFO:
                DPRINTF(("CTRL/INFO/VER_INFO\n"));
                break;

        case VIO_SUBTYPE_ACK:
                DPRINTF(("CTRL/ACK/VER_INFO\n"));
                if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
                        ldc_reset(&sc->sc_lc);
                        break;
                }
                sc->sc_major = vi->major;
                sc->sc_minor = vi->minor;
                sc->sc_vio_state |= VIO_ACK_VER_INFO;
                break;

        default:
                DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
                break;
        }

        if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
                vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
        struct vd_attr_info *ai = (struct vd_attr_info *)tag;

        switch (ai->tag.stype) {
        case VIO_SUBTYPE_INFO:
                DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
                break;

        case VIO_SUBTYPE_ACK:
                DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
                if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
                        ldc_reset(&sc->sc_lc);
                        break;
                }

                sc->sc_vdisk_block_size = ai->vdisk_block_size;
                sc->sc_vdisk_size = ai->vdisk_size;
                if (sc->sc_major > 1 || sc->sc_minor >= 1)
                        sc->sc_vd_mtype = ai->vd_mtype;
                else
                        sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

                sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
                break;

        default:
                DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
                break;
        }

        if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
                vdsk_send_dring_reg(sc);
}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
        struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

        switch (dr->tag.stype) {
        case VIO_SUBTYPE_INFO:
                DPRINTF(("CTRL/INFO/DRING_REG\n"));
                break;

        case VIO_SUBTYPE_ACK:
                DPRINTF(("CTRL/ACK/DRING_REG\n"));
                if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
                        ldc_reset(&sc->sc_lc);
                        break;
                }

                sc->sc_dring_ident = dr->dring_ident;
                sc->sc_seq_no = 1;

                sc->sc_vio_state |= VIO_ACK_DRING_REG;
                break;

        default:
                DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
                break;
        }

        if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
                vdsk_send_rdx(sc);
}

void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
        switch (tag->stype) {
        case VIO_SUBTYPE_INFO:
                DPRINTF(("CTRL/INFO/RDX\n"));
                break;

        case VIO_SUBTYPE_ACK:
        {
                int prod;

                DPRINTF(("CTRL/ACK/RDX\n"));
                if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
                        ldc_reset(&sc->sc_lc);
                        break;
                }
                sc->sc_vio_state |= VIO_ACK_RDX;

                /*
                 * If this ACK is the result of a reconnect, we may
                 * have pending I/O that we need to resubmit. We need
                 * to rebuild the ring descriptors though since the
                 * vDisk server on the other side may have touched
                 * them already. So we just clean up the ring and the
                 * LDC map and resubmit the SCSI commands based on our
                 * soft descriptors.
                 */
                prod = sc->sc_tx_prod;
                sc->sc_tx_prod = sc->sc_tx_cons;
                sc->sc_tx_cnt = 0;
                sc->sc_lm->lm_next = 1;
                sc->sc_lm->lm_count = 1;
                for (int i = sc->sc_lm->lm_next; i < sc->sc_lm->lm_nentries; i++)
                        sc->sc_lm->lm_slot[i].entry = 0;
                while (sc->sc_tx_prod != prod)
                        vdsk_submit_cmd(sc, sc->sc_vsd[sc->sc_tx_prod].vsd_xs);
                break;
        }

        default:
                DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
                break;
        }
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
        struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

        if (sc->sc_vio_state != VIO_ESTABLISHED) {
                DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
                    tag->stype_env));
                return;
        }

        switch (tag->stype_env) {
        case VIO_DRING_DATA:
                vdsk_rx_vio_dring_data(sc, tag);
                break;

        default:
                DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
                break;
        }
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
        switch (tag->stype) {
        case VIO_SUBTYPE_INFO:
                DPRINTF(("DATA/INFO/DRING_DATA\n"));
                break;

        case VIO_SUBTYPE_ACK:
        {
                struct scsipi_xfer *xs;
                int cons;

                cons = sc->sc_tx_cons;
                while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
                        xs = sc->sc_vsd[cons].vsd_xs;
                        if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
                                vdsk_complete_cmd(sc, xs, cons);
                        cons++;
                        cons &= (sc->sc_vd->vd_nentries - 1);
                }
                sc->sc_tx_cons = cons;
                break;
        }

        case VIO_SUBTYPE_NACK:
        {
                struct ldc_conn *lc = &sc->sc_lc;

                DPRINTF(("DATA/NACK/DRING_DATA\n"));
                ldc_send_vers(lc);
                break;
        }

        default:
                DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
                break;
        }
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{
        struct vdsk_softc *sc = lc->lc_sc;

        sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
        struct vdsk_softc *sc = lc->lc_sc;

        vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
        struct ldc_conn *lc = &sc->sc_lc;
        int err;

        err = ldc_send_unreliable(lc, msg, len);
        if (err)
                printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
        struct vio_ver_info vi;

        /* Allocate new session ID. */
        sc->sc_local_sid = gettick();

        bzero(&vi, sizeof(vi));
        vi.tag.type = VIO_TYPE_CTRL;
        vi.tag.stype = VIO_SUBTYPE_INFO;
        vi.tag.stype_env = VIO_VER_INFO;
        vi.tag.sid = sc->sc_local_sid;
        vi.major = major;
        vi.minor = minor;
        vi.dev_class = VDEV_DISK;
        vdsk_sendmsg(sc, &vi, sizeof(vi));

        sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
        struct vd_attr_info ai;

        bzero(&ai, sizeof(ai));
        ai.tag.type = VIO_TYPE_CTRL;
        ai.tag.stype = VIO_SUBTYPE_INFO;
        ai.tag.stype_env = VIO_ATTR_INFO;
        ai.tag.sid = sc->sc_local_sid;
        ai.xfer_mode = VIO_DRING_MODE;
        ai.vdisk_block_size = DEV_BSIZE;
        ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
        DPRINTF(("vdisk_block_size %u\n", ai.vdisk_block_size));
        DPRINTF(("max_xfer_sz %lu\n", ai.max_xfer_sz));
        vdsk_sendmsg(sc, &ai, sizeof(ai));

        sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

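/*
 * Register our descriptor ring with the vDisk server. The ring was
 * exported through map-table slot 0 above, so its cookie address
 * (slot index << PAGE_SHIFT) is 0.
 */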
void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
        struct vio_dring_reg dr;

        bzero(&dr, sizeof(dr));
        dr.tag.type = VIO_TYPE_CTRL;
        dr.tag.stype = VIO_SUBTYPE_INFO;
        dr.tag.stype_env = VIO_DRING_REG;
        dr.tag.sid = sc->sc_local_sid;
        dr.dring_ident = 0;
        dr.num_descriptors = sc->sc_vd->vd_nentries;
        dr.descriptor_size = sizeof(struct vd_desc);
        dr.options = VIO_TX_RING | VIO_RX_RING;
        dr.ncookies = 1;
        dr.cookie[0].addr = 0;
        dr.cookie[0].size = PAGE_SIZE;
        vdsk_sendmsg(sc, &dr, sizeof(dr));

        sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
        struct vio_rdx rdx;

        bzero(&rdx, sizeof(rdx));
        rdx.tag.type = VIO_TYPE_CTRL;
        rdx.tag.stype = VIO_SUBTYPE_INFO;
        rdx.tag.stype_env = VIO_RDX;
        rdx.tag.sid = sc->sc_local_sid;
        vdsk_sendmsg(sc, &rdx, sizeof(rdx));

        sc->sc_vio_state |= VIO_SND_RDX;
}

#if OPENBSD_BUSDMA
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
#else
struct vdsk_dring *
vdsk_dring_alloc(int nentries)
#endif
{
        struct vdsk_dring *vd;
        bus_size_t size;
        vaddr_t va;
#if OPENBSD_BUSDMA
        int nsegs;
#endif
        int i;

        vd = kmem_zalloc(sizeof(struct vdsk_dring), KM_SLEEP);

        size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

#if OPENBSD_BUSDMA
        if (bus_dmamap_create(t, size, 1, size, 0,
            BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
                return (NULL);

        if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
            &nsegs, BUS_DMA_NOWAIT) != 0)
                goto destroy;

        if (bus_dmamem_map(t, &vd->vd_seg, 1, size, (void *)&va,
            BUS_DMA_NOWAIT) != 0)
                goto free;

        if (bus_dmamap_load(t, vd->vd_map, (void *)va, size, NULL,
            BUS_DMA_NOWAIT) != 0)
                goto unmap;
#else
        va = (vaddr_t)kmem_zalloc(size, KM_SLEEP);
#endif
        vd->vd_desc = (struct vd_desc *)va;
        vd->vd_nentries = nentries;
        bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
        for (i = 0; i < vd->vd_nentries; i++)
                vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
        return (vd);

#if OPENBSD_BUSDMA
unmap:
        bus_dmamem_unmap(t, (void *)va, size);
free:
        bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
        bus_dmamap_destroy(t, vd->vd_map);
#endif
        return (NULL);
}

#if OPENBSD_BUSDMA
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
#else
void
vdsk_dring_free(struct vdsk_dring *vd)
#endif
{
        bus_size_t size;

        size = vd->vd_nentries * sizeof(struct vd_desc);
        size = roundup(size, PAGE_SIZE);

#if OPENBSD_BUSDMA
        bus_dmamap_unload(t, vd->vd_map);

        bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
        bus_dmamem_free(t, &vd->vd_seg, 1);
        bus_dmamap_destroy(t, vd->vd_map);
#else
        kmem_free(vd->vd_desc, size);
#endif
        kmem_free(vd, sizeof(struct vdsk_dring));
}

void *
vdsk_io_get(void *xsc)
{
        panic("%s: not verified yet", __FUNCTION__);

        struct vdsk_softc *sc = xsc;
        void *rv = sc; /* just has to be !NULL */
        int s;

        s = splbio();
        if (sc->sc_vio_state != VIO_ESTABLISHED ||
            sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
                rv = NULL;
        else
                sc->sc_tx_cnt++;
        splx(s);

        return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{
        panic("%s: not verified yet", __FUNCTION__);

        struct vdsk_softc *sc = xsc;
        int s;

#ifdef DIAGNOSTIC
        if (sc != io)
                panic("vdsk_io_put: unexpected io");
#endif

        s = splbio();
        sc->sc_tx_cnt--;
        splx(s);
}

void
vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        int timeout, s;
        int desc;

        DPRINTF(("vdsk_scsi_cmd() opcode %x\n", xs->cmd->opcode));

        switch (xs->cmd->opcode) {
        case SCSI_READ_6_COMMAND:
                DPRINTF(("SCSI_READ_6_COMMAND\n"));
                break;

        case READ_10:
                DPRINTF(("SCSI_READ_10\n"));
                break;

        case READ_12:
                DPRINTF(("SCSI_READ_12\n"));
                break;

        case READ_16:
                DPRINTF(("SCSI_READ_16\n"));
                break;

        case SCSI_WRITE_6_COMMAND:
                DPRINTF(("SCSI_WRITE_6\n"));
                break;

        case WRITE_10:
                DPRINTF(("SCSI_WRITE_10\n"));
                break;

        case WRITE_12:
                DPRINTF(("SCSI_WRITE_12\n"));
                break;

        case WRITE_16:
                DPRINTF(("SCSI_WRITE_16\n"));
                break;

        case SCSI_SYNCHRONIZE_CACHE_10:
                DPRINTF(("SCSI_SYNCHRONIZE_CACHE_10\n"));
                /* No need to flush read-only device types. */
                if (sc->sc_vd_mtype == VD_MEDIA_TYPE_CD ||
                    sc->sc_vd_mtype == VD_MEDIA_TYPE_DVD) {
                        vdsk_scsi_done(xs, XS_NOERROR);
                        return;
                }
                break;

        case INQUIRY:
                DPRINTF(("INQUIRY\n"));
                vdsk_scsi_inq(sc, xs);
                return;

        case READ_CAPACITY_10:
                DPRINTF(("READ_CAPACITY_10\n"));
                vdsk_scsi_capacity(sc, xs);
                return;

        case READ_CAPACITY_16:
                DPRINTF(("READ_CAPACITY_16\n"));
                vdsk_scsi_capacity16(sc, xs);
                return;

        case SCSI_REPORT_LUNS:
                DPRINTF(("REPORT_LUNS\n"));
                vdsk_scsi_report_luns(sc, xs);
                return;

        case SCSI_TEST_UNIT_READY:
                DPRINTF(("TEST_UNIT_READY\n"));
                vdsk_scsi_test_unit_ready(sc, xs);
                return;

        case START_STOP:
                DPRINTF(("START_STOP\n"));
                vdsk_scsi_start_stop(sc, xs);
                return;

        case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
                DPRINTF(("PREVENT_ALLOW_MEDIUM_REMOVAL\n"));
                vdsk_scsi_prevent_allow_medium_removal(sc, xs);
                return;

        case SCSI_MODE_SENSE_6:
                DPRINTF(("SCSI_MODE_SENSE_6\n"));
                vdsk_scsi_mode_sense(sc, xs);
                return;

        case SCSI_MODE_SELECT_6:
                DPRINTF(("MODE_SELECT_6\n"));
                vdsk_scsi_mode_select(sc, xs);
                return;

        case SCSI_MAINTENANCE_IN:
                DPRINTF(("MAINTENANCE_IN\n"));
                vdsk_scsi_maintenance_in(sc, xs);
                return;

        case SCSI_MODE_SENSE_10:
                panic("SCSI_MODE_SENSE_10 (not implemented)");
                vdsk_scsi_mode_sense_10(sc, xs);
                return;

        case READ_TOC:
                DPRINTF(("READ_TOC (not implemented)\n"));
                vdsk_scsi_read_toc(sc, xs);
                return;

        default:
                panic("%s unhandled cmd 0x%02x",
                    __func__, xs->cmd->opcode);
        }

        s = splbio();
        desc = vdsk_submit_cmd(sc, xs);

        if (!ISSET(xs->xs_control, XS_CTL_POLL)) {
                splx(s);
                return;
        }

        timeout = 1000;
        do {
                if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
                        break;

                delay(1000);
        } while (--timeout > 0);
        if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
                vdsk_complete_cmd(sc, xs, desc);
        } else {
                ldc_reset(&sc->sc_lc);
                vdsk_scsi_done(xs, XS_TIMEOUT);
        }
        splx(s);
}

int
vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        struct ldc_map *map = sc->sc_lm;
        struct vio_dring_msg dm;
        struct scsi_rw_6 *rw6;
        struct scsipi_rw_10 *rw10;
        struct scsipi_rw_12 *rw12;
        struct scsipi_rw_16 *rw16;
        uint64_t lba = 0;
        uint8_t operation;
        vaddr_t va;
        paddr_t pa;
        psize_t nbytes;
        int len, ncookies;
        int desc;

        switch (xs->cmd->opcode) {
        case SCSI_READ_6_COMMAND:
        case READ_10:
        case READ_12:
        case READ_16:
                DPRINTF(("VD_OP_BREAD\n"));
                operation = VD_OP_BREAD;
                break;

        case SCSI_WRITE_6_COMMAND:
        case WRITE_10:
        case WRITE_12:
        case WRITE_16:
                DPRINTF(("VD_OP_BWRITE\n"));
                operation = VD_OP_BWRITE;
                break;

        case SCSI_SYNCHRONIZE_CACHE_10:
                DPRINTF(("VD_OP_FLUSH\n"));
                operation = VD_OP_FLUSH;
                break;

        default:
                panic("%s unhandled cmd opcode 0x%x",
                    __func__, xs->cmd->opcode);
        }

        /*
         * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has the
         * same layout as 10-byte READ/WRITE commands.
         */
        if (xs->cmdlen == 6) {
                rw6 = (struct scsi_rw_6 *)xs->cmd;
                lba = _3btol(rw6->addr) & (SRW_TOPADDR << 16 | 0xffff);
        } else if (xs->cmdlen == 10) {
                rw10 = (struct scsipi_rw_10 *)xs->cmd;
                lba = _4btol(rw10->addr);
        } else if (xs->cmdlen == 12) {
                rw12 = (struct scsipi_rw_12 *)xs->cmd;
                lba = _4btol(rw12->addr);
        } else if (xs->cmdlen == 16) {
                rw16 = (struct scsipi_rw_16 *)xs->cmd;
                lba = _8btol(rw16->addr);
        }

        DPRINTF(("lba = %lu\n", lba));

        desc = sc->sc_tx_prod;
        ncookies = 0;
        len = xs->datalen;
        va = (vaddr_t)xs->data;
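        /*
         * Build one LDC cookie per page of the transfer: claim a free
         * map-table slot for each physical page and encode the slot
         * index and page offset in the cookie address.
         */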
        while (len > 0) {
                DPRINTF(("len = %u\n", len));
                KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
                pa = 0;
                pmap_extract(pmap_kernel(), va, &pa);
                while (map->lm_slot[map->lm_next].entry != 0) {
                        map->lm_next++;
                        map->lm_next &= (map->lm_nentries - 1);
                }
                map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
                map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
                map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
                map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
                map->lm_count++;

                nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

                sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
                    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
                sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

                sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
                va += nbytes;
                len -= nbytes;
                ncookies++;
        }

        if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
                sc->sc_vd->vd_desc[desc].hdr.ack = 1;
        else
                sc->sc_vd->vd_desc[desc].hdr.ack = 0;
        sc->sc_vd->vd_desc[desc].operation = operation;
        sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
        sc->sc_vd->vd_desc[desc].status = 0xffffffff;
        sc->sc_vd->vd_desc[desc].offset = lba;
        sc->sc_vd->vd_desc[desc].size = xs->datalen;
        sc->sc_vd->vd_desc[desc].ncookies = ncookies;

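        /*
         * Make sure the descriptor is fully written out before the
         * peer can observe it marked READY.
         */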
        membar_Sync();

        sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

        sc->sc_vsd[desc].vsd_xs = xs;
        sc->sc_vsd[desc].vsd_ncookies = ncookies;

        sc->sc_tx_prod++;
        sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

        bzero(&dm, sizeof(dm));
        dm.tag.type = VIO_TYPE_DATA;
        dm.tag.stype = VIO_SUBTYPE_INFO;
        dm.tag.stype_env = VIO_DRING_DATA;
        dm.tag.sid = sc->sc_local_sid;
        dm.seq_no = sc->sc_seq_no++;
        dm.dring_ident = sc->sc_dring_ident;
        dm.start_idx = dm.end_idx = desc;
        vdsk_sendmsg(sc, &dm, sizeof(dm));

        return desc;
}

void
vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs, int desc)
{
        struct ldc_map *map = sc->sc_lm;
        int cookie, idx;
        int error;

        cookie = 0;
        while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
                idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
                map->lm_slot[idx].entry = 0;
                map->lm_count--;
        }

        error = XS_NOERROR;
        if (sc->sc_vd->vd_desc[desc].status != 0)
                error = XS_DRIVER_STUFFUP;
        xs->resid = xs->datalen - sc->sc_vd->vd_desc[desc].size;

        /*
         * scsi_done() called by vdsk_scsi_done() requires
         * the kernel to be locked.
         */
        KERNEL_LOCK(1, curlwp);
        vdsk_scsi_done(xs, error);
        KERNEL_UNLOCK_ONE(curlwp);

        sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_inquiry(sc, xs);
}

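/*
 * The virtual disk is not a real SCSI target, so synthesize the
 * INQUIRY data locally from what we learned during the handshake.
 */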
void
vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        struct scsipi_inquiry_data inq;
        char buf[5];

        bzero(&inq, sizeof(inq));

        switch (sc->sc_vd_mtype) {
        case VD_MEDIA_TYPE_CD:
        case VD_MEDIA_TYPE_DVD:
                inq.device = T_CDROM;
                inq.dev_qual2 = SID_REMOVABLE;
                bcopy("Virtual CDROM   ", inq.product, sizeof(inq.product));
                break;
        case VD_MEDIA_TYPE_FIXED:
                inq.device = T_DIRECT;
                bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
                break;
        default:
                panic("Unhandled media type %d", sc->sc_vd_mtype);
        }
        inq.version = 0x05; /* SPC-3 */
        inq.response_format = 2;
        inq.additional_length = 32;
        inq.flags3 |= SID_CmdQue;
        bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
        snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
        bcopy(buf, inq.revision, sizeof(inq.revision));

        bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_mode_sense(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        struct {
                struct scsi_mode_parameter_header_6 hdr;
                struct scsi_general_block_descriptor blk_desc;
        } data;

        bzero(&data, sizeof(data));
        data.hdr.blk_desc_len = sizeof(data.blk_desc);
        _lto3b(sc->sc_vdisk_block_size, data.blk_desc.blklen);
        bcopy(&data, xs->data, MIN(sizeof(data), xs->datalen));
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_mode_sense_10(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_mode_select(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_read_toc(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_maintenance_in(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_start_stop(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_prevent_allow_medium_removal(struct vdsk_softc *sc,
    struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_test_unit_ready(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        struct scsipi_read_capacity_10_data rcd;
        uint64_t capacity;

        bzero(&rcd, sizeof(rcd));

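        /*
         * READ CAPACITY(10) reports the last LBA; clamp to 0xffffffff
         * so larger disks make the initiator fall back to
         * READ CAPACITY(16).
         */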
        capacity = sc->sc_vdisk_size - 1;
        if (capacity > 0xffffffff)
                capacity = 0xffffffff;

        _lto4b(capacity, rcd.addr);
        _lto4b(sc->sc_vdisk_block_size, rcd.length);

        DPRINTF(("%s() capacity %lu block size %u\n",
            __FUNCTION__, capacity, sc->sc_vdisk_block_size));

        bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        struct scsipi_read_capacity_16_data rcd;
        uint64_t capacity;

        bzero(&rcd, sizeof(rcd));

        capacity = sc->sc_vdisk_size - 1;

        _lto8b(capacity, rcd.addr);
        _lto4b(sc->sc_vdisk_block_size, rcd.length);

        DPRINTF(("%s() capacity %lu block size %u\n",
            __FUNCTION__, capacity, sc->sc_vdisk_block_size));

        bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_report_luns(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
        vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsipi_xfer *xs, int error)
{
        xs->error = error;

        scsipi_done(xs);
}