/*	$NetBSD: vioscsi.c,v 1.9 2017/03/07 22:03:04 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.9 2017/03/07 22:03:04 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

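/*
 * One request as it lives in DMA-able memory.  vr_req and vr_res are
 * deliberately the first two members: they are mapped together through
 * vr_control (see vioscsi_alloc_reqs()), while the SCSI payload is
 * mapped separately through vr_data.
 */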
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
};

struct vioscsi_softc {
	device_t		sc_dev;
	struct scsipi_adapter	sc_adapter;
	struct scsipi_channel	sc_channel;

	struct virtqueue	sc_vqs[3];
	struct vioscsi_req	*sc_reqs;
	int			sc_nreqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	u_int32_t		sc_seg_max;
};

/*
 * Each request uses at least two segments - one for the request header
 * and one for the response.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);
static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;
	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	uint32_t features;
	char buf[256];
	int rv;

	if (vsc->sc_child != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

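	/*
	 * Size each virtqueue for a MAXPHYS-byte transfer, which can span
	 * up to howmany(MAXPHYS, NBPG) pages once DMA-mapped, plus one
	 * additional segment.
	 */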
	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %zu\n", i);
			return;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = cmd_per_lun;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = max_target;
	chan->chan_nluns = max_lun;
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}

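/*
 * Translate scsipi transfer-control flags into bus_dma(9) flags: the
 * DMA direction and whether the map load may sleep both follow
 * directly from xs->xs_control.
 */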
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
	/*
	 * This can happen when we run out of queue slots.
	 */
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
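	/*
	 * For example, a target byte of 5 and LUN 3 yield the encoding
	 * 01 05 40 03 00 00 00 00 (0x4000 | lun in bytes 2-3).
	 */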
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0 /* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = (intptr_t)vr;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		vioscsi_req_put(sc, vr);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

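	/*
	 * Enqueue in the order the device expects: all device-readable
	 * descriptors (request header, then any data-out payload) before
	 * all device-writable ones (response header, then any data-in
	 * payload).
	 */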
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}

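/*
 * Completion path: sync the DMA maps back, translate the virtio-scsi
 * response into scsipi terms and hand the transfer back to the
 * midlayer.
 */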
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	/* Only sync the data map when a payload was actually mapped. */
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPOST(xs));

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		/*
		 * Status and residual count are only meaningful when the
		 * device actually processed the command, so set them here
		 * instead of clobbering the values the other cases choose.
		 */
		xs->status = vr->vr_res.status;
		xs->resid = vr->vr_res.residual;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		DPRINTF(("%s: bad target\n", __func__));
		memset(sense, 0, sizeof(*sense));
		sense->response_code = 0x70;
		sense->flags = SKEY_ILLEGAL_REQUEST;
		xs->error = XS_SENSE;
		xs->status = 0;
		xs->resid = 0;
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}

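/*
 * Per-virtqueue completion handler, called from the virtio interrupt
 * path: drain every finished slot and complete the request stored in
 * it.  Returns nonzero if any work was done.
 */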
static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(vsc->sc_child);
	int ret = 0;

	DPRINTF(("%s: enter\n", __func__));

	for (;;) {
		int r, slot;
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));
		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
		ret = 1;
	}

	DPRINTF(("%s: exit %d\n", __func__, ret));

	return ret;
}

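/*
 * Requests map 1:1 onto virtqueue slots, so acquiring a free request
 * is the same operation as reserving a descriptor slot with
 * virtio_enqueue_prep(), and releasing it gives the slot back.
 */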
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		return NULL;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;
}

static void
vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr - sc->sc_reqs;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);

	virtio_dequeue_commit(vsc, vq, slot);
}

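/*
 * Carve one contiguous DMA allocation into qsize request structures
 * and create the per-request DMA maps: a small control map covering
 * the request/response headers and a seg_max-segment data map for the
 * payload.
 */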
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

		/*
		 * The control map covers only vr_req and vr_res; as the
		 * first two members of the structure, their combined size
		 * is offsetof(struct vioscsi_req, vr_xs).
		 */
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/*
	 * Walk back over every slot we touched, including slot 0 and the
	 * one that failed half-way; the memory was zeroed, so maps that
	 * were never created are NULL and safely skipped.
	 */
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(vsc->sc_dmat, vaddr, allocsize);
	bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);

	return r;
}