/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.3 2015/10/30 21:18:16 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

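/*
 * Per-request bookkeeping.  The vr_req and vr_res headers sit at the
 * start of the structure and live in DMA-able memory: the device reads
 * vr_req and writes vr_res in place through the vr_control map, while
 * vr_data maps the caller's transfer buffer for the actual I/O.
 */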
struct vioscsi_req {
	struct virtio_scsi_req_hdr vr_req;
	struct virtio_scsi_res_hdr vr_res;
	struct scsipi_xfer *vr_xs;
	bus_dmamap_t vr_control;
	bus_dmamap_t vr_data;
};

struct vioscsi_softc {
	device_t sc_dev;
	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	struct virtqueue sc_vqs[3];
	struct vioscsi_req *sc_reqs;
	bus_dma_segment_t sc_reqs_segs[1];

	u_int32_t sc_seg_max;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int vioscsi_match(device_t, cfdata_t, void *);
static void vioscsi_attach(device_t, device_t, void *);

static int vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int vioscsi_vq_done(struct virtqueue *);
static void vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);

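/*
 * Virtqueue layout is fixed by the virtio-scsi specification:
 * queue 0 is the control queue, queue 1 the event queue, and
 * queues 2 and up carry SCSI requests.  This driver submits all
 * I/O on the first request queue, sc_vqs[2].
 */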
static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;
	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	uint32_t features;
	char buf[256];
	int rv;

	if (vsc->sc_child != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

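	/*
	 * Read the device limits from virtio config space:
	 * cmd_per_lun bounds the commands outstanding per LUN,
	 * seg_max caps the scatter/gather segments per request, and
	 * max_target/max_channel/max_lun describe the addressable
	 * topology.
	 */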
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %zu\n", i);
			return;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

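	/*
	 * The depth of the request queue bounds the number of
	 * commands that can be in flight; allocate one vioscsi_req
	 * per virtqueue slot so slot numbers index sc_reqs directly.
	 */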
	int qsize = sc->sc_vqs[2].vq_num;
	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = cmd_per_lun;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = max_target;
	chan->chan_nluns = max_lun;
	chan->chan_id = 0;

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}

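/*
 * Translate the scsipi transfer flags into bus_dma(9) flags:
 * direction (read vs. write), whether the load may sleep, and the
 * matching pre/post sync operations for the data map.
 */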
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
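	/*
	 * For example, scsipi target 3, LUN 5 encodes as
	 * { 0x01, 0x02, 0x40, 0x05, 0, 0, 0, 0 }; note the target
	 * byte is biased by -1 above.
	 */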
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
	stuffup:
		xs->error = XS_DRIVER_STUFFUP;
	nomore:
		// XXX: free req? The request from vioscsi_req_get() is
		// never returned to the virtqueue on this path.
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

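	/*
	 * Enqueue in the order the virtio spec requires: all
	 * device-readable buffers (request header, then write data)
	 * ahead of all device-writable buffers (response header, then
	 * read data).
	 */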
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
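	/*
	 * Busy-wait for completion by calling the interrupt handler
	 * directly: 1000 iterations of delay(1000) microseconds gives
	 * roughly a one second timeout.
	 */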
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}

static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		goto done;
	}

	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

done:
	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(vsc->sc_child);
	int ret = 0;

	DPRINTF(("%s: enter\n", __func__));

	for (;;) {
		int r, slot;
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));
		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
		ret = 1;
	}

	DPRINTF(("%s: exit %d\n", __func__, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto err1;
	}
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

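	/*
	 * The control map covers the start of the request structure up
	 * to (but not including) vr_xs, i.e. the vr_req and vr_res
	 * headers, so the single load below exposes both to the
	 * device.
	 */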
	r = bus_dmamap_create(vsc->sc_dmat,
	    offsetof(struct vioscsi_req, vr_xs), 1,
	    offsetof(struct vioscsi_req, vr_xs), 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create xs error %d\n", __func__, r));
		goto err2;
	}
	r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create data error %d\n", __func__, r));
		goto err3;
	}
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
	    BUS_DMA_NOWAIT);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_load ctrl error %d\n", __func__, r));
		goto err4;
	}

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;

err4:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
err3:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
err2:
	virtio_enqueue_abort(vsc, vq, slot);
err1:
	return NULL;
}

static void
vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr - sc->sc_reqs;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);

	virtio_dequeue_commit(vsc, vq, slot);
}

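/*
 * Carve a single contiguous DMA allocation into qsize request
 * structures, one per virtqueue slot, so slot numbers index sc_reqs
 * directly.
 */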
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
	size_t allocsize;
	int r, rsegs;
	void *vaddr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return 1;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 1;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	return 0;
}