/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.1 2015/10/29 01:56:12 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#define VIOSCSI_DEBUG

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

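/*
 * One outstanding request.  vr_req and vr_res sit at the front of the
 * structure and form the device-visible header covered by the
 * vr_control DMA map; vr_data maps the caller's data buffer, if any.
 */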
struct vioscsi_req {
	struct virtio_scsi_req_hdr	vr_req;
	struct virtio_scsi_res_hdr	vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			vr_control;
	bus_dmamap_t			vr_data;
};

struct vioscsi_softc {
	device_t		sc_dev;
	struct scsipi_adapter	sc_adapter;
	struct scsipi_channel	sc_channel;

	struct virtqueue	sc_vqs[3];
	struct vioscsi_req	*sc_reqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	u_int32_t		sc_seg_max;
};

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

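/* Autoconf glue: match and attach against the virtio parent bus. */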
CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;
	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	uint32_t features;
	char buf[256];
	int rv;

	if (vsc->sc_child != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

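	/*
	 * Read the device limits from virtio config space.  Per the
	 * virtio-scsi spec, max_target/max_channel/max_lun report the
	 * largest addressable id, not a count.
	 */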
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

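	/*
	 * virtio-scsi provides three virtqueues: control (task
	 * management), event (asynchronous notifications) and request
	 * (SCSI commands).
	 */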
	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %zu\n", i);
			return;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = cmd_per_lun;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = max_target;
	chan->chan_nluns = max_lun;
	chan->chan_id = 0;	/*XXX*/

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}

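/*
 * Map scsipi transfer flags onto bus_dma(9) load/sync flags:
 * XS_CTL_DATA_IN is a device-to-memory transfer (BUS_DMA_READ,
 * BUS_DMASYNC_*READ), XS_CTL_DATA_OUT the reverse.
 */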
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

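	/* XXX: this rejects commands without a data phase, e.g. TEST UNIT READY. */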
	KASSERT((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0);

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
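	/*
	 * The 0x40 in byte 2 selects SAM flat-space LUN addressing;
	 * its low bits carry the high bits of the LUN number.
	 */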
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);

	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		// XXX: free req?
		scsipi_done(xs);
		return;
	}

	error = virtio_enqueue_reserve(vsc, vq, slot,
	    vr->vr_data->dm_nsegs + 2);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPRE(xs));

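	/*
	 * Descriptor order matters: the device-readable request header
	 * and data-out buffer go first, then the device-writable
	 * response header and data-in buffer.
	 */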
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

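	/*
	 * Polled mode: call the interrupt handler by hand for up to
	 * ~1 second (1000 iterations of 1 ms).
	 */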
	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}

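/*
 * Completion path: sync the DMA maps, translate the virtio response
 * into scsipi status/sense, and hand the xfer back to the mid-layer.
 */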
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		goto done;
	}

	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

done:
	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}

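/*
 * Virtqueue interrupt handler: drain every completed slot; the slot
 * number indexes directly into sc_reqs.
 */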
static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(vsc->sc_child);
	int ret = 0;

	DPRINTF(("%s: enter\n", __func__));

	for (;;) {
		int r, slot;
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));
		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
		ret = 1;
	}

	DPRINTF(("%s: exit %d\n", __func__, ret));

	return ret;
}

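/*
 * Reserve a request: virtio_enqueue_prep() hands out a free slot,
 * which doubles as the index of the vioscsi_req to use.  The DMA maps
 * are created per request here and destroyed again in
 * vioscsi_req_put().
 */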
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto err1;
	}
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

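	/*
	 * offsetof(struct vioscsi_req, vr_xs) is the size of the
	 * device-visible header (vr_req plus vr_res) that vr_control
	 * maps.
	 */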
	r = bus_dmamap_create(vsc->sc_dmat,
	    offsetof(struct vioscsi_req, vr_xs), 1,
	    offsetof(struct vioscsi_req, vr_xs), 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create xs error %d\n", __func__, r));
		goto err2;
	}
	r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create data error %d\n", __func__, r));
		goto err3;
	}
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
	    BUS_DMA_NOWAIT);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_load ctrl error %d\n", __func__, r));
		goto err4;
	}

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;

err4:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
err3:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
err2:
	virtio_enqueue_abort(vsc, vq, slot);
err1:
	return NULL;
}

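/*
 * Release a request: tear down its DMA maps and return the slot to
 * the virtqueue's free list.
 */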
static void
vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr - sc->sc_reqs;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);

	virtio_dequeue_commit(vsc, vq, slot);
}

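/*
 * Allocate one contiguous, DMA-safe array of vioscsi_req structures,
 * one per slot in the request virtqueue.
 */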
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
	size_t allocsize;
	int r, rsegs;
	void *vaddr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return 1;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 1;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	return 0;
}