/*	$NetBSD: vioscsi.c,v 1.17 2017/05/13 20:17:42 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.17 2017/05/13 20:17:42 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

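/*
 * A request as laid out in DMA-able memory: vr_req and vr_res are read
 * and written by the device and are covered by the per-request
 * vr_control map; vr_data maps the caller's transfer buffer separately.
 */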
struct vioscsi_req {
	struct virtio_scsi_req_hdr	vr_req;
	struct virtio_scsi_res_hdr	vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			vr_control;
	bus_dmamap_t			vr_data;
};

struct vioscsi_softc {
	device_t		sc_dev;
	struct scsipi_adapter	sc_adapter;
	struct scsipi_channel	sc_channel;

	struct virtqueue	sc_vqs[3];
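/* Queue indices fixed by the virtio-scsi spec: control, event, request. */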
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			sc_nreqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	u_int32_t		sc_seg_max;

	kmutex_t		sc_mutex;
};

/*
 * Each request uses at least two segments: one for the request header
 * and one for the response/status header.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);
static int	vioscsi_detach(device_t, int);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
		    struct virtio_softc *, int);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
		    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
		    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

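	/*
	 * Fetch the basic device limits from the virtio-scsi config
	 * space: the queue depth hint per LUN, the maximum number of
	 * data segments per request, and the highest target/LUN numbers.
	 */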
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

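	/*
	 * Allocate the control, event and request virtqueues, each
	 * sized for a MAXPHYS transfer plus the two metadata segments.
	 * Only the request queue needs a completion callback.
	 */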
	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
	chan->chan_nluns = MIN(max_lun, 1024);		/* cap reasonably */
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests. Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

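/*
 * Translate scsipi transfer flags into bus_dma(9) load and sync flags:
 * DATA_IN is a device-to-memory (read) transfer, everything else is
 * treated as a write; NOSLEEP requests must not block while mapping.
 */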
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * vioscsi_req_get() returns NULL when all queue slots are in
	 * use; report a resource shortage so the request is retried.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

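	/*
	 * Encode the address as an 8-byte virtio-scsi LUN:
	 *
	 *   lun[0]    = 1 (fixed)
	 *   lun[1]    = target (scsipi target 0 is the adapter, hence -1)
	 *   lun[2..3] = single-level LUN: 0x40 | lun<13:8>, then lun<7:0>
	 *   lun[4..7] = 0
	 */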
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
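	/* The command id just needs to be unique; the ring slot is. */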
	req->id = slot;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		/* nothing else to free */
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

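	/*
	 * Build the descriptor chain in the order the device expects:
	 * first the device-readable segments (request header, then
	 * data-out), followed by the device-writable ones (response
	 * header, then data-in).
	 */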
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

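	/*
	 * Polled command: scsipi expects it to be complete on return,
	 * so spin on the interrupt handler for up to a second.
	 */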
	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

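	/*
	 * Drop the lock around scsipi_done(): completion can trigger a
	 * new request, which re-enters vioscsi_req_get() and would
	 * otherwise deadlock on sc_mutex.
	 */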
	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);

		virtio_dequeue_commit(vsc, vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

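		/*
		 * The control map covers only the DMA-visible prefix of
		 * the request (vr_req and vr_res); offsetof(vr_xs) is
		 * the size of that prefix.
		 */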
		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	/* Avoid a double free from vioscsi_free_reqs() on the error path. */
	sc->sc_reqs = NULL;
	sc->sc_nreqs = 0;

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}

MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}