/*	$NetBSD: vioscsi.c,v 1.14 2017/03/25 18:13:53 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.14 2017/03/25 18:13:53 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

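/*
 * Per-slot request state: the device-visible request and response
 * headers, the scsipi transfer currently being serviced, and the DMA
 * maps describing the headers (vr_control) and the data buffer
 * (vr_data).
 */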
struct vioscsi_req {
    struct virtio_scsi_req_hdr	vr_req;
    struct virtio_scsi_res_hdr	vr_res;
    struct scsipi_xfer		*vr_xs;
    bus_dmamap_t		vr_control;
    bus_dmamap_t		vr_data;
};

struct vioscsi_softc {
    device_t			sc_dev;
    struct scsipi_adapter	sc_adapter;
    struct scsipi_channel	sc_channel;

    struct virtqueue		sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

    struct vioscsi_req		*sc_reqs;
    int				sc_nreqs;
    bus_dma_segment_t		sc_reqs_segs[1];

    uint32_t			sc_seg_max;
};
/*
 * Each request uses at least two descriptor segments: one for the
 * request header and one for the response (status) header.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS	2

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);

static const char *const vioscsi_vq_names[] = {
    "control",
    "event",
    "request",
};

CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);
static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
    struct virtio_attach_args *va = aux;

    if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
	return 1;

    return 0;
}

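/*
 * Attach: read the device configuration, allocate the three
 * virtqueues (control, event, request) and the per-slot request
 * structures, then register the adapter and channel with scsipi.
 */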
static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
    struct vioscsi_softc *sc = device_private(self);
    struct virtio_softc *vsc = device_private(parent);
    struct scsipi_adapter *adapt = &sc->sc_adapter;
    struct scsipi_channel *chan = &sc->sc_channel;
    int rv, qsize = 0, i = 0;
    int ipl = IPL_BIO;

    if (virtio_child(vsc) != NULL) {
	aprint_error(": parent %s already has a child\n",
	    device_xname(parent));
	return;
    }

    sc->sc_dev = self;

    virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
	0, VIRTIO_COMMON_FLAG_BITS);

    uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

    uint32_t seg_max = virtio_read_device_config_4(vsc,
	VIRTIO_SCSI_CONFIG_SEG_MAX);

    uint16_t max_target = virtio_read_device_config_2(vsc,
	VIRTIO_SCSI_CONFIG_MAX_TARGET);

    uint32_t max_lun = virtio_read_device_config_4(vsc,
	VIRTIO_SCSI_CONFIG_MAX_LUN);

    sc->sc_seg_max = seg_max;

    for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
	rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
	    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
	if (rv) {
	    aprint_error_dev(sc->sc_dev,
		"failed to allocate virtqueue %d\n", i);
	    return;
	}

	if (i == VIOSCSI_VQ_REQUEST)
	    sc->sc_vqs[i].vq_done = vioscsi_vq_done;
    }

    qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
    if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
	return;

    aprint_normal_dev(sc->sc_dev,
	"cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	" max_lun %u\n",
	cmd_per_lun, qsize, seg_max, max_target, max_lun);

    if (virtio_child_attach_finish(vsc) != 0)
	goto err;

    /*
     * Fill in the scsipi_adapter.
     */
    memset(adapt, 0, sizeof(*adapt));
    adapt->adapt_dev = sc->sc_dev;
    adapt->adapt_nchannels = 1;
    adapt->adapt_openings = MIN(qsize, cmd_per_lun);
    adapt->adapt_max_periph = adapt->adapt_openings;
    adapt->adapt_request = vioscsi_scsipi_request;
    adapt->adapt_minphys = minphys;

    /*
     * Fill in the scsipi_channel.
     */
    memset(chan, 0, sizeof(*chan));
    chan->chan_adapter = adapt;
    chan->chan_bustype = &scsi_bustype;
    chan->chan_channel = 0;
    chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
    chan->chan_nluns = MIN(max_lun, 16);	/* cap reasonably */
    chan->chan_id = 0;
    chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

    config_found(self, &sc->sc_channel, scsiprint);
    return;

err:
    if (qsize > 0)
	vioscsi_free_reqs(sc, vsc);

    for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
	if (sc->sc_vqs[i].vq_num > 0)
	    virtio_free_vq(vsc, &sc->sc_vqs[i]);
    }

    virtio_child_attach_failed(vsc);
}

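/*
 * Map scsipi transfer-control flags to the bus_dma(9) load and sync
 * flags appropriate for the transfer's data buffer.
 */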
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

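/*
 * scsipi entry point: translate a scsipi_xfer into a virtio SCSI
 * command and enqueue it on the request queue.  Completion normally
 * happens from the interrupt path via vioscsi_vq_done(); when
 * XS_CTL_POLL is set we spin here until the request finishes.
 */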
static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
    struct vioscsi_softc *sc =
	device_private(chan->chan_adapter->adapt_dev);
    struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
    struct scsipi_xfer *xs;
    struct scsipi_periph *periph;
    struct vioscsi_req *vr;
    struct virtio_scsi_req_hdr *req;
    struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
    int slot, error;

    DPRINTF(("%s: enter\n", __func__));

    switch (request) {
    case ADAPTER_REQ_RUN_XFER:
	break;
    case ADAPTER_REQ_SET_XFER_MODE:
    {
	struct scsipi_xfer_mode *xm = arg;
	xm->xm_mode = PERIPH_CAP_TQING;
	xm->xm_period = 0;
	xm->xm_offset = 0;
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
	return;
    }
    default:
	DPRINTF(("%s: unhandled %d\n", __func__, request));
	return;
    }

    xs = arg;
    periph = xs->xs_periph;

    /*
     * This can happen when we run out of queue slots.
     */
    vr = vioscsi_req_get(sc);
    if (vr == NULL) {
	xs->error = XS_RESOURCE_SHORTAGE;
	scsipi_done(xs);
	return;
    }

    req = &vr->vr_req;
    slot = vr - sc->sc_reqs;

    vr->vr_xs = xs;

    /*
     * "The only supported format for the LUN field is: first byte set to
     * 1, second byte set to target, third and fourth byte representing a
     * single level LUN structure, followed by four zero bytes."
     */
    if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
	DPRINTF(("%s: bad target %u or lun %u\n", __func__,
	    periph->periph_target, periph->periph_lun));
	goto stuffup;
    }
    req->lun[0] = 1;
    req->lun[1] = periph->periph_target - 1;
    req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
    req->lun[3] = periph->periph_lun & 0xFF;
    memset(req->lun + 4, 0, 4);
    DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	periph->periph_target - 1, periph->periph_lun, slot));

    /* tag */
    switch (XS_CTL_TAGTYPE(xs)) {
    case XS_CTL_HEAD_TAG:
	req->task_attr = VIRTIO_SCSI_S_HEAD;
	break;

#if 0	/* XXX */
    case XS_CTL_ACA_TAG:
	req->task_attr = VIRTIO_SCSI_S_ACA;
	break;
#endif

    case XS_CTL_ORDERED_TAG:
	req->task_attr = VIRTIO_SCSI_S_ORDERED;
	break;

    case XS_CTL_SIMPLE_TAG:
    default:
	req->task_attr = VIRTIO_SCSI_S_SIMPLE;
	break;
    }
    req->id = slot;

    if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
	DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
	    (size_t)xs->cmdlen, sizeof(req->cdb)));
	goto stuffup;
    }

    memset(req->cdb, 0, sizeof(req->cdb));
    memcpy(req->cdb, xs->cmd, xs->cmdlen);

    error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	xs->data, xs->datalen, NULL, XS2DMA(xs));
    switch (error) {
    case 0:
	break;
    case ENOMEM:
    case EAGAIN:
	xs->error = XS_RESOURCE_SHORTAGE;
	goto nomore;
    default:
	aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
	    error);
    stuffup:
	xs->error = XS_DRIVER_STUFFUP;
    nomore:
	/* nothing else to free */
	scsipi_done(xs);
	return;
    }

    int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
    if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
	nsegs += vr->vr_data->dm_nsegs;

    error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
    if (error) {
	DPRINTF(("%s: error reserving %d\n", __func__, error));
	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	xs->error = XS_RESOURCE_SHORTAGE;
	goto nomore;
    }

    bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	offsetof(struct vioscsi_req, vr_req),
	sizeof(struct virtio_scsi_req_hdr),
	BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	offsetof(struct vioscsi_req, vr_res),
	sizeof(struct virtio_scsi_res_hdr),
	BUS_DMASYNC_PREREAD);
    if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	    XS2DMAPRE(xs));

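    /*
     * Descriptor order matters: device-readable buffers (request
     * header, then data-out) must precede device-writable ones
     * (response header, then data-in).  The final argument of
     * virtio_enqueue*() selects the direction.
     */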
    virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	offsetof(struct vioscsi_req, vr_req),
	sizeof(struct virtio_scsi_req_hdr), 1);
    if (xs->xs_control & XS_CTL_DATA_OUT)
	virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
    virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	offsetof(struct vioscsi_req, vr_res),
	sizeof(struct virtio_scsi_res_hdr), 0);
    if (xs->xs_control & XS_CTL_DATA_IN)
	virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
    virtio_enqueue_commit(vsc, vq, slot, 1);

    if ((xs->xs_control & XS_CTL_POLL) == 0)
	return;

    DPRINTF(("%s: polling...\n", __func__));
    /* XXX: do this better. */
    int timeout = 1000;
    do {
	virtio_intrhand(vsc);
	if (vr->vr_xs != xs)
	    break;
	delay(1000);
    } while (--timeout > 0);

    if (vr->vr_xs == xs) {
	/* XXX: Abort! */
	xs->error = XS_TIMEOUT;
	xs->resid = xs->datalen;
	DPRINTF(("%s: polling timeout\n", __func__));
	scsipi_done(xs);
    }
    DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}

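/*
 * Complete one request: sync the DMA maps, translate the virtio
 * response into scsipi status and sense data, and hand the transfer
 * back to scsipi.
 */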
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
    struct scsipi_xfer *xs = vr->vr_xs;
    struct scsi_sense_data *sense = &xs->sense.scsi_sense;
    size_t sense_len;

    DPRINTF(("%s: enter\n", __func__));

    bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	offsetof(struct vioscsi_req, vr_req),
	sizeof(struct virtio_scsi_req_hdr),
	BUS_DMASYNC_POSTWRITE);
    bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	offsetof(struct vioscsi_req, vr_res),
	sizeof(struct virtio_scsi_res_hdr),
	BUS_DMASYNC_POSTREAD);
    bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	XS2DMAPOST(xs));

    xs->status = vr->vr_res.status;
    xs->resid = vr->vr_res.residual;

    switch (vr->vr_res.response) {
    case VIRTIO_SCSI_S_OK:
	sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
	break;
    case VIRTIO_SCSI_S_BAD_TARGET:
	DPRINTF(("%s: bad target\n", __func__));
	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
	break;
    default:
	DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	break;
    }

    DPRINTF(("%s: done %d, %d, %d\n", __func__,
	xs->error, xs->status, xs->resid));

    bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
    vr->vr_xs = NULL;

    scsipi_done(xs);
}

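/*
 * Request-queue interrupt handler: drain every completed slot,
 * returning nonzero if any work was done.
 */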
static int
vioscsi_vq_done(struct virtqueue *vq)
{
    struct virtio_softc *vsc = vq->vq_owner;
    struct vioscsi_softc *sc = device_private(virtio_child(vsc));
    int ret = 0;

    DPRINTF(("%s: enter\n", __func__));

    for (;;) {
	int r, slot;
	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r != 0)
	    break;

	DPRINTF(("%s: slot=%d\n", __func__, slot));

	vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);

	virtio_dequeue_commit(vsc, vq, slot);

	ret = 1;
    }

    DPRINTF(("%s: exit %d\n", __func__, ret));

    return ret;
}

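/*
 * Reserve a free request-queue slot and return the vioscsi_req that
 * corresponds to it; returns NULL when the queue is full.
 */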
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
    struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
    struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
    struct vioscsi_req *vr;
    int r, slot;

    if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
	DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
	return NULL;
    }
    KASSERT(slot < sc->sc_nreqs);
    vr = &sc->sc_reqs[slot];

    DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

    return vr;
}

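/*
 * Allocate one physically contiguous array of vioscsi_req, one entry
 * per request-queue slot, and create and load the DMA maps for each
 * entry.
 */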
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
    size_t allocsize;
    int r, rsegs, slot;
    void *vaddr;
    struct vioscsi_req *vr;

    allocsize = qsize * sizeof(struct vioscsi_req);
    r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	&sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
    if (r != 0) {
	aprint_error_dev(sc->sc_dev,
	    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
	    allocsize, r);
	return r;
    }
    r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	allocsize, &vaddr, BUS_DMA_NOWAIT);
    if (r != 0) {
	aprint_error_dev(sc->sc_dev,
	    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
	return r;
    }
    memset(vaddr, 0, allocsize);

    sc->sc_reqs = vaddr;
    sc->sc_nreqs = qsize;

    /* Prepare maps for the requests */
    for (slot = 0; slot < qsize; slot++) {
	vr = &sc->sc_reqs[slot];

	/*
	 * The control map covers only the request and response
	 * headers, which sit at the start of the structure;
	 * offsetof(vr_xs) is their combined size.
	 */
	r = bus_dmamap_create(virtio_dmat(vsc),
	    offsetof(struct vioscsi_req, vr_xs), 1,
	    offsetof(struct vioscsi_req, vr_xs), 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
	if (r != 0) {
	    aprint_error_dev(sc->sc_dev,
		"%s: bus_dmamap_create ctrl failed, error %d\n",
		__func__, r);
	    goto cleanup;
	}

	r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
	if (r != 0) {
	    aprint_error_dev(sc->sc_dev,
		"%s: bus_dmamap_create data failed, error %d\n",
		__func__, r);
	    goto cleanup;
	}

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
	    BUS_DMA_NOWAIT);
	if (r != 0) {
	    aprint_error_dev(sc->sc_dev,
		"%s: bus_dmamap_load ctrl failed, error %d\n",
		__func__, r);
	    goto cleanup;
	}
    }

    return 0;

cleanup:
    /* Walk back down to and including slot 0; the failing slot may
     * be only partially set up, hence the NULL checks. */
    for (; slot >= 0; slot--) {
	vr = &sc->sc_reqs[slot];

	if (vr->vr_control) {
	    /* this will also unload the mapping if loaded */
	    bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
	    vr->vr_control = NULL;
	}

	if (vr->vr_data) {
	    bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	    vr->vr_data = NULL;
	}
    }

    bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
    bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

    return r;
}

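/*
 * Undo vioscsi_alloc_reqs(): destroy the per-slot DMA maps and
 * release the request array.
 */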
static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
    int slot;
    struct vioscsi_req *vr;

    if (sc->sc_nreqs == 0) {
	/* Not allocated */
	return;
    }

    /* Free request maps */
    for (slot = 0; slot < sc->sc_nreqs; slot++) {
	vr = &sc->sc_reqs[slot];

	bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
	bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
    }

    bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	sc->sc_nreqs * sizeof(struct vioscsi_req));
    bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}