/*	$NetBSD: vioscsi.c,v 1.13 2017/03/25 18:09:44 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.13 2017/03/25 18:09:44 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

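/*
 * Per-request state.  vr_req and vr_res live in the DMA-able memory
 * allocated by vioscsi_alloc_reqs(); vr_control maps those two headers
 * for the device, while vr_data is loaded with the scsipi transfer
 * payload for each individual command.
 */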
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
};

struct vioscsi_softc {
	device_t		 sc_dev;
	struct scsipi_adapter	 sc_adapter;
	struct scsipi_channel	 sc_channel;

	struct virtqueue	 sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			 sc_nreqs;
	bus_dma_segment_t	 sc_reqs_segs[1];

	u_int32_t		 sc_seg_max;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			return;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
	chan->chan_nluns = MIN(max_lun, 16);		/* cap reasonably */
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

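/*
 * Map scsipi xfer control bits to bus_dma(9) flags: the transfer
 * direction, whether the map load may sleep, and the matching
 * pre/post bus_dmamap_sync() operations.
 */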
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * This can happen when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
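	/*
	 * Bytes 2/3 carry the LUN in SAM flat-space addressing: the top
	 * two bits of byte 2 are the address method (01b), the remaining
	 * 14 bits hold the LUN itself - hence the 16384 limit above.
	 */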
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = slot;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		/* nothing else to free */
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	}

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

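	/*
	 * Per the virtio-scsi request layout, all device-readable
	 * segments (request header, then data-out) must precede the
	 * device-writable ones (response header, then data-in).
	 */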
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
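	/*
	 * Poll budget: up to 1000 iterations of delay(1000) microseconds,
	 * i.e. roughly one second, servicing the interrupt handler by
	 * hand until our request completes.
	 */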
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}

static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
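	/*
	 * Synthesize fixed-format ILLEGAL REQUEST sense data so the
	 * scsipi layer treats a nonexistent target gracefully during
	 * bus probing instead of reporting a driver error.
	 */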
	case VIRTIO_SCSI_S_BAD_TARGET:
		DPRINTF(("%s: bad target\n", __func__));
		memset(sense, 0, sizeof(*sense));
		sense->response_code = 0x70;
		sense->flags = SKEY_ILLEGAL_REQUEST;
		xs->error = XS_SENSE;
		xs->status = 0;
		xs->resid = 0;
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	scsipi_done(xs);
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter\n", __func__));

	for (;;) {
		int r, slot;
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);

		virtio_dequeue_commit(vsc, vq, slot);

		ret = 1;
	}

	DPRINTF(("%s: exit %d\n", __func__, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		return NULL;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;
}

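/*
 * Allocate one contiguous DMA-able area holding qsize request
 * structures.  Each slot gets two maps: vr_control, loaded once here
 * over the request/response headers, and vr_data, loaded per command
 * with the transfer payload (up to MAXPHYS in seg_max segments).
 */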
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

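		/*
		 * vr_req and vr_res sit at the front of the structure,
		 * so offsetof(..., vr_xs) is exactly the size of the
		 * two headers the control map has to cover.
		 */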
		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Tear down everything up to and including the failed slot. */
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}