/*	$NetBSD: vioscsi.c,v 1.34 2023/03/25 08:14:00 mlelstv Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.34 2023/03/25 08:14:00 mlelstv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

struct vioscsi_req {
	struct virtio_scsi_req_hdr	vr_req;
	struct virtio_scsi_res_hdr	vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			vr_control;
	bus_dmamap_t			vr_data;
};

struct vioscsi_softc {
	device_t		sc_dev;
	struct scsipi_adapter	sc_adapter;
	struct scsipi_channel	sc_channel;

	struct virtqueue	sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			sc_nreqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	u_int32_t		sc_seg_max;

	kmutex_t		sc_mutex;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2
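
/*
 * A data-carrying request adds its DMA segments on top of these two:
 * one enqueue consumes VIRTIO_SCSI_MIN_SEGMENTS + vr_data->dm_nsegs
 * descriptors (see the nsegs computation in vioscsi_scsipi_request()).
 */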

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);
static int	vioscsi_detach(device_t, int);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *, struct virtqueue *, int);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == VIRTIO_DEVICE_ID_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		virtio_init_vq_vqdone(vsc, &sc->sc_vqs[i], i,
		    vioscsi_vq_done);
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc, sc->sc_vqs,
	    __arraycount(sc->sc_vqs), NULL, VIRTIO_F_INTR_MSIX) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(1 + max_target, 256);	/* cap reasonably */
	chan->chan_nluns = MIN(1 + max_lun, 16384);	/* cap reasonably */
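	/*
	 * Place the adapter's own id just past the last valid target,
	 * so that every target 0..max_target gets probed.
	 */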
	chan->chan_id = max_target + 1;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests. Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

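/*
 * Map scsipi xfer flags to bus_dma flags: XS_CTL_DATA_IN means the
 * device writes into host memory (BUS_DMA_READ, a read from the
 * device's point of view), XS_CTL_DATA_OUT the reverse.
 */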
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;
	bool dopoll;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * vioscsi_req_get() returns NULL when we run out of request
	 * queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

	req->lun[0] = 1;
	req->lun[1] = periph->periph_target;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
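	/*
	 * For example, target 3 LUN 5 encodes as
	 * { 0x01, 0x03, 0x40, 0x05, 0x00, 0x00, 0x00, 0x00 }; bytes 2-3
	 * carry the flat-addressed single level LUN.
	 */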
	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = virtio_rw64(vsc, slot);

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: error %d loading DMA map\n",
		    __func__, error);

		if (error == ENOMEM || error == EAGAIN) {
			/*
			 * Map is allocated with ALLOCNOW, so this should
			 * actually never ever happen.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
stuffup:
			/* not a temporary condition */
			xs->error = XS_DRIVER_STUFFUP;
		}

		virtio_enqueue_abort(vsc, vq, slot);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error reserving %d (nsegs %d)\n",
		    error, nsegs);
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		/* slot already freed by virtio_enqueue_reserve() */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

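	/*
	 * Enqueue in the order the virtio spec requires: device-readable
	 * buffers (request header, then data-out) before device-writable
	 * ones (response header, then data-in).
	 */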
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	dopoll = (xs->xs_control & XS_CTL_POLL) != 0;
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (!dopoll)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
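	/* Poll for up to ~1 second: 1000 iterations of 1 ms each. */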
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr, struct virtqueue *vq, int slot)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->datalen)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = virtio_rw32(vsc, vr->vr_res.residual);

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense),
		    virtio_rw32(vsc, vr->vr_res.sense_len));
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	virtio_dequeue_commit(vsc, vq, slot);

	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	memset(sense, 0, sizeof(*sense));
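	/* 0x70: fixed format sense data, current error */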
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot], vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

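		/*
		 * The control map covers vr_req and vr_res, the members
		 * laid out before vr_xs, so offsetof(struct vioscsi_req,
		 * vr_xs) is exactly the size of the region the device
		 * needs to see.
		 */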
		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}

MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}