/*	$NetBSD: vioscsi.c,v 1.28 2021/11/12 07:18:53 skrll Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.28 2021/11/12 07:18:53 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

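/*
 * Per-request state.  vr_req and vr_res are the device-visible
 * virtio-scsi request and response headers and must stay at the front
 * of the structure: vr_control maps only the bytes up to vr_xs (see
 * vioscsi_alloc_reqs()), so everything from vr_xs on is driver-private
 * and never reaches the device.
 */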
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
};

struct vioscsi_softc {
	device_t		 sc_dev;
	struct scsipi_adapter	 sc_adapter;
	struct scsipi_channel	 sc_channel;

	struct virtqueue	 sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			 sc_nreqs;
	bus_dma_segment_t	 sc_reqs_segs[1];

	uint32_t		 sc_seg_max;

	kmutex_t		 sc_mutex;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);
static int	vioscsi_detach(device_t, int);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *, struct virtqueue *, int);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == VIRTIO_DEVICE_ID_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

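	/*
	 * Allocate all three virtqueues up front.  Each is sized so a
	 * single request can carry the two bookkeeping segments (header
	 * and status) plus a MAXPHYS-sized data transfer split into
	 * page-sized segments.  Only the request queue gets a completion
	 * handler; the control and event queues are not used beyond
	 * allocation here.
	 */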
	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
	chan->chan_nluns = MIN(max_lun, 1024);		/* cap reasonably */
	chan->chan_id = max_target;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
	/*
	 * XXX Remove this when scsipi is REPORT LUNS-aware.
	 * scsipi(4) insists that LUNs must be contiguous starting from 0.
	 * This is not true on Linode (circa 2020).
	 *
	 * Also, if explicitly selecting the 'Virtio SCSI Single'
	 * controller (which is not the default SCSI controller) on
	 * Proxmox hosts, each disk will be on its own SCSI bus at
	 * target 0, but unexpectedly on a LUN matching the drive number
	 * on the system (i.e. drive 0 will be bus 0, target 0, lun 0;
	 * drive 1 will be bus 1, target 0, lun 1; drive 2 will be
	 * bus 2, target 0, lun 2 -- which is where the gaps start
	 * happening).  https://bugzilla.proxmox.com/show_bug.cgi?id=2985
	 */
	chan->chan_defquirks = PQUIRK_FORCELUNS;

	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests.  Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

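/*
 * Translate scsipi xfer control bits into bus_dma(9) flags: the data
 * direction selects READ/WRITE and the matching PRE/POST sync ops,
 * while XS_CTL_NOSLEEP decides whether the map load may block.
 */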
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;
	bool dopoll;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * This can happen when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

	req->lun[0] = 1;
	req->lun[1] = periph->periph_target;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
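	/*
	 * For example, target 5, LUN 3 encodes as the eight bytes
	 * 01 05 40 03 00 00 00 00 (flat space addressing in the third
	 * and fourth bytes).
	 */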
	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = virtio_rw64(vsc, slot);

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: error %d loading DMA map\n",
		    __func__, error);

		if (error == ENOMEM || error == EAGAIN) {
			/*
			 * Map is allocated with ALLOCNOW, so this should
			 * actually never ever happen.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
stuffup:
			/* not a temporary condition */
			xs->error = XS_DRIVER_STUFFUP;
		}

		virtio_enqueue_abort(vsc, vq, slot);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error reserving %d (nsegs %d)\n",
		    error, nsegs);
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		/* slot already freed by virtio_enqueue_reserve() */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

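	/*
	 * Build the descriptor chain.  Device-readable segments (the
	 * request header, then any data-out payload) must precede the
	 * device-writable ones (the response header, then any data-in
	 * payload); the last argument to virtio_enqueue*() marks the
	 * segment as device-readable (1) or device-writable (0).
	 */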
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	dopoll = (xs->xs_control & XS_CTL_POLL) != 0;
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (!dopoll)
		return;

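	/*
	 * XS_CTL_POLL transfers (e.g. those issued during
	 * autoconfiguration or a crash dump) cannot rely on interrupt
	 * delivery, so run the interrupt handler by hand: 1000
	 * iterations of delay(1000) give the command roughly one
	 * second to complete.
	 */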
	DPRINTF(("%s: polling...\n", __func__));
	/* XXX: do this better. */
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		/* XXX: Abort! */
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

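/*
 * Complete one request: sync the DMA maps, copy status, residual and
 * sense data out of the response header, and hand the xfer back to
 * scsipi.  Called with sc_mutex held; the mutex is dropped around
 * scsipi_done() because completion can immediately trigger a new
 * request, which would take sc_mutex again in vioscsi_req_get().
 */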
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr, struct virtqueue *vq, int slot)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->datalen)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = virtio_rw32(vsc, vr->vr_res.residual);

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense),
		    virtio_rw32(vsc, vr->vr_res.sense_len));
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	virtio_dequeue_commit(vsc, vq, slot);

	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

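/*
 * The host reported VIRTIO_SCSI_S_BAD_TARGET.  Synthesize fixed-format
 * sense data (response code 0x70) with an ILLEGAL REQUEST sense key so
 * that scsipi treats the address as an ordinary nonexistent device
 * during bus scan rather than as an adapter failure.
 */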
static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot], vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

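	/*
	 * Each request gets two DMA maps: vr_control covers only the
	 * device-visible head of the vioscsi_req (the request and
	 * response headers, in a single physically contiguous segment),
	 * and vr_data covers the payload with up to sc_seg_max segments.
	 * Both use BUS_DMA_ALLOCNOW so that map loads cannot fail later
	 * in the I/O path.
	 */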
	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}

MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}