/*	$NetBSD: virtio.c,v 1.3.6.2 2012/01/25 21:18:15 riz Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.3.6.2 2012/01/25 21:18:15 riz Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#define	MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_match(device_t, cfdata_t, void *);
static void	virtio_attach(device_t, device_t, void *);
static int	virtio_detach(device_t, int);
static int	virtio_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

CFATTACH_DECL_NEW(virtio, sizeof(struct virtio_softc),
    virtio_match, virtio_attach, virtio_detach, NULL);
static void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

static int
virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case 0x1af4 /* PCI_VENDOR_QUMRANET */:
		if ((0x1000 /* PCI_PRODUCT_QUMRANET_VIRTIO_1000 */ <=
		     PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		     0x103f /* PCI_PRODUCT_QUMRANET_VIRTIO_103F */))
			return 1;
		break;
	}

	return 0;
}

static const char *virtio_device_name[] = {
	"Unknown (0)",		/* 0 */
	"Network",		/* 1 */
	"Block",		/* 2 */
	"Console",		/* 3 */
	"Entropy",		/* 4 */
	"Memory Balloon",	/* 5 */
	"Unknown (6)",		/* 6 */
	"Unknown (7)",		/* 7 */
	"Unknown (8)",		/* 8 */
	"9P Transport"		/* 9 */
};
#define NDEVNAMES	(sizeof(virtio_device_name)/sizeof(char*))

static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* the PCI subsystem ID identifies the virtio device type */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
	    (PCI_PRODUCT(id) < NDEVNAMES ?
	     virtio_device_name[PCI_PRODUCT(id)] : "Unknown"),
	    revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	sc->sc_childdevid = PCI_PRODUCT(id);
	sc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return;
	}
	if (sc->sc_child == (void*)1) { /* (void*)1: child attach failed */
		aprint_error_dev(self,
		    "virtio configuration failed\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, sc->sc_ipl, virtio_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return;
}

static int
virtio_detach(device_t self, int flags)
{
	struct virtio_softc *sc = device_private(self);
	int r;

	if (sc->sc_child != 0 && sc->sc_child != (void*)1) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(sc->sc_child == 0 || sc->sc_child == (void*)1);
	KASSERT(sc->sc_vqs == 0);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	sc->sc_ih = 0;
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, features can be renegotiated only after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT,
		    vq->vq_index);
		n = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS,
		    (vq->vq_dmamap->dm_segs[0].ds_addr
		     / VIRTIO_PAGE_SIZE));
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

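/*
 * Illustrative sketch (editor's addition, not part of the original
 * driver): a child driver recovering from a reset might follow the
 * sequence documented above.  "mydev_softc", "sc_vq" and
 * "my_requested_features" are hypothetical.
 */
#if 0
static void
mydev_reset_example(struct virtio_softc *vsc, struct mydev_softc *mysc)
{
	uint32_t features;
	int slot, len;

	virtio_reset(vsc);		/* stop all device activity */
	/* drain finished requests; virtio_dequeue() is still legal here */
	while (virtio_dequeue(vsc, &mysc->sc_vq, &slot, &len) == 0)
		virtio_dequeue_commit(vsc, &mysc->sc_vq, slot);
	virtio_reinit_start(vsc);	/* dequeue prohibited from here on */
	features = virtio_negotiate_features(vsc, my_requested_features);
	/* ... device-specific reinitialization ... */
	virtio_reinit_end(vsc);		/* device active; enqueue allowed */
}
#endif
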
/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}

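/*
 * Illustrative sketch (editor's addition): a child driver passes the
 * feature bits it supports and inspects what the host accepted.
 * MYDEV_F_FANCY and the softc member are hypothetical.
 */
#if 0
	uint32_t features;

	features = virtio_negotiate_features(vsc, MYDEV_F_FANCY);
	if ((features & MYDEV_F_FANCY) == 0)
		mysc->sc_fancy = false;	/* host declined the feature */
#endif
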
/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index,
	    value & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t),
	    value >> 32);
}

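/*
 * Illustrative sketch (editor's addition): device-specific configuration
 * fields are read relative to sc_config_offset through these accessors;
 * for example, a 64-bit capacity field at a hypothetical offset 0:
 */
#if 0
	uint64_t capacity;

	capacity = virtio_read_device_config_8(vsc, 0 /* hypothetical */);
#endif
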
/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL)
		r |= (sc->sc_intrhand)(sc);

	return r;
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan each vq, sync the rings (not the payload), and call (*vq_done)()
 * if some entries have been consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

/*
 * Start/stop vq interrupts.  These are hints only: the host is free to
 * ignore VRING_AVAIL_F_NO_INTERRUPT, so no guarantee is made.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

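/*
 * Illustrative sketch (editor's addition): since the suppression flag is
 * only a hint, a completion handler that polls typically re-checks the
 * used ring after re-enabling interrupts.  mydev_vq_done_polling is
 * hypothetical.
 */
#if 0
static int
mydev_vq_done_polling(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	int slot, len;

	virtio_stop_vq_intr(vsc, vq);
	while (virtio_dequeue(vsc, vq, &slot, &len) == 0)
		virtio_dequeue_commit(vsc, vq, slot);
	virtio_start_vq_intr(vsc, vq);
	/* close the race window: entries may have landed meanwhile */
	while (virtio_dequeue(vsc, vq, &slot, &len) == 0)
		virtio_dequeue_commit(vsc, vq, slot);
	return 1;
}
#endif
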
/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc,
    struct virtqueue *vq, int index, int maxsegsize, int maxnsegs,
    const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	memset(vq, 0, sizeof(*vq));

	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue doesn't exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS,
	    (vq->vq_dmamap->dm_segs[0].ds_addr
	     / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) of "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);
	return 0;

err:
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

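/*
 * Illustrative sketch (editor's addition): a child driver usually
 * allocates its virtqueue(s) during attach and wires up the completion
 * callback; virtio_vq_intr can serve as sc_intrhand.  The queue name,
 * segment limits, mydev_vq_done and the softc layout are hypothetical.
 */
#if 0
	vsc->sc_vqs = &mysc->sc_vq;
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	if (virtio_alloc_vq(vsc, &mysc->sc_vq, 0 /* queue index */,
	    MAXPHYS, MAXPHYS/PAGE_SIZE + 2, "mydev requests") != 0)
		return;	/* attach fails */
	mysc->sc_vq.vq_done = mydev_vq_done;
#endif
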
int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	/* free with the size passed to kmem_zalloc(), not vq_bytesize */
	kmem_free(vq->vq_entries, sizeof(struct vq_entry) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of each of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *	  return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *	  virtio_enqueue_abort(sc, vq, slot);
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);		// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_NOTIFY,
			    vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

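/*
 * Illustrative sketch (editor's addition): a typical vq_done callback
 * drains every completed request and recycles the slots.
 * mydev_complete and the payload sync are hypothetical.
 */
#if 0
static int
mydev_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	int slot, len, r = 0;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		/* bus_dmamap_sync() the payload map here, then: */
		mydev_complete(vsc, vq, slot, len);
		virtio_dequeue_commit(vsc, vq, slot);
		r = 1;
	}
	return r;
}
#endif
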
/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}