1 /* $NetBSD: if_hvn.c,v 1.13 2019/12/10 12:20:20 nonaka Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.13 2019/12/10 12:20:20 nonaka Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70
71 #define HVN_NVS_MSGSIZE 32
72 #define HVN_NVS_BUFSIZE PAGE_SIZE
73
74 /*
75 * RNDIS control interface
76 */
77 #define HVN_RNDIS_CTLREQS 4
78 #define HVN_RNDIS_BUFSIZE 512
79
80 struct rndis_cmd {
81 uint32_t rc_id;
82 struct hvn_nvs_rndis rc_msg;
83 void *rc_req;
84 bus_dmamap_t rc_dmap;
85 bus_dma_segment_t rc_segs;
86 int rc_nsegs;
87 uint64_t rc_gpa;
88 struct rndis_packet_msg rc_cmp;
89 uint32_t rc_cmplen;
90 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
91 int rc_done;
92 TAILQ_ENTRY(rndis_cmd) rc_entry;
93 };
94 TAILQ_HEAD(rndis_queue, rndis_cmd);
95
96 #define HVN_MAXMTU (9 * 1024)
97
98 #define HVN_RNDIS_XFER_SIZE 2048
99
100 /*
101 * Tx ring
102 */
103 #define HVN_TX_DESC 256
104 #define HVN_TX_FRAGS 15 /* 31 is the max */
105 #define HVN_TX_FRAG_SIZE PAGE_SIZE
106 #define HVN_TX_PKT_SIZE 16384
107
108 #define HVN_RNDIS_PKT_LEN \
109 (sizeof(struct rndis_packet_msg) + \
110 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
111 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
112
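/*
 * Per-descriptor transmit state.  txd_sgl has HVN_TX_FRAGS + 1 slots:
 * slot 0 carries the RNDIS packet message (txd_req/txd_gpa) and the
 * remaining slots describe the mbuf fragments loaded into txd_dmap.
 */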
113 struct hvn_tx_desc {
114 uint32_t txd_id;
115 int txd_ready;
116 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
117 int txd_nsge;
118 struct mbuf *txd_buf;
119 bus_dmamap_t txd_dmap;
120 struct vmbus_gpa txd_gpa;
121 struct rndis_packet_msg *txd_req;
122 };
123
124 struct hvn_softc {
125 device_t sc_dev;
126
127 struct vmbus_softc *sc_vmbus;
128 struct vmbus_channel *sc_chan;
129 bus_dma_tag_t sc_dmat;
130
131 struct ethercom sc_ec;
132 struct ifmedia sc_media;
133 struct if_percpuq *sc_ipq;
134 int sc_link_state;
135 int sc_promisc;
136
137 uint32_t sc_flags;
138 #define HVN_SCF_ATTACHED __BIT(0)
139
140 /* NVS protocol */
141 int sc_proto;
142 uint32_t sc_nvstid;
143 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
144 uint8_t *sc_nvsbuf;
145 int sc_nvsdone;
146
147 /* RNDIS protocol */
148 int sc_ndisver;
149 uint32_t sc_rndisrid;
150 struct rndis_queue sc_cntl_sq; /* submission queue */
151 kmutex_t sc_cntl_sqlck;
152 struct rndis_queue sc_cntl_cq; /* completion queue */
153 kmutex_t sc_cntl_cqlck;
154 struct rndis_queue sc_cntl_fq; /* free queue */
155 kmutex_t sc_cntl_fqlck;
156 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
157 struct hvn_nvs_rndis sc_data_msg;
158
159 /* Rx ring */
160 uint8_t *sc_rx_ring;
161 int sc_rx_size;
162 uint32_t sc_rx_hndl;
163 struct hyperv_dma sc_rx_dma;
164
165 /* Tx ring */
166 uint32_t sc_tx_next;
167 uint32_t sc_tx_avail;
168 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
169 bus_dmamap_t sc_tx_rmap;
170 uint8_t *sc_tx_msgs;
171 bus_dma_segment_t sc_tx_mseg;
172 };
173
174 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
175 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
176
177
178 static int hvn_match(device_t, cfdata_t, void *);
179 static void hvn_attach(device_t, device_t, void *);
180 static int hvn_detach(device_t, int);
181
182 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
183 hvn_match, hvn_attach, hvn_detach, NULL);
184
185 static int hvn_ioctl(struct ifnet *, u_long, void *);
186 static int hvn_media_change(struct ifnet *);
187 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
188 static int hvn_iff(struct hvn_softc *);
189 static int hvn_init(struct ifnet *);
190 static void hvn_stop(struct ifnet *, int);
191 static void hvn_start(struct ifnet *);
192 static int hvn_encap(struct hvn_softc *, struct mbuf *,
193 struct hvn_tx_desc **);
194 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
195 static void hvn_txeof(struct hvn_softc *, uint64_t);
196 static int hvn_rx_ring_create(struct hvn_softc *);
197 static int hvn_rx_ring_destroy(struct hvn_softc *);
198 static int hvn_tx_ring_create(struct hvn_softc *);
199 static void hvn_tx_ring_destroy(struct hvn_softc *);
200 static int hvn_set_capabilities(struct hvn_softc *);
201 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
202 static void hvn_get_link_status(struct hvn_softc *);
203
204 /* NSVP */
205 static int hvn_nvs_attach(struct hvn_softc *);
206 static void hvn_nvs_intr(void *);
207 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
208 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
209 static void hvn_nvs_detach(struct hvn_softc *);
210
211 /* RNDIS */
212 static int hvn_rndis_attach(struct hvn_softc *);
213 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
214 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
215 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
216 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
217 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
218 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
219 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
220 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
221 static int hvn_rndis_open(struct hvn_softc *);
222 static int hvn_rndis_close(struct hvn_softc *);
223 static void hvn_rndis_detach(struct hvn_softc *);
224
225 static int
226 hvn_match(device_t parent, cfdata_t match, void *aux)
227 {
228 struct vmbus_attach_args *aa = aux;
229
230 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
231 return 0;
232 return 1;
233 }
234
235 static void
236 hvn_attach(device_t parent, device_t self, void *aux)
237 {
238 struct hvn_softc *sc = device_private(self);
239 struct vmbus_attach_args *aa = aux;
240 struct ifnet *ifp = SC2IFP(sc);
241 uint8_t enaddr[ETHER_ADDR_LEN];
242 int error;
243
244 sc->sc_dev = self;
245 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
246 sc->sc_chan = aa->aa_chan;
247 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
248
249 aprint_naive("\n");
250 aprint_normal(": Hyper-V NetVSC\n");
251
252 if (hvn_nvs_attach(sc)) {
253 aprint_error_dev(self, "failed to init NVSP\n");
254 return;
255 }
256
257 if (hvn_rx_ring_create(sc)) {
258 aprint_error_dev(self, "failed to create Rx ring\n");
259 goto fail1;
260 }
261
262 if (hvn_tx_ring_create(sc)) {
263 aprint_error_dev(self, "failed to create Tx ring\n");
264 goto fail2;
265 }
266
267 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
268 ifp->if_softc = sc;
269 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
270 ifp->if_ioctl = hvn_ioctl;
271 ifp->if_start = hvn_start;
272 ifp->if_init = hvn_init;
273 ifp->if_stop = hvn_stop;
274 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
275 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
276 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
277 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
278 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
279 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
280 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
281 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
282 }
283 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
284 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
285 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
286 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
287 }
288
289 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
290 IFQ_SET_READY(&ifp->if_snd);
291
292 /* Initialize ifmedia structures. */
293 sc->sc_ec.ec_ifmedia = &sc->sc_media;
294 ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
295 hvn_media_status);
296 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
297 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
298
299 error = if_initialize(ifp);
300 if (error) {
301 aprint_error_dev(self, "if_initialize failed(%d)\n", error);
302 goto fail3;
303 }
304 sc->sc_ipq = if_percpuq_create(ifp);
305 if_deferred_start_init(ifp, NULL);
306
307 if (hvn_rndis_attach(sc)) {
308 aprint_error_dev(self, "failed to init RNDIS\n");
309 goto fail3;
310 }
311
312 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
313 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
314 sc->sc_ndisver >> 16 , sc->sc_ndisver & 0xffff);
315
316 if (hvn_set_capabilities(sc)) {
317 aprint_error_dev(self, "failed to setup offloading\n");
318 goto fail4;
319 }
320
321 if (hvn_get_lladdr(sc, enaddr)) {
322 aprint_error_dev(self,
323 "failed to obtain an ethernet address\n");
324 goto fail4;
325 }
326 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
327
328 ether_ifattach(ifp, enaddr);
329 if_register(ifp);
330
331 if (pmf_device_register(self, NULL, NULL))
332 pmf_class_network_register(self, ifp);
333 else
334 aprint_error_dev(self, "couldn't establish power handler\n");
335
336 SET(sc->sc_flags, HVN_SCF_ATTACHED);
337 return;
338
339 fail4: hvn_rndis_detach(sc);
340 if_percpuq_destroy(sc->sc_ipq);
341 fail3: hvn_tx_ring_destroy(sc);
342 fail2: hvn_rx_ring_destroy(sc);
343 fail1: hvn_nvs_detach(sc);
344 }
345
346 static int
347 hvn_detach(device_t self, int flags)
348 {
349 struct hvn_softc *sc = device_private(self);
350 struct ifnet *ifp = SC2IFP(sc);
351
352 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
353 return 0;
354
355 if (ifp->if_flags & IFF_RUNNING)
356 hvn_stop(ifp, 1);
357
358 pmf_device_deregister(self);
359
360 ether_ifdetach(ifp);
361 if_detach(ifp);
362 if_percpuq_destroy(sc->sc_ipq);
363
364 hvn_rndis_detach(sc);
365 hvn_rx_ring_destroy(sc);
366 hvn_tx_ring_destroy(sc);
367 hvn_nvs_detach(sc);
368
369 return 0;
370 }
371
372 static int
373 hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
374 {
375 struct hvn_softc *sc = IFP2SC(ifp);
376 int s, error = 0;
377
378 s = splnet();
379
380 switch (command) {
381 case SIOCSIFFLAGS:
382 if (ifp->if_flags & IFF_UP) {
383 if (ifp->if_flags & IFF_RUNNING)
384 error = ENETRESET;
385 else {
386 error = hvn_init(ifp);
387 if (error)
388 ifp->if_flags &= ~IFF_UP;
389 }
390 } else {
391 if (ifp->if_flags & IFF_RUNNING)
392 hvn_stop(ifp, 1);
393 }
394 break;
395 default:
396 error = ether_ioctl(ifp, command, data);
397 break;
398 }
399
400 if (error == ENETRESET) {
401 if (ifp->if_flags & IFF_RUNNING)
402 hvn_iff(sc);
403 error = 0;
404 }
405
406 splx(s);
407
408 return error;
409 }
410
411 static int
412 hvn_media_change(struct ifnet *ifp)
413 {
414
415 return 0;
416 }
417
418 static void
419 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
420 {
421 struct hvn_softc *sc = IFP2SC(ifp);
422 int link_state;
423
424 link_state = sc->sc_link_state;
425 hvn_get_link_status(sc);
426 if (link_state != sc->sc_link_state)
427 if_link_state_change(ifp, sc->sc_link_state);
428
429 ifmr->ifm_status = IFM_AVALID;
430 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
431 if (sc->sc_link_state == LINK_STATE_UP)
432 ifmr->ifm_status |= IFM_ACTIVE;
433 }
434
435 static int
436 hvn_iff(struct hvn_softc *sc)
437 {
438
439 /* XXX */
440 sc->sc_promisc = 0;
441
442 return 0;
443 }
444
445 static int
446 hvn_init(struct ifnet *ifp)
447 {
448 struct hvn_softc *sc = IFP2SC(ifp);
449 int error;
450
451 hvn_stop(ifp, 0);
452
453 error = hvn_iff(sc);
454 if (error)
455 return error;
456
457 error = hvn_rndis_open(sc);
458 if (error == 0) {
459 ifp->if_flags |= IFF_RUNNING;
460 ifp->if_flags &= ~IFF_OACTIVE;
461 }
462 return error;
463 }
464
465 static void
466 hvn_stop(struct ifnet *ifp, int disable)
467 {
468 struct hvn_softc *sc = IFP2SC(ifp);
469
470 hvn_rndis_close(sc);
471
472 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
473 }
474
475 static void
476 hvn_start(struct ifnet *ifp)
477 {
478 struct hvn_softc *sc = IFP2SC(ifp);
479 struct hvn_tx_desc *txd;
480 struct mbuf *m;
481
482 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
483 return;
484
485 for (;;) {
486 if (!sc->sc_tx_avail) {
487 /* transient */
488 ifp->if_flags |= IFF_OACTIVE;
489 break;
490 }
491
492 IFQ_DEQUEUE(&ifp->if_snd, m);
493 if (m == NULL)
494 break;
495
496 if (hvn_encap(sc, m, &txd)) {
497 /* the chain is too large */
498 ifp->if_oerrors++;
499 m_freem(m);
500 continue;
501 }
502
503 bpf_mtap(ifp, m, BPF_D_OUT);
504
505 if (hvn_rndis_output(sc, txd)) {
506 hvn_decap(sc, txd);
507 ifp->if_oerrors++;
508 m_freem(m);
509 continue;
510 }
511
512 sc->sc_tx_next++;
513 }
514 }
515
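/*
 * Append a per-packet info element (e.g. VLAN or checksum info) to the
 * RNDIS packet message, growing rm_pktinfolen, rm_dataoffset and rm_len
 * accordingly.  Returns a pointer to the element's data area; the caller
 * must have reserved enough room in the message (see HVN_RNDIS_PKT_LEN).
 */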
516 static inline char *
517 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
518 size_t datalen, uint32_t type)
519 {
520 struct rndis_pktinfo *pi;
521 size_t pi_size = sizeof(*pi) + datalen;
522 char *cp;
523
524 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
525 pktsize);
526
527 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
528 pi = (struct rndis_pktinfo *)cp;
529 pi->rm_size = pi_size;
530 pi->rm_type = type;
531 pi->rm_pktinfooffset = sizeof(*pi);
532 pkt->rm_pktinfolen += pi_size;
533 pkt->rm_dataoffset += pi_size;
534 pkt->rm_len += pi_size;
535
536 return (char *)pi->rm_data;
537 }
538
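/*
 * Prepare a packet for transmission: claim the next ready descriptor,
 * build the RNDIS packet message in its per-descriptor buffer, load the
 * mbuf into txd_dmap (defragmenting once on EFBIG) and fill the
 * scatter/gather list with the RNDIS message in slot 0 followed by the
 * DMA segments of the mbuf.
 */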
539 static int
540 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
541 {
542 struct hvn_tx_desc *txd;
543 struct rndis_packet_msg *pkt;
544 bus_dma_segment_t *seg;
545 size_t pktlen;
546 int i, rv;
547
548 do {
549 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
550 sc->sc_tx_next++;
551 } while (!txd->txd_ready);
552 txd->txd_ready = 0;
553
554 pkt = txd->txd_req;
555 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
556 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
557 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
558 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
559 pkt->rm_datalen = m->m_pkthdr.len;
560 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
561 pkt->rm_pktinfolen = 0;
562
563 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
564 BUS_DMA_NOWAIT);
565 switch (rv) {
566 case 0:
567 break;
568 case EFBIG:
569 if (m_defrag(m, M_NOWAIT) != NULL &&
570 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
571 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
572 break;
573 /* FALLTHROUGH */
574 default:
575 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
576 return -1;
577 }
578 txd->txd_buf = m;
579
580 if (m->m_flags & M_VLANTAG) {
581 uint32_t vlan;
582 char *cp;
583
584 vlan = NDIS_VLAN_INFO_MAKE(
585 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
586 EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
587 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
588 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
589 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
590 }
591
592 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
593 M_CSUM_TCPv4)) {
594 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
595 char *cp;
596
597 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
598 csum |= NDIS_TXCSUM_INFO_IPCS;
599 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
600 csum |= NDIS_TXCSUM_INFO_TCPCS;
601 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
602 csum |= NDIS_TXCSUM_INFO_UDPCS;
603 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
604 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
605 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
606 }
607
608 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
609 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
610
611 /* Attach an RNDIS message to the first slot */
612 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
613 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
614 txd->txd_sgl[0].gpa_len = pktlen;
615 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
616
617 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
618 seg = &txd->txd_dmap->dm_segs[i];
619 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
620 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
621 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
622 }
623
624 *txd0 = txd;
625
626 atomic_dec_uint(&sc->sc_tx_avail);
627
628 return 0;
629 }
630
631 static void
632 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
633 {
634 struct ifnet *ifp = SC2IFP(sc);
635
636 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
637 0, txd->txd_dmap->dm_mapsize,
638 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
639 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
640 txd->txd_buf = NULL;
641 txd->txd_nsge = 0;
642 txd->txd_ready = 1;
643 atomic_inc_uint(&sc->sc_tx_avail);
644 ifp->if_flags &= ~IFF_OACTIVE;
645 }
646
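/*
 * Transmit completion.  The transaction id encodes the descriptor: the
 * low 32 bits are zero and the high 32 bits hold HVN_NVS_CHIM_SIG plus
 * the descriptor index, i.e. roughly
 *
 *	tid = (uint64_t)(HVN_NVS_CHIM_SIG + i) << 32;
 *
 * matching what hvn_tx_ring_create() stores in txd_id and what
 * hvn_rndis_output() passes to vmbus_channel_send_sgl().
 */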
647 static void
648 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
649 {
650 struct ifnet *ifp = SC2IFP(sc);
651 struct hvn_tx_desc *txd;
652 struct mbuf *m;
653 uint32_t id = tid >> 32;
654
655 if ((tid & 0xffffffffU) != 0)
656 return;
657
658 id -= HVN_NVS_CHIM_SIG;
659 if (id >= HVN_TX_DESC) {
660 device_printf(sc->sc_dev, "tx packet index too large: %u", id);
661 return;
662 }
663
664 txd = &sc->sc_tx_desc[id];
665
666 if ((m = txd->txd_buf) == NULL) {
667 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
668 return;
669 }
670 txd->txd_buf = NULL;
671
672 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
673 0, txd->txd_dmap->dm_mapsize,
674 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
675 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
676 m_freem(m);
677 ifp->if_opackets++;
678
679 txd->txd_ready = 1;
680
681 atomic_inc_uint(&sc->sc_tx_avail);
682 ifp->if_flags &= ~IFF_OACTIVE;
683 }
684
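/*
 * Allocate the receive buffer (15MB for NVS 2 and older, 16MB
 * otherwise), hand it to the host through a GPADL handle and send an
 * HVN_NVS_TYPE_RXBUF_CONN request; the response is checked for a
 * single ring section.
 */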
685 static int
686 hvn_rx_ring_create(struct hvn_softc *sc)
687 {
688 struct hvn_nvs_rxbuf_conn cmd;
689 struct hvn_nvs_rxbuf_conn_resp *rsp;
690 uint64_t tid;
691
692 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
693 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
694 else
695 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
696 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
697 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE,
698 HYPERV_DMA_SLEEPOK);
699 if (sc->sc_rx_ring == NULL) {
700 DPRINTF("%s: failed to allocate Rx ring buffer\n",
701 device_xname(sc->sc_dev));
702 return -1;
703 }
704 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
705 &sc->sc_rx_hndl)) {
706 DPRINTF("%s: failed to obtain a PA handle\n",
707 device_xname(sc->sc_dev));
708 goto errout;
709 }
710
711 memset(&cmd, 0, sizeof(cmd));
712 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
713 cmd.nvs_gpadl = sc->sc_rx_hndl;
714 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
715
716 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
717 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
718 goto errout;
719
720 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
721 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
722 DPRINTF("%s: failed to set up the Rx ring\n",
723 device_xname(sc->sc_dev));
724 goto errout;
725 }
726 if (rsp->nvs_nsect > 1) {
727 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
728 device_xname(sc->sc_dev), rsp->nvs_nsect);
729 hvn_rx_ring_destroy(sc);
730 return -1;
731 }
732 return 0;
733
734 errout:
735 if (sc->sc_rx_hndl) {
736 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
737 sc->sc_rx_hndl = 0;
738 }
739 if (sc->sc_rx_ring) {
740 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
741 sc->sc_rx_ring = NULL;
742 }
743 return -1;
744 }
745
746 static int
747 hvn_rx_ring_destroy(struct hvn_softc *sc)
748 {
749 struct hvn_nvs_rxbuf_disconn cmd;
750 uint64_t tid;
751
752 if (sc->sc_rx_ring == NULL)
753 return 0;
754
755 memset(&cmd, 0, sizeof(cmd));
756 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
757 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
758
759 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
760 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
761 return -1;
762
763 delay(100);
764
765 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
766 sc->sc_rx_hndl = 0;
767
768 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
769 sc->sc_rx_ring = NULL;
770
771 return 0;
772 }
773
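/*
 * Set up the transmit side: one DMA region holding an RNDIS packet
 * message per descriptor (HVN_RNDIS_PKT_LEN rounded up to 128 bytes)
 * plus a per-descriptor DMA map for the mbuf fragments.  Descriptor
 * ids start at HVN_NVS_CHIM_SIG.
 */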
774 static int
775 hvn_tx_ring_create(struct hvn_softc *sc)
776 {
777 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
778 struct hvn_tx_desc *txd;
779 bus_dma_segment_t *seg;
780 size_t msgsize;
781 int i, rsegs;
782 paddr_t pa;
783
784 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
785
786 /* Allocate memory to store RNDIS messages */
787 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
788 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
789 DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
790 device_xname(sc->sc_dev));
791 goto errout;
792 }
793 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
794 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
795 DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
796 device_xname(sc->sc_dev));
797 goto errout;
798 }
799 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
800 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
801 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
802 DPRINTF("%s: failed to create map for RDNIS messages\n",
803 device_xname(sc->sc_dev));
804 goto errout;
805 }
806 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
807 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
808 DPRINTF("%s: failed to create map for RDNIS messages\n",
809 device_xname(sc->sc_dev));
810 goto errout;
811 }
812
813 for (i = 0; i < HVN_TX_DESC; i++) {
814 txd = &sc->sc_tx_desc[i];
815 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
816 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
817 &txd->txd_dmap)) {
818 DPRINTF("%s: failed to create map for TX descriptors\n",
819 device_xname(sc->sc_dev));
820 goto errout;
821 }
822 seg = &sc->sc_tx_rmap->dm_segs[0];
823 pa = seg->ds_addr + (msgsize * i);
824 txd->txd_gpa.gpa_page = atop(pa);
825 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
826 txd->txd_gpa.gpa_len = msgsize;
827 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
828 txd->txd_id = i + HVN_NVS_CHIM_SIG;
829 txd->txd_ready = 1;
830 }
831 sc->sc_tx_avail = HVN_TX_DESC;
832
833 return 0;
834
835 errout:
836 hvn_tx_ring_destroy(sc);
837 return -1;
838 }
839
840 static void
841 hvn_tx_ring_destroy(struct hvn_softc *sc)
842 {
843 struct hvn_tx_desc *txd;
844 int i;
845
846 for (i = 0; i < HVN_TX_DESC; i++) {
847 txd = &sc->sc_tx_desc[i];
848 if (txd->txd_dmap == NULL)
849 continue;
850 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
851 0, txd->txd_dmap->dm_mapsize,
852 BUS_DMASYNC_POSTWRITE);
853 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
854 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
855 txd->txd_dmap = NULL;
856 if (txd->txd_buf == NULL)
857 continue;
858 m_freem(txd->txd_buf);
859 txd->txd_buf = NULL;
860 }
861 if (sc->sc_tx_rmap != NULL) {
862 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
863 0, sc->sc_tx_rmap->dm_mapsize,
864 BUS_DMASYNC_POSTWRITE);
865 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
866 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
867 sc->sc_tx_rmap = NULL;
868 }
869 if (sc->sc_tx_msgs != NULL) {
870 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
871
872 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
873 msgsize * HVN_TX_DESC);
874 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
875 sc->sc_tx_msgs = NULL;
876 }
877 }
878
879 static int
880 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
881 {
882 size_t addrlen = ETHER_ADDR_LEN;
883 int rv;
884
885 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
886 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
887 rv = -1;
888 return rv;
889 }
890
891 static void
892 hvn_get_link_status(struct hvn_softc *sc)
893 {
894 uint32_t state;
895 size_t len = sizeof(state);
896
897 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
898 &state, &len) == 0)
899 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
900 LINK_STATE_UP : LINK_STATE_DOWN;
901 }
902
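/*
 * Open the VMBus channel with a ring sized for all outstanding RNDIS
 * control and data messages, negotiate the NVS protocol version from
 * newest to oldest, send the NDIS configuration (MTU, VLAN) for NVS 2
 * and newer, and finally initialize NDIS 6.1 or 6.30 depending on the
 * negotiated protocol.
 */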
903 static int
904 hvn_nvs_attach(struct hvn_softc *sc)
905 {
906 static const uint32_t protos[] = {
907 HVN_NVS_PROTO_VERSION_5,
908 HVN_NVS_PROTO_VERSION_4,
909 HVN_NVS_PROTO_VERSION_2,
910 HVN_NVS_PROTO_VERSION_1
911 };
912 struct hvn_nvs_init cmd;
913 struct hvn_nvs_init_resp *rsp;
914 struct hvn_nvs_ndis_init ncmd;
915 struct hvn_nvs_ndis_conf ccmd;
916 uint32_t ndisver, ringsize;
917 uint64_t tid;
918 int i;
919
920 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);
921
922 /* We need to be able to fit all RNDIS control and data messages */
923 ringsize = HVN_RNDIS_CTLREQS *
924 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
925 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
926 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
927
928 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
929
930 /* Associate our interrupt handler with the channel */
931 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
932 hvn_nvs_intr, sc)) {
933 DPRINTF("%s: failed to open channel\n",
934 device_xname(sc->sc_dev));
935 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
936 return -1;
937 }
938
939 memset(&cmd, 0, sizeof(cmd));
940 cmd.nvs_type = HVN_NVS_TYPE_INIT;
941 for (i = 0; i < __arraycount(protos); i++) {
942 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
943 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
944 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
945 return -1;
946
947 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
948 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
949 sc->sc_proto = protos[i];
950 break;
951 }
952 }
953 if (i == __arraycount(protos)) {
954 DPRINTF("%s: failed to negotiate NVSP version\n",
955 device_xname(sc->sc_dev));
956 return -1;
957 }
958
959 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
960 memset(&ccmd, 0, sizeof(ccmd));
961 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
962 ccmd.nvs_mtu = HVN_MAXMTU;
963 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
964
965 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
966 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
967 return -1;
968 }
969
970 memset(&ncmd, 0, sizeof(ncmd));
971 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
972 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
973 ndisver = NDIS_VERSION_6_1;
974 else
975 ndisver = NDIS_VERSION_6_30;
976 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
977 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
978
979 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
980 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
981 return -1;
982
983 sc->sc_ndisver = ndisver;
984
985 return 0;
986 }
987
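/*
 * Channel interrupt handler.  Drains the channel and dispatches on the
 * packet type: completions either wake up hvn_nvs_cmd() (the response
 * is copied into sc_nvsrsp) or finish transmissions via hvn_txeof();
 * receive-buffer packets are handed to hvn_rndis_input(); inband
 * send-table notifications are ignored.  A deferred if_start is
 * scheduled whenever a transmit descriptor was released.
 */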
988 static void
989 hvn_nvs_intr(void *arg)
990 {
991 struct hvn_softc *sc = arg;
992 struct ifnet *ifp = SC2IFP(sc);
993 struct vmbus_chanpkt_hdr *cph;
994 const struct hvn_nvs_hdr *nvs;
995 uint64_t rid;
996 uint32_t rlen;
997 int rv;
998 bool dotx = false;
999
1000 for (;;) {
1001 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
1002 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
1003 if (rv != 0 || rlen == 0) {
1004 if (rv != EAGAIN)
1005 device_printf(sc->sc_dev,
1006 "failed to receive an NVSP packet\n");
1007 break;
1008 }
1009 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
1010 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
1011
1012 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1013 switch (nvs->nvs_type) {
1014 case HVN_NVS_TYPE_INIT_RESP:
1015 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1016 case HVN_NVS_TYPE_CHIM_CONNRESP:
1017 case HVN_NVS_TYPE_SUBCH_RESP:
1018 /* copy the response back */
1019 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1020 sc->sc_nvsdone = 1;
1021 wakeup(&sc->sc_nvsrsp);
1022 break;
1023 case HVN_NVS_TYPE_RNDIS_ACK:
1024 dotx = true;
1025 hvn_txeof(sc, cph->cph_tid);
1026 break;
1027 default:
1028 device_printf(sc->sc_dev,
1029 "unhandled NVSP packet type %u "
1030 "on completion\n", nvs->nvs_type);
1031 break;
1032 }
1033 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1034 switch (nvs->nvs_type) {
1035 case HVN_NVS_TYPE_RNDIS:
1036 hvn_rndis_input(sc, cph->cph_tid, cph);
1037 break;
1038 default:
1039 device_printf(sc->sc_dev,
1040 "unhandled NVSP packet type %u "
1041 "on receive\n", nvs->nvs_type);
1042 break;
1043 }
1044 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
1045 switch (nvs->nvs_type) {
1046 case HVN_NVS_TYPE_TXTBL_NOTE:
1047 /* Useless; ignore */
1048 break;
1049 default:
1050 device_printf(sc->sc_dev,
1051 "got notify, nvs type %u\n", nvs->nvs_type);
1052 break;
1053 }
1054 } else
1055 device_printf(sc->sc_dev,
1056 "unknown NVSP packet type %u\n", cph->cph_type);
1057 }
1058
1059 if (dotx)
1060 if_schedule_deferred_start(ifp);
1061 }
1062
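/*
 * Send an NVS command over the channel.  EAGAIN from
 * vmbus_channel_send() is retried up to 10 times.  When timo is
 * non-zero the request asks for a completion and the caller waits for
 * hvn_nvs_intr() to set sc_nvsdone; while cold the channel is polled
 * with delay() instead of sleeping.
 */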
1063 static int
1064 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1065 int timo)
1066 {
1067 struct hvn_nvs_hdr *hdr = cmd;
1068 int tries = 10;
1069 int rv, s;
1070
1071 sc->sc_nvsdone = 0;
1072
1073 do {
1074 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1075 tid, VMBUS_CHANPKT_TYPE_INBAND,
1076 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1077 if (rv == EAGAIN) {
1078 if (cold)
1079 delay(1000);
1080 else
1081 tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
1082 } else if (rv) {
1083 DPRINTF("%s: NVSP operation %u send error %d\n",
1084 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1085 return rv;
1086 }
1087 } while (rv != 0 && --tries > 0);
1088
1089 if (tries == 0 && rv != 0) {
1090 device_printf(sc->sc_dev,
1091 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1092 return rv;
1093 }
1094
1095 if (timo == 0)
1096 return 0;
1097
1098 do {
1099 if (cold) {
1100 delay(1000);
1101 s = splnet();
1102 hvn_nvs_intr(sc);
1103 splx(s);
1104 } else
1105 tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
1106 mstohz(1));
1107 } while (--timo > 0 && sc->sc_nvsdone != 1);
1108
1109 if (timo == 0 && sc->sc_nvsdone != 1) {
1110 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1111 hdr->nvs_type);
1112 return ETIMEDOUT;
1113 }
1114 return 0;
1115 }
1116
1117 static int
1118 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1119 {
1120 struct hvn_nvs_rndis_ack cmd;
1121 int tries = 5;
1122 int rv;
1123
1124 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1125 cmd.nvs_status = HVN_NVS_STATUS_OK;
1126 do {
1127 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1128 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1129 if (rv == EAGAIN)
1130 delay(10);
1131 else if (rv) {
1132 DPRINTF("%s: NVSP acknowledgement error %d\n",
1133 device_xname(sc->sc_dev), rv);
1134 return rv;
1135 }
1136 } while (rv != 0 && --tries > 0);
1137 return rv;
1138 }
1139
1140 static void
1141 hvn_nvs_detach(struct hvn_softc *sc)
1142 {
1143
1144 if (vmbus_channel_close(sc->sc_chan) == 0) {
1145 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1146 sc->sc_nvsbuf = NULL;
1147 }
1148 }
1149
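/*
 * RNDIS control requests cycle through three queues: hvn_alloc_cmd()
 * takes a request off the free queue, hvn_submit_cmd() places it on the
 * submission queue before it is sent, hvn_complete_cmd() moves it to
 * the completion queue when the matching response arrives, and
 * hvn_free_cmd() clears it and returns it to the free queue.
 * hvn_rollback_cmd() removes a request that never completed from the
 * submission queue.
 */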
1150 static inline struct rndis_cmd *
1151 hvn_alloc_cmd(struct hvn_softc *sc)
1152 {
1153 struct rndis_cmd *rc;
1154
1155 mutex_enter(&sc->sc_cntl_fqlck);
1156 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1157 /* XXX use condvar(9) instead of mtsleep */
1158 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1159 &sc->sc_cntl_fqlck);
1160 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1161 mutex_exit(&sc->sc_cntl_fqlck);
1162 return rc;
1163 }
1164
1165 static inline void
1166 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1167 {
1168
1169 mutex_enter(&sc->sc_cntl_sqlck);
1170 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1171 mutex_exit(&sc->sc_cntl_sqlck);
1172 }
1173
1174 static inline struct rndis_cmd *
1175 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1176 {
1177 struct rndis_cmd *rc;
1178
1179 mutex_enter(&sc->sc_cntl_sqlck);
1180 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1181 if (rc->rc_id == id) {
1182 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1183 break;
1184 }
1185 }
1186 mutex_exit(&sc->sc_cntl_sqlck);
1187 if (rc != NULL) {
1188 mutex_enter(&sc->sc_cntl_cqlck);
1189 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1190 mutex_exit(&sc->sc_cntl_cqlck);
1191 }
1192 return rc;
1193 }
1194
1195 static inline void
1196 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1197 {
1198
1199 mutex_enter(&sc->sc_cntl_cqlck);
1200 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1201 mutex_exit(&sc->sc_cntl_cqlck);
1202 }
1203
1204 static inline int
1205 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1206 {
1207 struct rndis_cmd *rn;
1208
1209 mutex_enter(&sc->sc_cntl_sqlck);
1210 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1211 if (rn == rc) {
1212 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1213 mutex_exit(&sc->sc_cntl_sqlck);
1214 return 0;
1215 }
1216 }
1217 mutex_exit(&sc->sc_cntl_sqlck);
1218 return -1;
1219 }
1220
1221 static inline void
1222 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1223 {
1224
1225 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1226 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1227 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1228 mutex_enter(&sc->sc_cntl_fqlck);
1229 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1230 mutex_exit(&sc->sc_cntl_fqlck);
1231 wakeup(&sc->sc_cntl_fq);
1232 }
1233
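/*
 * Allocate DMA-able buffers for the RNDIS control requests, populate
 * the free queue and issue REMOTE_NDIS_INITIALIZE_MSG.  On success the
 * prototype NVS data message used by hvn_rndis_output() is prepared.
 */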
1234 static int
1235 hvn_rndis_attach(struct hvn_softc *sc)
1236 {
1237 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1238 struct rndis_init_req *req;
1239 struct rndis_init_comp *cmp;
1240 struct rndis_cmd *rc;
1241 int i, rv;
1242
1243 /* RNDIS control message queues */
1244 TAILQ_INIT(&sc->sc_cntl_sq);
1245 TAILQ_INIT(&sc->sc_cntl_cq);
1246 TAILQ_INIT(&sc->sc_cntl_fq);
1247 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1248 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1249 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1250
1251 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1252 rc = &sc->sc_cntl_msgs[i];
1253 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1254 dmaflags, &rc->rc_dmap)) {
1255 DPRINTF("%s: failed to create RNDIS command map\n",
1256 device_xname(sc->sc_dev));
1257 goto errout;
1258 }
1259 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1260 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1261 DPRINTF("%s: failed to allocate RNDIS command\n",
1262 device_xname(sc->sc_dev));
1263 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1264 goto errout;
1265 }
1266 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1267 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1268 DPRINTF("%s: failed to allocate RNDIS command\n",
1269 device_xname(sc->sc_dev));
1270 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1271 rc->rc_nsegs);
1272 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1273 goto errout;
1274 }
1275 memset(rc->rc_req, 0, PAGE_SIZE);
1276 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1277 PAGE_SIZE, NULL, dmaflags)) {
1278 DPRINTF("%s: failed to load RNDIS command map\n",
1279 device_xname(sc->sc_dev));
1280 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1281 rc->rc_nsegs);
1282 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1283 goto errout;
1284 }
1285 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1286 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1287 }
1288
1289 rc = hvn_alloc_cmd(sc);
1290
1291 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1292 BUS_DMASYNC_PREREAD);
1293
1294 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1295
1296 req = rc->rc_req;
1297 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1298 req->rm_len = sizeof(*req);
1299 req->rm_rid = rc->rc_id;
1300 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1301 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1302 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1303
1304 rc->rc_cmplen = sizeof(*cmp);
1305
1306 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1307 BUS_DMASYNC_PREWRITE);
1308
1309 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1310 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1311 device_xname(sc->sc_dev), rv);
1312 hvn_free_cmd(sc, rc);
1313 goto errout;
1314 }
1315 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1316 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1317 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1318 device_xname(sc->sc_dev), cmp->rm_status);
1319 hvn_free_cmd(sc, rc);
1320 goto errout;
1321 }
1322
1323 hvn_free_cmd(sc, rc);
1324
1325 /* Initialize RNDIS Data command */
1326 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1327 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1328 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1329 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1330
1331 return 0;
1332
1333 errout:
1334 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1335 rc = &sc->sc_cntl_msgs[i];
1336 if (rc->rc_req == NULL)
1337 continue;
1338 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1339 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1340 rc->rc_req = NULL;
1341 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1342 }
1343 return -1;
1344 }
1345
1346 static int
1347 hvn_set_capabilities(struct hvn_softc *sc)
1348 {
1349 struct ndis_offload_params params;
1350 size_t len = sizeof(params);
1351
1352 	memset(&params, 0, sizeof(params));
1353
1354 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1355 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1356 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1357 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1358 } else {
1359 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1360 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1361 }
1362
1363 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1364 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1365 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1366 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1367 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1368 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1369 }
1370
1371 	return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1372 }
1373
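/*
 * Issue an RNDIS control request.  The request lives in the command's
 * DMA page and is sent as a single-element scatter/gather list; timo
 * roughly counts milliseconds (each wait iteration sleeps or delays
 * about 1ms), and while cold the channel is polled via hvn_nvs_intr().
 * A request that never completed is rolled back from the submission
 * queue; if it raced with a late completion it is released normally
 * and treated as successful.
 */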
1374 static int
1375 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1376 {
1377 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1378 struct rndis_msghdr *hdr = rc->rc_req;
1379 struct vmbus_gpa sgl[1];
1380 int tries = 10;
1381 int rv, s;
1382
1383 KASSERT(timo > 0);
1384
1385 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1386 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1387 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1388
1389 sgl[0].gpa_page = rc->rc_gpa;
1390 sgl[0].gpa_len = hdr->rm_len;
1391 sgl[0].gpa_ofs = 0;
1392
1393 rc->rc_done = 0;
1394
1395 hvn_submit_cmd(sc, rc);
1396
1397 do {
1398 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1399 sizeof(*msg), rc->rc_id);
1400 if (rv == EAGAIN) {
1401 if (cold)
1402 delay(1000);
1403 else
1404 tsleep(rc, PRIBIO, "rndisout", mstohz(1));
1405 } else if (rv) {
1406 DPRINTF("%s: RNDIS operation %u send error %d\n",
1407 device_xname(sc->sc_dev), hdr->rm_type, rv);
1408 hvn_rollback_cmd(sc, rc);
1409 return rv;
1410 }
1411 } while (rv != 0 && --tries > 0);
1412
1413 if (tries == 0 && rv != 0) {
1414 device_printf(sc->sc_dev,
1415 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1416 return rv;
1417 }
1418 if (vmbus_channel_is_revoked(sc->sc_chan)) {
1419 /* No response */
1420 return 0;
1421 }
1422
1423 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1424 BUS_DMASYNC_POSTWRITE);
1425
1426 do {
1427 if (cold) {
1428 delay(1000);
1429 s = splnet();
1430 hvn_nvs_intr(sc);
1431 splx(s);
1432 } else
1433 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
1434 } while (--timo > 0 && rc->rc_done != 1);
1435
1436 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1437 BUS_DMASYNC_POSTREAD);
1438
1439 if (rc->rc_done != 1) {
1440 rv = timo == 0 ? ETIMEDOUT : EINTR;
1441 if (hvn_rollback_cmd(sc, rc)) {
1442 hvn_release_cmd(sc, rc);
1443 rv = 0;
1444 } else if (rv == ETIMEDOUT) {
1445 device_printf(sc->sc_dev,
1446 "RNDIS operation %u timed out\n", hdr->rm_type);
1447 }
1448 return rv;
1449 }
1450
1451 hvn_release_cmd(sc, rc);
1452 return 0;
1453 }
1454
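/*
 * Parse the ranges of an incoming receive-buffer packet.  Each range
 * points into the shared Rx ring and starts with an RNDIS message type:
 * data packets go to hvn_rxeof(), control completions to
 * hvn_rndis_complete() and status indications to hvn_rndis_status().
 * The packet is acknowledged to the host afterwards.
 */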
1455 static void
1456 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1457 {
1458 struct vmbus_chanpkt_prplist *cp = arg;
1459 uint32_t off, len, type;
1460 int i;
1461
1462 if (sc->sc_rx_ring == NULL) {
1463 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1464 return;
1465 }
1466
1467 for (i = 0; i < cp->cp_range_cnt; i++) {
1468 off = cp->cp_range[i].gpa_ofs;
1469 len = cp->cp_range[i].gpa_len;
1470
1471 KASSERT(off + len <= sc->sc_rx_size);
1472 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1473
1474 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1475 switch (type) {
1476 /* data message */
1477 case REMOTE_NDIS_PACKET_MSG:
1478 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1479 break;
1480 /* completion messages */
1481 case REMOTE_NDIS_INITIALIZE_CMPLT:
1482 case REMOTE_NDIS_QUERY_CMPLT:
1483 case REMOTE_NDIS_SET_CMPLT:
1484 case REMOTE_NDIS_RESET_CMPLT:
1485 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1486 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1487 break;
1488 /* notification message */
1489 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1490 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1491 break;
1492 default:
1493 device_printf(sc->sc_dev,
1494 "unhandled RNDIS message type %u\n", type);
1495 break;
1496 }
1497 }
1498
1499 hvn_nvs_ack(sc, tid);
1500 }
1501
1502 static inline struct mbuf *
1503 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1504 {
1505 struct ifnet *ifp = SC2IFP(sc);
1506 struct mbuf *m;
1507 size_t size = len + ETHER_ALIGN;
1508
1509 MGETHDR(m, M_NOWAIT, MT_DATA);
1510 if (m == NULL)
1511 return NULL;
1512
1513 if (size > MHLEN) {
1514 if (size <= MCLBYTES)
1515 MCLGET(m, M_NOWAIT);
1516 else
1517 MEXTMALLOC(m, size, M_NOWAIT);
1518 if ((m->m_flags & M_EXT) == 0) {
1519 m_freem(m);
1520 return NULL;
1521 }
1522 }
1523
1524 m->m_len = m->m_pkthdr.len = size;
1525 m_adj(m, ETHER_ALIGN);
1526 m_copyback(m, 0, len, buf);
1527 m_set_rcvif(m, ifp);
1528 return m;
1529 }
1530
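/*
 * Convert an RNDIS data packet into an mbuf.  The data and per-packet
 * info offsets are validated against the received length, then the
 * pktinfo elements are walked to recover receive checksum flags and the
 * 802.1Q tag before the mbuf is queued on the percpu input queue.
 */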
1531 static void
1532 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1533 {
1534 struct ifnet *ifp = SC2IFP(sc);
1535 struct rndis_packet_msg *pkt;
1536 struct rndis_pktinfo *pi;
1537 uint32_t csum, vlan;
1538 struct mbuf *m;
1539
1540 if (!(ifp->if_flags & IFF_RUNNING))
1541 return;
1542
1543 if (len < sizeof(*pkt)) {
1544 device_printf(sc->sc_dev, "data packet too short: %u\n",
1545 len);
1546 return;
1547 }
1548
1549 pkt = (struct rndis_packet_msg *)buf;
1550 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1551 device_printf(sc->sc_dev,
1552 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1553 pkt->rm_datalen);
1554 return;
1555 }
1556
1557 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1558 pkt->rm_datalen)) == NULL) {
1559 ifp->if_ierrors++;
1560 return;
1561 }
1562
1563 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1564 device_printf(sc->sc_dev,
1565 "pktinfo is out of bounds: %u@%u vs %u\n",
1566 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1567 goto done;
1568 }
1569
1570 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1571 pkt->rm_pktinfooffset);
1572 while (pkt->rm_pktinfolen > 0) {
1573 if (pi->rm_size > pkt->rm_pktinfolen) {
1574 device_printf(sc->sc_dev,
1575 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1576 pkt->rm_pktinfolen);
1577 break;
1578 }
1579
1580 switch (pi->rm_type) {
1581 case NDIS_PKTINFO_TYPE_CSUM:
1582 memcpy(&csum, pi->rm_data, sizeof(csum));
1583 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1584 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1585 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1586 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1587 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1588 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1589 break;
1590 case NDIS_PKTINFO_TYPE_VLAN:
1591 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1592 if (vlan != 0xffffffff) {
1593 m->m_pkthdr.ether_vtag =
1594 NDIS_VLAN_INFO_ID(vlan) |
1595 (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1596 m->m_flags |= M_VLANTAG;
1597 }
1598 break;
1599 default:
1600 DPRINTF("%s: unhandled pktinfo type %u\n",
1601 device_xname(sc->sc_dev), pi->rm_type);
1602 break;
1603 }
1604
1605 pkt->rm_pktinfolen -= pi->rm_size;
1606 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1607 }
1608
1609 done:
1610 if_percpuq_enqueue(sc->sc_ipq, m);
1611 }
1612
1613 static void
1614 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1615 {
1616 struct rndis_cmd *rc;
1617 uint32_t id;
1618
1619 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1620 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1621 if (len < rc->rc_cmplen)
1622 device_printf(sc->sc_dev,
1623 "RNDIS response %u too short: %u\n", id, len);
1624 else
1625 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1626 if (len > rc->rc_cmplen &&
1627 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1628 device_printf(sc->sc_dev,
1629 "RNDIS response %u too large: %u\n", id, len);
1630 else if (len > rc->rc_cmplen)
1631 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1632 len - rc->rc_cmplen);
1633 rc->rc_done = 1;
1634 wakeup(rc);
1635 } else {
1636 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1637 device_xname(sc->sc_dev), id);
1638 }
1639 }
1640
1641 static int
1642 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1643 {
1644 uint64_t rid = (uint64_t)txd->txd_id << 32;
1645 int rv;
1646
1647 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1648 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1649 if (rv) {
1650 DPRINTF("%s: RNDIS data send error %d\n",
1651 device_xname(sc->sc_dev), rv);
1652 return rv;
1653 }
1654 return 0;
1655 }
1656
1657 static void
1658 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1659 {
1660 struct ifnet *ifp = SC2IFP(sc);
1661 uint32_t status;
1662 int link_state = sc->sc_link_state;
1663
1664 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1665 switch (status) {
1666 case RNDIS_STATUS_MEDIA_CONNECT:
1667 sc->sc_link_state = LINK_STATE_UP;
1668 break;
1669 case RNDIS_STATUS_MEDIA_DISCONNECT:
1670 sc->sc_link_state = LINK_STATE_DOWN;
1671 break;
1672 /* Ignore these */
1673 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1674 return;
1675 default:
1676 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1677 status);
1678 return;
1679 }
1680 if (link_state != sc->sc_link_state)
1681 if_link_state_change(ifp, sc->sc_link_state);
1682 }
1683
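/*
 * Query an NDIS OID.  The response data is copied out of the command's
 * completion buffer into *res and *length is updated to the returned
 * info buffer length; hvn_rndis_set() is the corresponding setter and
 * places the info buffer immediately after the set request.
 */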
1684 static int
1685 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1686 {
1687 struct rndis_cmd *rc;
1688 struct rndis_query_req *req;
1689 struct rndis_query_comp *cmp;
1690 size_t olength = *length;
1691 int rv;
1692
1693 rc = hvn_alloc_cmd(sc);
1694
1695 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1696 BUS_DMASYNC_PREREAD);
1697
1698 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1699
1700 req = rc->rc_req;
1701 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1702 req->rm_len = sizeof(*req);
1703 req->rm_rid = rc->rc_id;
1704 req->rm_oid = oid;
1705 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1706
1707 rc->rc_cmplen = sizeof(*cmp);
1708
1709 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1710 BUS_DMASYNC_PREWRITE);
1711
1712 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1713 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1714 device_xname(sc->sc_dev), rv);
1715 hvn_free_cmd(sc, rc);
1716 return rv;
1717 }
1718
1719 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1720 switch (cmp->rm_status) {
1721 case RNDIS_STATUS_SUCCESS:
1722 if (cmp->rm_infobuflen > olength) {
1723 rv = EINVAL;
1724 break;
1725 }
1726 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1727 *length = cmp->rm_infobuflen;
1728 break;
1729 default:
1730 *length = 0;
1731 rv = EIO;
1732 break;
1733 }
1734
1735 hvn_free_cmd(sc, rc);
1736 return rv;
1737 }
1738
1739 static int
1740 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1741 {
1742 struct rndis_cmd *rc;
1743 struct rndis_set_req *req;
1744 struct rndis_set_comp *cmp;
1745 int rv;
1746
1747 rc = hvn_alloc_cmd(sc);
1748
1749 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1750 BUS_DMASYNC_PREREAD);
1751
1752 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1753
1754 req = rc->rc_req;
1755 req->rm_type = REMOTE_NDIS_SET_MSG;
1756 req->rm_len = sizeof(*req) + length;
1757 req->rm_rid = rc->rc_id;
1758 req->rm_oid = oid;
1759 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1760
1761 rc->rc_cmplen = sizeof(*cmp);
1762
1763 if (length > 0) {
1764 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1765 req->rm_infobuflen = length;
1766 memcpy(req + 1, data, length);
1767 }
1768
1769 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1770 BUS_DMASYNC_PREWRITE);
1771
1772 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1773 DPRINTF("%s: SET_MSG failed, error %d\n",
1774 device_xname(sc->sc_dev), rv);
1775 hvn_free_cmd(sc, rc);
1776 return rv;
1777 }
1778
1779 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1780 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1781 rv = EIO;
1782
1783 hvn_free_cmd(sc, rc);
1784 return rv;
1785 }
1786
1787 static int
1788 hvn_rndis_open(struct hvn_softc *sc)
1789 {
1790 uint32_t filter;
1791 int rv;
1792
1793 if (sc->sc_promisc)
1794 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1795 else
1796 filter = RNDIS_PACKET_TYPE_BROADCAST |
1797 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1798 RNDIS_PACKET_TYPE_DIRECTED;
1799
1800 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1801 &filter, sizeof(filter));
1802 if (rv) {
1803 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1804 device_xname(sc->sc_dev), filter);
1805 }
1806 return rv;
1807 }
1808
1809 static int
1810 hvn_rndis_close(struct hvn_softc *sc)
1811 {
1812 uint32_t filter = 0;
1813 int rv;
1814
1815 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1816 &filter, sizeof(filter));
1817 if (rv) {
1818 DPRINTF("%s: failed to clear RNDIS filter\n",
1819 device_xname(sc->sc_dev));
1820 }
1821 return rv;
1822 }
1823
1824 static void
1825 hvn_rndis_detach(struct hvn_softc *sc)
1826 {
1827 struct rndis_cmd *rc;
1828 struct rndis_halt_req *req;
1829 int rv;
1830
1831 rc = hvn_alloc_cmd(sc);
1832
1833 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1834 BUS_DMASYNC_PREREAD);
1835
1836 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1837
1838 req = rc->rc_req;
1839 req->rm_type = REMOTE_NDIS_HALT_MSG;
1840 req->rm_len = sizeof(*req);
1841 req->rm_rid = rc->rc_id;
1842
1843 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1844 BUS_DMASYNC_PREWRITE);
1845
1846 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1847 DPRINTF("%s: HALT_MSG failed, error %d\n",
1848 device_xname(sc->sc_dev), rv);
1849 }
1850 hvn_free_cmd(sc, rc);
1851
1852 mutex_destroy(&sc->sc_cntl_sqlck);
1853 mutex_destroy(&sc->sc_cntl_cqlck);
1854 mutex_destroy(&sc->sc_cntl_fqlck);
1855 }
1856