/*	$NetBSD: if_hvn.c,v 1.15.2.1 2020/02/29 20:19:07 ad Exp $	*/
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.15.2.1 2020/02/29 20:19:07 ad Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70 #ifndef EVL_CFI_BITS
71 #define EVL_CFI_BITS 12
72 #endif
73
74 #define HVN_NVS_MSGSIZE 32
75 #define HVN_NVS_BUFSIZE PAGE_SIZE
76
77 /*
78 * RNDIS control interface
79 */
80 #define HVN_RNDIS_CTLREQS 4
81 #define HVN_RNDIS_BUFSIZE 512
82
/*
 * One RNDIS control request and its response.  Requests cycle through
 * the free, submission and completion queues kept in the softc (see
 * sc_cntl_fq/sq/cq); rc_done is set once the response has arrived.
 */
struct rndis_cmd {
	uint32_t		 rc_id;		/* request identifier */
	struct hvn_nvs_rndis	 rc_msg;	/* NVS envelope for the request */
	void			*rc_req;	/* DMA-able request buffer */
	bus_dmamap_t		 rc_dmap;	/* map covering rc_req */
	bus_dma_segment_t	 rc_segs;	/* backing segment of rc_req */
	int			 rc_nsegs;	/* segment count for rc_segs */
	uint64_t		 rc_gpa;	/* guest physical addr of rc_req */
	struct rndis_packet_msg	 rc_cmp;	/* completion message header */
	uint32_t		 rc_cmplen;	/* length of the completion */
	uint8_t			 rc_cmpbuf[HVN_RNDIS_BUFSIZE]; /* completion payload */
	int			 rc_done;	/* response received */
	TAILQ_ENTRY(rndis_cmd)	 rc_entry;	/* queue linkage */
};
TAILQ_HEAD(rndis_queue, rndis_cmd);
98
99 #define HVN_MAXMTU (9 * 1024)
100
101 #define HVN_RNDIS_XFER_SIZE 2048
102
103 /*
104 * Tx ring
105 */
106 #define HVN_TX_DESC 256
107 #define HVN_TX_FRAGS 15 /* 31 is the max */
108 #define HVN_TX_FRAG_SIZE PAGE_SIZE
109 #define HVN_TX_PKT_SIZE 16384
110
111 #define HVN_RNDIS_PKT_LEN \
112 (sizeof(struct rndis_packet_msg) + \
113 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
114 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
115
/*
 * A transmit descriptor: one in-flight packet plus the scatter/gather
 * list handed to the host.  Slot 0 of txd_sgl describes the
 * pre-allocated RNDIS message (txd_req/txd_gpa); the mbuf's DMA
 * segments fill the remaining slots (see hvn_encap()).
 */
struct hvn_tx_desc {
	uint32_t		 txd_id;	/* HVN_NVS_CHIM_SIG + index */
	int			 txd_ready;	/* free for re-use */
	struct vmbus_gpa	 txd_sgl[HVN_TX_FRAGS + 1];
	int			 txd_nsge;	/* valid entries in txd_sgl */
	struct mbuf		*txd_buf;	/* mbuf being transmitted */
	bus_dmamap_t		 txd_dmap;	/* DMA map for txd_buf */
	struct vmbus_gpa	 txd_gpa;	/* location of txd_req */
	struct rndis_packet_msg	*txd_req;	/* this slot's RNDIS message */
};
126
/*
 * Per-instance state for a Hyper-V NetVSC network interface.
 */
struct hvn_softc {
	device_t		 sc_dev;

	struct vmbus_softc	*sc_vmbus;	/* parent VMBus */
	struct vmbus_channel	*sc_chan;	/* channel to the host */
	bus_dma_tag_t		 sc_dmat;

	struct ethercom		 sc_ec;
	struct ifmedia		 sc_media;
	struct if_percpuq	*sc_ipq;	/* per-CPU input queue */
	int			 sc_link_state;
	int			 sc_promisc;

	uint32_t		 sc_flags;
#define	HVN_SCF_ATTACHED	__BIT(0)

	/* NVS protocol */
	int			 sc_proto;	/* negotiated NVSP version */
	uint32_t		 sc_nvstid;	/* transaction id counter */
	uint8_t			 sc_nvsrsp[HVN_NVS_MSGSIZE]; /* last response */
	uint8_t			*sc_nvsbuf;	/* channel receive buffer */
	int			 sc_nvsdone;	/* a response has arrived */

	/* RNDIS protocol */
	int			 sc_ndisver;	/* negotiated NDIS version */
	uint32_t		 sc_rndisrid;
	struct rndis_queue	 sc_cntl_sq;	/* submission queue */
	kmutex_t		 sc_cntl_sqlck;
	struct rndis_queue	 sc_cntl_cq;	/* completion queue */
	kmutex_t		 sc_cntl_cqlck;
	struct rndis_queue	 sc_cntl_fq;	/* free queue */
	kmutex_t		 sc_cntl_fqlck;
	struct rndis_cmd	 sc_cntl_msgs[HVN_RNDIS_CTLREQS];
	struct hvn_nvs_rndis	 sc_data_msg;

	/* Rx ring */
	uint8_t			*sc_rx_ring;
	int			 sc_rx_size;
	uint32_t		 sc_rx_hndl;	/* GPADL handle for the ring */
	struct hyperv_dma	 sc_rx_dma;

	/* Tx ring */
	uint32_t		 sc_tx_next;	/* next candidate descriptor */
	uint32_t		 sc_tx_avail;	/* free descriptors (atomic) */
	struct hvn_tx_desc	 sc_tx_desc[HVN_TX_DESC];
	bus_dmamap_t		 sc_tx_rmap;	/* map for the RNDIS messages */
	uint8_t			*sc_tx_msgs;	/* per-descriptor RNDIS messages */
	bus_dma_segment_t	 sc_tx_mseg;
};
176
177 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
178 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
179
180
181 static int hvn_match(device_t, cfdata_t, void *);
182 static void hvn_attach(device_t, device_t, void *);
183 static int hvn_detach(device_t, int);
184
185 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
186 hvn_match, hvn_attach, hvn_detach, NULL);
187
188 static int hvn_ioctl(struct ifnet *, u_long, void *);
189 static int hvn_media_change(struct ifnet *);
190 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
191 static int hvn_iff(struct hvn_softc *);
192 static int hvn_init(struct ifnet *);
193 static void hvn_stop(struct ifnet *, int);
194 static void hvn_start(struct ifnet *);
195 static int hvn_encap(struct hvn_softc *, struct mbuf *,
196 struct hvn_tx_desc **);
197 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
198 static void hvn_txeof(struct hvn_softc *, uint64_t);
199 static int hvn_rx_ring_create(struct hvn_softc *);
200 static int hvn_rx_ring_destroy(struct hvn_softc *);
201 static int hvn_tx_ring_create(struct hvn_softc *);
202 static void hvn_tx_ring_destroy(struct hvn_softc *);
203 static int hvn_set_capabilities(struct hvn_softc *);
204 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
205 static void hvn_get_link_status(struct hvn_softc *);
206
207 /* NSVP */
208 static int hvn_nvs_attach(struct hvn_softc *);
209 static void hvn_nvs_intr(void *);
210 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
211 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
212 static void hvn_nvs_detach(struct hvn_softc *);
213
214 /* RNDIS */
215 static int hvn_rndis_attach(struct hvn_softc *);
216 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
217 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
218 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
219 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
220 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
221 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
222 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
223 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
224 static int hvn_rndis_open(struct hvn_softc *);
225 static int hvn_rndis_close(struct hvn_softc *);
226 static void hvn_rndis_detach(struct hvn_softc *);
227
228 static int
229 hvn_match(device_t parent, cfdata_t match, void *aux)
230 {
231 struct vmbus_attach_args *aa = aux;
232
233 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
234 return 0;
235 return 1;
236 }
237
/*
 * Attach: bring up the NVS channel, create the receive and transmit
 * rings, configure and register the network interface, start the
 * RNDIS control plane and obtain the hardware address.  On failure the
 * already-constructed pieces are torn down in reverse order.
 */
static void
hvn_attach(device_t parent, device_t self, void *aux)
{
	struct hvn_softc *sc = device_private(self);
	struct vmbus_attach_args *aa = aux;
	struct ifnet *ifp = SC2IFP(sc);
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error;

	sc->sc_dev = self;
	sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
	sc->sc_chan = aa->aa_chan;
	sc->sc_dmat = sc->sc_vmbus->sc_dmat;

	aprint_naive("\n");
	aprint_normal(": Hyper-V NetVSC\n");

	if (hvn_nvs_attach(sc)) {
		aprint_error_dev(self, "failed to init NVSP\n");
		return;
	}

	if (hvn_rx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Rx ring\n");
		goto fail1;
	}

	if (hvn_tx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Tx ring\n");
		goto fail2;
	}

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hvn_ioctl;
	ifp->if_start = hvn_start;
	ifp->if_init = hvn_init;
	ifp->if_stop = hvn_stop;
	/* IPv4/TCP checksum offload is advertised unconditionally. */
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
	/* UDP checksum offload only with NDIS newer than 6.30. */
	if (sc->sc_ndisver > NDIS_VERSION_6_30) {
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
	}
	/* VLAN hardware tagging became available with NVSP version 2. */
	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
		sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
	}

	IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
	    hvn_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	error = if_initialize(ifp);
	if (error) {
		aprint_error_dev(self, "if_initialize failed(%d)\n", error);
		goto fail3;
	}
	sc->sc_ipq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);

	if (hvn_rndis_attach(sc)) {
		aprint_error_dev(self, "failed to init RNDIS\n");
		goto fail3;
	}

	aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
	    sc->sc_proto >> 16, sc->sc_proto & 0xffff,
	    sc->sc_ndisver >> 16 , sc->sc_ndisver & 0xffff);

	if (hvn_set_capabilities(sc)) {
		aprint_error_dev(self, "failed to setup offloading\n");
		goto fail4;
	}

	if (hvn_get_lladdr(sc, enaddr)) {
		aprint_error_dev(self,
		    "failed to obtain an ethernet address\n");
		goto fail4;
	}
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	SET(sc->sc_flags, HVN_SCF_ATTACHED);
	return;

	/* Unwind in reverse order of construction. */
fail4:	hvn_rndis_detach(sc);
	if_percpuq_destroy(sc->sc_ipq);
fail3:	hvn_tx_ring_destroy(sc);
fail2:	hvn_rx_ring_destroy(sc);
fail1:	hvn_nvs_detach(sc);
}
348
/*
 * Detach: stop the interface, unhook it from the network stack, then
 * release the RNDIS, ring and NVS resources in reverse attach order.
 */
static int
hvn_detach(device_t self, int flags)
{
	struct hvn_softc *sc = device_private(self);
	struct ifnet *ifp = SC2IFP(sc);

	/* Nothing to undo if attach never completed. */
	if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
		return 0;

	if (ifp->if_flags & IFF_RUNNING)
		hvn_stop(ifp, 1);

	pmf_device_deregister(self);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	if_percpuq_destroy(sc->sc_ipq);

	hvn_rndis_detach(sc);
	hvn_rx_ring_destroy(sc);
	hvn_tx_ring_destroy(sc);
	hvn_nvs_detach(sc);

	return 0;
}
375
376 static int
377 hvn_ioctl(struct ifnet *ifp, u_long command, void * data)
378 {
379 struct hvn_softc *sc = IFP2SC(ifp);
380 int s, error = 0;
381
382 s = splnet();
383
384 switch (command) {
385 case SIOCSIFFLAGS:
386 if (ifp->if_flags & IFF_UP) {
387 if (ifp->if_flags & IFF_RUNNING)
388 error = ENETRESET;
389 else {
390 error = hvn_init(ifp);
391 if (error)
392 ifp->if_flags &= ~IFF_UP;
393 }
394 } else {
395 if (ifp->if_flags & IFF_RUNNING)
396 hvn_stop(ifp, 1);
397 }
398 break;
399 default:
400 error = ether_ioctl(ifp, command, data);
401 break;
402 }
403
404 if (error == ENETRESET) {
405 if (ifp->if_flags & IFF_RUNNING)
406 hvn_iff(sc);
407 error = 0;
408 }
409
410 splx(s);
411
412 return error;
413 }
414
/*
 * ifmedia change callback.  Only one (manual) media type exists, so
 * there is nothing to reconfigure.
 */
static int
hvn_media_change(struct ifnet *ifp)
{

	return 0;
}
421
422 static void
423 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
424 {
425 struct hvn_softc *sc = IFP2SC(ifp);
426 int link_state;
427
428 link_state = sc->sc_link_state;
429 hvn_get_link_status(sc);
430 if (link_state != sc->sc_link_state)
431 if_link_state_change(ifp, sc->sc_link_state);
432
433 ifmr->ifm_status = IFM_AVALID;
434 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
435 if (sc->sc_link_state == LINK_STATE_UP)
436 ifmr->ifm_status |= IFM_ACTIVE;
437 }
438
439 static int
440 hvn_iff(struct hvn_softc *sc)
441 {
442
443 /* XXX */
444 sc->sc_promisc = 0;
445
446 return 0;
447 }
448
449 static int
450 hvn_init(struct ifnet *ifp)
451 {
452 struct hvn_softc *sc = IFP2SC(ifp);
453 int error;
454
455 hvn_stop(ifp, 0);
456
457 error = hvn_iff(sc);
458 if (error)
459 return error;
460
461 error = hvn_rndis_open(sc);
462 if (error == 0) {
463 ifp->if_flags |= IFF_RUNNING;
464 ifp->if_flags &= ~IFF_OACTIVE;
465 }
466 return error;
467 }
468
469 static void
470 hvn_stop(struct ifnet *ifp, int disable)
471 {
472 struct hvn_softc *sc = IFP2SC(ifp);
473
474 hvn_rndis_close(sc);
475
476 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
477 }
478
/*
 * ifnet start routine: drain the send queue, encapsulating each mbuf
 * chain into a transmit descriptor and handing it to the host.
 */
static void
hvn_start(struct ifnet *ifp)
{
	struct hvn_softc *sc = IFP2SC(ifp);
	struct hvn_tx_desc *txd;
	struct mbuf *m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		if (!sc->sc_tx_avail) {
			/* transient: cleared again when a tx completes */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (hvn_encap(sc, m, &txd)) {
			/* the chain is too large */
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		bpf_mtap(ifp, m, BPF_D_OUT);

		if (hvn_rndis_output(sc, txd)) {
			/* Recycle the descriptor; the packet was not sent. */
			hvn_decap(sc, txd);
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		/*
		 * NOTE(review): hvn_encap() already advances sc_tx_next
		 * while claiming a descriptor; this extra increment only
		 * shifts where the next search starts.  Looks harmless
		 * (the counter is used modulo HVN_TX_DESC) but confirm
		 * it is intentional.
		 */
		sc->sc_tx_next++;
	}
}
519
/*
 * Append a per-packet-info record (VLAN or checksum info) to an RNDIS
 * packet message and account for it in the message's length fields.
 * Returns a pointer to the record's data area for the caller to fill.
 * pktsize is the total size of the caller's message buffer.
 */
static inline char *
hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
    size_t datalen, uint32_t type)
{
	struct rndis_pktinfo *pi;
	size_t pi_size = sizeof(*pi) + datalen;
	char *cp;

	/* The new record must fit into the pre-sized message buffer. */
	KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
	    pktsize);

	/* Place the record directly after any existing ones. */
	cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
	pi = (struct rndis_pktinfo *)cp;
	pi->rm_size = pi_size;
	pi->rm_type = type;
	pi->rm_pktinfooffset = sizeof(*pi);
	pkt->rm_pktinfolen += pi_size;
	pkt->rm_dataoffset += pi_size;	/* the data region moves back too */
	pkt->rm_len += pi_size;

	return (char *)pi->rm_data;
}
542
/*
 * Encapsulate an mbuf chain for transmission: claim a free descriptor,
 * build its RNDIS packet message (with optional VLAN and checksum
 * per-packet info), DMA-load the mbuf and fill the scatter/gather
 * list.  Returns 0 with the descriptor in *txd0, or -1 if the chain
 * cannot be loaded (caller frees the mbuf).
 */
static int
hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
{
	struct hvn_tx_desc *txd;
	struct rndis_packet_msg *pkt;
	bus_dma_segment_t *seg;
	size_t pktlen;
	int i, rv;

	/*
	 * Scan for the next ready descriptor; the caller has checked
	 * sc_tx_avail, so one exists.
	 */
	do {
		txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
		sc->sc_tx_next++;
	} while (!txd->txd_ready);
	txd->txd_ready = 0;

	pkt = txd->txd_req;
	memset(pkt, 0, HVN_RNDIS_PKT_LEN);
	pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
	pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
	pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
	pkt->rm_datalen = m->m_pkthdr.len;
	pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
	pkt->rm_pktinfolen = 0;

	/* Load the mbuf; on EFBIG retry once after defragmenting. */
	rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
	    BUS_DMA_NOWAIT);
	switch (rv) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_NOWAIT) != NULL &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		/*
		 * NOTE(review): the claimed descriptor is not marked
		 * ready again on this path, which looks like it leaks a
		 * tx slot until reuse wraps around — confirm.
		 */
		DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
		return -1;
	}
	txd->txd_buf = m;

	/* Carry an 802.1Q tag as VLAN per-packet info. */
	if (vlan_has_tag(m)) {
		uint32_t vlan;
		char *cp;
		uint16_t tag;

		tag = vlan_get_tag(m);
		vlan = NDIS_VLAN_INFO_MAKE(EVL_VLANOFTAG(tag),
		    EVL_PRIOFTAG(tag), EVL_CFIOFTAG(tag));
		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
		    NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
		memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
	}

	/* Request IPv4/TCP/UDP checksum offload when the stack asked. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
	    M_CSUM_TCPv4)) {
		uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
		char *cp;

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum |= NDIS_TXCSUM_INFO_IPCS;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			csum |= NDIS_TXCSUM_INFO_TCPCS;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			csum |= NDIS_TXCSUM_INFO_UDPCS;
		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
		    NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
		memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
	}

	pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
	/* The on-wire offset is relative to the end of the RNDIS header. */
	pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;

	/* Attach an RNDIS message to the first slot */
	txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
	txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
	txd->txd_sgl[0].gpa_len = pktlen;
	txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;

	/* The remaining slots describe the mbuf's DMA segments. */
	for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
		seg = &txd->txd_dmap->dm_segs[i];
		txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
		txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
		txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
	}

	*txd0 = txd;

	atomic_dec_uint(&sc->sc_tx_avail);

	return 0;
}
635
/*
 * Undo hvn_encap() after a failed transmit: unload the DMA map and
 * return the descriptor to the free pool.  The mbuf itself is NOT
 * freed here — the caller still owns it.
 */
static void
hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
{
	struct ifnet *ifp = SC2IFP(sc);

	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
	    0, txd->txd_dmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
	txd->txd_buf = NULL;
	txd->txd_nsge = 0;
	txd->txd_ready = 1;
	atomic_inc_uint(&sc->sc_tx_avail);
	/* A descriptor is free again; allow hvn_start() to resume. */
	ifp->if_flags &= ~IFF_OACTIVE;
}
651
/*
 * Handle a transmit completion.  The descriptor id (offset by
 * HVN_NVS_CHIM_SIG) travels in the upper 32 bits of the transaction
 * id; free the mbuf, unload its DMA map and release the descriptor.
 */
static void
hvn_txeof(struct hvn_softc *sc, uint64_t tid)
{
	struct ifnet *ifp = SC2IFP(sc);
	struct hvn_tx_desc *txd;
	struct mbuf *m;
	uint32_t id = tid >> 32;

	/* Ids with a non-zero low half do not carry a descriptor id. */
	if ((tid & 0xffffffffU) != 0)
		return;

	id -= HVN_NVS_CHIM_SIG;
	if (id >= HVN_TX_DESC) {
		device_printf(sc->sc_dev, "tx packet index too large: %u", id);
		return;
	}

	txd = &sc->sc_tx_desc[id];

	if ((m = txd->txd_buf) == NULL) {
		device_printf(sc->sc_dev, "no mbuf @%u\n", id);
		return;
	}
	txd->txd_buf = NULL;

	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
	    0, txd->txd_dmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
	m_freem(m);
	if_statinc(ifp, if_opackets);

	txd->txd_ready = 1;

	atomic_inc_uint(&sc->sc_tx_avail);
	/* Descriptors freed up; let the start routine run again. */
	ifp->if_flags &= ~IFF_OACTIVE;
}
689
/*
 * Allocate the receive buffer (15MB for NVSP <= 2, 16MB otherwise),
 * publish it to the hypervisor via a GPADL handle, and announce it to
 * the host with an RXBUF_CONN command.
 */
static int
hvn_rx_ring_create(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_conn cmd;
	struct hvn_nvs_rxbuf_conn_resp *rsp;
	uint64_t tid;

	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
		sc->sc_rx_size = 15 * 1024 * 1024;	/* 15MB */
	else
		sc->sc_rx_size = 16 * 1024 * 1024; 	/* 16MB */
	sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
	    sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE,
	    HYPERV_DMA_SLEEPOK);
	if (sc->sc_rx_ring == NULL) {
		DPRINTF("%s: failed to allocate Rx ring buffer\n",
		    device_xname(sc->sc_dev));
		return -1;
	}
	/* Make the buffer addressable by the host. */
	if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
	    &sc->sc_rx_hndl)) {
		DPRINTF("%s: failed to obtain a PA handle\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
	cmd.nvs_gpadl = sc->sc_rx_hndl;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
		goto errout;

	rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
	if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
		DPRINTF("%s: failed to set up the Rx ring\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	/* Only a single ring section is supported. */
	if (rsp->nvs_nsect > 1) {
		DPRINTF("%s: invalid number of Rx ring sections: %u\n",
		    device_xname(sc->sc_dev), rsp->nvs_nsect);
		/* The connection succeeded, so tear it down cleanly. */
		hvn_rx_ring_destroy(sc);
		return -1;
	}
	return 0;

 errout:
	if (sc->sc_rx_hndl) {
		vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
		sc->sc_rx_hndl = 0;
	}
	if (sc->sc_rx_ring) {
		hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
		sc->sc_rx_ring = NULL;
	}
	return -1;
}
750
/*
 * Tell the host to stop using the receive buffer, then unregister the
 * GPADL handle and free the buffer.  A no-op if the ring was never
 * created.
 */
static int
hvn_rx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_disconn cmd;
	uint64_t tid;

	if (sc->sc_rx_ring == NULL)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	/* timo == 0: no completion requested, fire and forget. */
	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
		return -1;

	/* Give the host a moment to let go of the buffer. */
	delay(100);

	vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
	sc->sc_rx_hndl = 0;

	hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
	sc->sc_rx_ring = NULL;

	return 0;
}
778
/*
 * Create the transmit ring: one contiguous DMA region holding a
 * pre-allocated RNDIS packet message per descriptor, plus a DMA map
 * per descriptor for the mbuf data.  All descriptors start out ready.
 */
static int
hvn_tx_ring_create(struct hvn_softc *sc)
{
	const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
	struct hvn_tx_desc *txd;
	bus_dma_segment_t *seg;
	size_t msgsize;
	int i, rsegs;
	paddr_t pa;

	/* Keep each message slot 128-byte aligned within the region. */
	msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);

	/* Allocate memory to store RNDIS messages */
	if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
	    &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
		DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
	    HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
		DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
	if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
	    msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
		DPRINTF("%s: failed to create map for RDNIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
	    msgsize * HVN_TX_DESC, NULL, dmaflags)) {
		DPRINTF("%s: failed to create map for RDNIS messages\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}

	for (i = 0; i < HVN_TX_DESC; i++) {
		txd = &sc->sc_tx_desc[i];
		if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
		    HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
		    &txd->txd_dmap)) {
			DPRINTF("%s: failed to create map for TX descriptors\n",
			    device_xname(sc->sc_dev));
			goto errout;
		}
		/* Record where this descriptor's RNDIS message lives. */
		seg = &sc->sc_tx_rmap->dm_segs[0];
		pa = seg->ds_addr + (msgsize * i);
		txd->txd_gpa.gpa_page = atop(pa);
		txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
		txd->txd_gpa.gpa_len = msgsize;
		txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
		txd->txd_id = i + HVN_NVS_CHIM_SIG;
		txd->txd_ready = 1;
	}
	sc->sc_tx_avail = HVN_TX_DESC;

	return 0;

 errout:
	/* hvn_tx_ring_destroy() copes with partially built state. */
	hvn_tx_ring_destroy(sc);
	return -1;
}
844
/*
 * Tear down the transmit ring: destroy each descriptor's DMA map,
 * freeing any mbuf still attached, then release the shared RNDIS
 * message region.  Safe on a partially constructed ring.
 */
static void
hvn_tx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_tx_desc *txd;
	int i;

	for (i = 0; i < HVN_TX_DESC; i++) {
		txd = &sc->sc_tx_desc[i];
		if (txd->txd_dmap == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
		    0, txd->txd_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
		bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
		txd->txd_dmap = NULL;
		if (txd->txd_buf == NULL)
			continue;
		m_freem(txd->txd_buf);
		txd->txd_buf = NULL;
	}
	if (sc->sc_tx_rmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
		    0, sc->sc_tx_rmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
		sc->sc_tx_rmap = NULL;
	}
	if (sc->sc_tx_msgs != NULL) {
		size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);

		bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
		    msgsize * HVN_TX_DESC);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
		sc->sc_tx_msgs = NULL;
	}
}
883
884 static int
885 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
886 {
887 size_t addrlen = ETHER_ADDR_LEN;
888 int rv;
889
890 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
891 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
892 rv = -1;
893 return rv;
894 }
895
896 static void
897 hvn_get_link_status(struct hvn_softc *sc)
898 {
899 uint32_t state;
900 size_t len = sizeof(state);
901
902 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
903 &state, &len) == 0)
904 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
905 LINK_STATE_UP : LINK_STATE_DOWN;
906 }
907
908 static int
909 hvn_nvs_attach(struct hvn_softc *sc)
910 {
911 static const uint32_t protos[] = {
912 HVN_NVS_PROTO_VERSION_5,
913 HVN_NVS_PROTO_VERSION_4,
914 HVN_NVS_PROTO_VERSION_2,
915 HVN_NVS_PROTO_VERSION_1
916 };
917 struct hvn_nvs_init cmd;
918 struct hvn_nvs_init_resp *rsp;
919 struct hvn_nvs_ndis_init ncmd;
920 struct hvn_nvs_ndis_conf ccmd;
921 uint32_t ndisver, ringsize;
922 uint64_t tid;
923 int i;
924
925 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);
926
927 /* We need to be able to fit all RNDIS control and data messages */
928 ringsize = HVN_RNDIS_CTLREQS *
929 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
930 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
931 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
932
933 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
934
935 /* Associate our interrupt handler with the channel */
936 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
937 hvn_nvs_intr, sc)) {
938 DPRINTF("%s: failed to open channel\n",
939 device_xname(sc->sc_dev));
940 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
941 return -1;
942 }
943
944 memset(&cmd, 0, sizeof(cmd));
945 cmd.nvs_type = HVN_NVS_TYPE_INIT;
946 for (i = 0; i < __arraycount(protos); i++) {
947 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
948 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
949 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
950 return -1;
951
952 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
953 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
954 sc->sc_proto = protos[i];
955 break;
956 }
957 }
958 if (i == __arraycount(protos)) {
959 DPRINTF("%s: failed to negotiate NVSP version\n",
960 device_xname(sc->sc_dev));
961 return -1;
962 }
963
964 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
965 memset(&ccmd, 0, sizeof(ccmd));
966 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
967 ccmd.nvs_mtu = HVN_MAXMTU;
968 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
969
970 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
971 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
972 return -1;
973 }
974
975 memset(&ncmd, 0, sizeof(ncmd));
976 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
977 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
978 ndisver = NDIS_VERSION_6_1;
979 else
980 ndisver = NDIS_VERSION_6_30;
981 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
982 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
983
984 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
985 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
986 return -1;
987
988 sc->sc_ndisver = ndisver;
989
990 return 0;
991 }
992
/*
 * Channel interrupt handler: drain all pending VMBus packets and
 * dispatch by packet type — command completions, received RNDIS data
 * and in-band notifications.
 */
static void
hvn_nvs_intr(void *arg)
{
	struct hvn_softc *sc = arg;
	struct ifnet *ifp = SC2IFP(sc);
	struct vmbus_chanpkt_hdr *cph;
	const struct hvn_nvs_hdr *nvs;
	uint64_t rid;
	uint32_t rlen;
	int rv;
	bool dotx = false;

	for (;;) {
		rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
		    HVN_NVS_BUFSIZE, &rlen, &rid, 1);
		if (rv != 0 || rlen == 0) {
			/* EAGAIN just means the ring is empty. */
			if (rv != EAGAIN)
				device_printf(sc->sc_dev,
				    "failed to receive an NVSP packet\n");
			break;
		}
		cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
		nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);

		if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_INIT_RESP:
			case HVN_NVS_TYPE_RXBUF_CONNRESP:
			case HVN_NVS_TYPE_CHIM_CONNRESP:
			case HVN_NVS_TYPE_SUBCH_RESP:
				/* copy the response back */
				memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
				sc->sc_nvsdone = 1;
				/* Wake the hvn_nvs_cmd() waiter. */
				wakeup(&sc->sc_nvsrsp);
				break;
			case HVN_NVS_TYPE_RNDIS_ACK:
				/* A transmission completed. */
				dotx = true;
				hvn_txeof(sc, cph->cph_tid);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on completion\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_RNDIS:
				hvn_rndis_input(sc, cph->cph_tid, cph);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on receive\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_TXTBL_NOTE:
				/* Useless; ignore */
				break;
			default:
				device_printf(sc->sc_dev,
				    "got notify, nvs type %u\n", nvs->nvs_type);
				break;
			}
		} else
			device_printf(sc->sc_dev,
			    "unknown NVSP packet type %u\n", cph->cph_type);
	}

	/* Kick the transmit queue once, outside the receive loop. */
	if (dotx)
		if_schedule_deferred_start(ifp);
}
1067
/*
 * Send an NVSP command to the host, retrying on EAGAIN up to 10 times.
 * If timo is non-zero a completion is requested and we wait up to
 * roughly timo milliseconds for the interrupt handler to post the
 * response into sc_nvsrsp (polling the channel by hand while cold).
 */
static int
hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
    int timo)
{
	struct hvn_nvs_hdr *hdr = cmd;
	int tries = 10;
	int rv, s;

	sc->sc_nvsdone = 0;

	do {
		rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
		    tid, VMBUS_CHANPKT_TYPE_INBAND,
		    timo ? VMBUS_CHANPKT_FLAG_RC : 0);
		if (rv == EAGAIN) {
			/* Ring full: back off briefly and retry. */
			if (cold)
				delay(1000);
			else
				tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
		} else if (rv) {
			DPRINTF("%s: NVSP operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->nvs_type, rv);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
		return rv;
	}

	/* No response expected. */
	if (timo == 0)
		return 0;

	do {
		if (cold) {
			/* Interrupts not running yet; poll the channel. */
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
			    mstohz(1));
	} while (--timo > 0 && sc->sc_nvsdone != 1);

	if (timo == 0 && sc->sc_nvsdone != 1) {
		device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
		    hdr->nvs_type);
		return ETIMEDOUT;
	}
	return 0;
}
1121
1122 static int
1123 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1124 {
1125 struct hvn_nvs_rndis_ack cmd;
1126 int tries = 5;
1127 int rv;
1128
1129 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1130 cmd.nvs_status = HVN_NVS_STATUS_OK;
1131 do {
1132 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1133 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1134 if (rv == EAGAIN)
1135 delay(10);
1136 else if (rv) {
1137 DPRINTF("%s: NVSP acknowledgement error %d\n",
1138 device_xname(sc->sc_dev), rv);
1139 return rv;
1140 }
1141 } while (rv != 0 && --tries > 0);
1142 return rv;
1143 }
1144
1145 static void
1146 hvn_nvs_detach(struct hvn_softc *sc)
1147 {
1148
1149 if (vmbus_channel_close(sc->sc_chan) == 0) {
1150 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1151 sc->sc_nvsbuf = NULL;
1152 }
1153 }
1154
1155 static inline struct rndis_cmd *
1156 hvn_alloc_cmd(struct hvn_softc *sc)
1157 {
1158 struct rndis_cmd *rc;
1159
1160 mutex_enter(&sc->sc_cntl_fqlck);
1161 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1162 /* XXX use condvar(9) instead of mtsleep */
1163 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1164 &sc->sc_cntl_fqlck);
1165 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1166 mutex_exit(&sc->sc_cntl_fqlck);
1167 return rc;
1168 }
1169
/*
 * Put an RNDIS control request on the "sent" queue, where
 * hvn_complete_cmd() will look it up by id when the host responds.
 */
static inline void
hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_sqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_sqlck);
}
1178
1179 static inline struct rndis_cmd *
1180 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1181 {
1182 struct rndis_cmd *rc;
1183
1184 mutex_enter(&sc->sc_cntl_sqlck);
1185 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1186 if (rc->rc_id == id) {
1187 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1188 break;
1189 }
1190 }
1191 mutex_exit(&sc->sc_cntl_sqlck);
1192 if (rc != NULL) {
1193 mutex_enter(&sc->sc_cntl_cqlck);
1194 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1195 mutex_exit(&sc->sc_cntl_cqlck);
1196 }
1197 return rc;
1198 }
1199
/*
 * Remove a request from the completed queue (it was moved there by
 * hvn_complete_cmd()) once the waiter has consumed the response.
 */
static inline void
hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_cqlck);
	TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_cqlck);
}
1208
1209 static inline int
1210 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1211 {
1212 struct rndis_cmd *rn;
1213
1214 mutex_enter(&sc->sc_cntl_sqlck);
1215 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1216 if (rn == rc) {
1217 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1218 mutex_exit(&sc->sc_cntl_sqlck);
1219 return 0;
1220 }
1221 }
1222 mutex_exit(&sc->sc_cntl_sqlck);
1223 return -1;
1224 }
1225
/*
 * Scrub a control request slot and return it to the free queue,
 * waking anyone blocked in hvn_alloc_cmd().
 */
static inline void
hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	/* Clear the DMA-visible request page header and cached response. */
	memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
	memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
	memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
	mutex_enter(&sc->sc_cntl_fqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	wakeup(&sc->sc_cntl_fq);
}
1238
1239 static int
1240 hvn_rndis_attach(struct hvn_softc *sc)
1241 {
1242 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1243 struct rndis_init_req *req;
1244 struct rndis_init_comp *cmp;
1245 struct rndis_cmd *rc;
1246 int i, rv;
1247
1248 /* RNDIS control message queues */
1249 TAILQ_INIT(&sc->sc_cntl_sq);
1250 TAILQ_INIT(&sc->sc_cntl_cq);
1251 TAILQ_INIT(&sc->sc_cntl_fq);
1252 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1253 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1254 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1255
1256 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1257 rc = &sc->sc_cntl_msgs[i];
1258 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1259 dmaflags, &rc->rc_dmap)) {
1260 DPRINTF("%s: failed to create RNDIS command map\n",
1261 device_xname(sc->sc_dev));
1262 goto errout;
1263 }
1264 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1265 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1266 DPRINTF("%s: failed to allocate RNDIS command\n",
1267 device_xname(sc->sc_dev));
1268 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1269 goto errout;
1270 }
1271 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1272 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1273 DPRINTF("%s: failed to allocate RNDIS command\n",
1274 device_xname(sc->sc_dev));
1275 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1276 rc->rc_nsegs);
1277 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1278 goto errout;
1279 }
1280 memset(rc->rc_req, 0, PAGE_SIZE);
1281 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1282 PAGE_SIZE, NULL, dmaflags)) {
1283 DPRINTF("%s: failed to load RNDIS command map\n",
1284 device_xname(sc->sc_dev));
1285 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1286 rc->rc_nsegs);
1287 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1288 goto errout;
1289 }
1290 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1291 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1292 }
1293
1294 rc = hvn_alloc_cmd(sc);
1295
1296 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1297 BUS_DMASYNC_PREREAD);
1298
1299 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1300
1301 req = rc->rc_req;
1302 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1303 req->rm_len = sizeof(*req);
1304 req->rm_rid = rc->rc_id;
1305 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1306 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1307 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1308
1309 rc->rc_cmplen = sizeof(*cmp);
1310
1311 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1312 BUS_DMASYNC_PREWRITE);
1313
1314 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1315 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1316 device_xname(sc->sc_dev), rv);
1317 hvn_free_cmd(sc, rc);
1318 goto errout;
1319 }
1320 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1321 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1322 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1323 device_xname(sc->sc_dev), cmp->rm_status);
1324 hvn_free_cmd(sc, rc);
1325 goto errout;
1326 }
1327
1328 hvn_free_cmd(sc, rc);
1329
1330 /* Initialize RNDIS Data command */
1331 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1332 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1333 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1334 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1335
1336 return 0;
1337
1338 errout:
1339 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1340 rc = &sc->sc_cntl_msgs[i];
1341 if (rc->rc_req == NULL)
1342 continue;
1343 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1344 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1345 rc->rc_req = NULL;
1346 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1347 }
1348 return -1;
1349 }
1350
1351 static int
1352 hvn_set_capabilities(struct hvn_softc *sc)
1353 {
1354 struct ndis_offload_params params;
1355 size_t len = sizeof(params);
1356
1357 memset(¶ms, 0, sizeof(params));
1358
1359 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1360 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1361 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1362 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1363 } else {
1364 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1365 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1366 }
1367
1368 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1369 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1370 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1371 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1372 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1373 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1374 }
1375
1376 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, ¶ms, len);
1377 }
1378
/*
 * Transmit an RNDIS control request (already marshalled into
 * rc->rc_req) and wait up to "timo" ~1ms intervals for its completion,
 * which hvn_rndis_complete() matches by rc->rc_id.  Returns 0 on
 * success, the send error, ETIMEDOUT, or EINTR if the sleep was
 * interrupted by a signal.
 */
static int
hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
{
	struct hvn_nvs_rndis *msg = &rc->rc_msg;
	struct rndis_msghdr *hdr = rc->rc_req;
	struct vmbus_gpa sgl[1];
	int tries = 10;
	int rv, s;

	KASSERT(timo > 0);

	msg->nvs_type = HVN_NVS_TYPE_RNDIS;
	msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
	msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;

	/* The request page is handed to the host by physical address. */
	sgl[0].gpa_page = rc->rc_gpa;
	sgl[0].gpa_len = hdr->rm_len;
	sgl[0].gpa_ofs = 0;

	rc->rc_done = 0;

	/* Queue before sending so the completion can always find us. */
	hvn_submit_cmd(sc, rc);

	do {
		rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
		    sizeof(*msg), rc->rc_id);
		if (rv == EAGAIN) {
			/* Ring full: back off ~1ms and retry. */
			if (cold)
				delay(1000);
			else
				tsleep(rc, PRIBIO, "rndisout", mstohz(1));
		} else if (rv) {
			DPRINTF("%s: RNDIS operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->rm_type, rv);
			hvn_rollback_cmd(sc, rc);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
		return rv;
	}
	if (vmbus_channel_is_revoked(sc->sc_chan)) {
		/* No response */
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTWRITE);

	do {
		if (cold) {
			/* No interrupts while cold: poll the channel. */
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
	} while (--timo > 0 && rc->rc_done != 1);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTREAD);

	if (rc->rc_done != 1) {
		rv = timo == 0 ? ETIMEDOUT : EINTR;
		/*
		 * If the request is still on the sent queue, take it
		 * back; otherwise the completion raced us and the
		 * command actually finished, so report success.
		 */
		if (hvn_rollback_cmd(sc, rc)) {
			hvn_release_cmd(sc, rc);
			rv = 0;
		} else if (rv == ETIMEDOUT) {
			device_printf(sc->sc_dev,
			    "RNDIS operation %u timed out\n", hdr->rm_type);
		}
		return rv;
	}

	hvn_release_cmd(sc, rc);
	return 0;
}
1459
1460 static void
1461 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1462 {
1463 struct vmbus_chanpkt_prplist *cp = arg;
1464 uint32_t off, len, type;
1465 int i;
1466
1467 if (sc->sc_rx_ring == NULL) {
1468 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1469 return;
1470 }
1471
1472 for (i = 0; i < cp->cp_range_cnt; i++) {
1473 off = cp->cp_range[i].gpa_ofs;
1474 len = cp->cp_range[i].gpa_len;
1475
1476 KASSERT(off + len <= sc->sc_rx_size);
1477 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1478
1479 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1480 switch (type) {
1481 /* data message */
1482 case REMOTE_NDIS_PACKET_MSG:
1483 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1484 break;
1485 /* completion messages */
1486 case REMOTE_NDIS_INITIALIZE_CMPLT:
1487 case REMOTE_NDIS_QUERY_CMPLT:
1488 case REMOTE_NDIS_SET_CMPLT:
1489 case REMOTE_NDIS_RESET_CMPLT:
1490 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1491 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1492 break;
1493 /* notification message */
1494 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1495 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1496 break;
1497 default:
1498 device_printf(sc->sc_dev,
1499 "unhandled RNDIS message type %u\n", type);
1500 break;
1501 }
1502 }
1503
1504 hvn_nvs_ack(sc, tid);
1505 }
1506
1507 static inline struct mbuf *
1508 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1509 {
1510 struct ifnet *ifp = SC2IFP(sc);
1511 struct mbuf *m;
1512 size_t size = len + ETHER_ALIGN;
1513
1514 MGETHDR(m, M_NOWAIT, MT_DATA);
1515 if (m == NULL)
1516 return NULL;
1517
1518 if (size > MHLEN) {
1519 if (size <= MCLBYTES)
1520 MCLGET(m, M_NOWAIT);
1521 else
1522 MEXTMALLOC(m, size, M_NOWAIT);
1523 if ((m->m_flags & M_EXT) == 0) {
1524 m_freem(m);
1525 return NULL;
1526 }
1527 }
1528
1529 m->m_len = m->m_pkthdr.len = size;
1530 m_adj(m, ETHER_ALIGN);
1531 m_copyback(m, 0, len, buf);
1532 m_set_rcvif(m, ifp);
1533 return m;
1534 }
1535
1536 static void
1537 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1538 {
1539 struct ifnet *ifp = SC2IFP(sc);
1540 struct rndis_packet_msg *pkt;
1541 struct rndis_pktinfo *pi;
1542 uint32_t csum, vlan;
1543 struct mbuf *m;
1544
1545 if (!(ifp->if_flags & IFF_RUNNING))
1546 return;
1547
1548 if (len < sizeof(*pkt)) {
1549 device_printf(sc->sc_dev, "data packet too short: %u\n",
1550 len);
1551 return;
1552 }
1553
1554 pkt = (struct rndis_packet_msg *)buf;
1555 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1556 device_printf(sc->sc_dev,
1557 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1558 pkt->rm_datalen);
1559 return;
1560 }
1561
1562 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1563 pkt->rm_datalen)) == NULL) {
1564 if_statinc(ifp, if_ierrors);
1565 return;
1566 }
1567
1568 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1569 device_printf(sc->sc_dev,
1570 "pktinfo is out of bounds: %u@%u vs %u\n",
1571 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1572 goto done;
1573 }
1574
1575 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1576 pkt->rm_pktinfooffset);
1577 while (pkt->rm_pktinfolen > 0) {
1578 if (pi->rm_size > pkt->rm_pktinfolen) {
1579 device_printf(sc->sc_dev,
1580 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1581 pkt->rm_pktinfolen);
1582 break;
1583 }
1584
1585 switch (pi->rm_type) {
1586 case NDIS_PKTINFO_TYPE_CSUM:
1587 memcpy(&csum, pi->rm_data, sizeof(csum));
1588 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1589 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1590 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1591 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1592 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1593 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1594 break;
1595 case NDIS_PKTINFO_TYPE_VLAN:
1596 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1597 if (vlan != 0xffffffff) {
1598 uint16_t t = NDIS_VLAN_INFO_ID(vlan);
1599 t |= NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS;
1600 t |= NDIS_VLAN_INFO_CFI(vlan) << EVL_CFI_BITS;
1601 vlan_set_tag(m, t);
1602 }
1603 break;
1604 default:
1605 DPRINTF("%s: unhandled pktinfo type %u\n",
1606 device_xname(sc->sc_dev), pi->rm_type);
1607 break;
1608 }
1609
1610 pkt->rm_pktinfolen -= pi->rm_size;
1611 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1612 }
1613
1614 done:
1615 if_percpuq_enqueue(sc->sc_ipq, m);
1616 }
1617
1618 static void
1619 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1620 {
1621 struct rndis_cmd *rc;
1622 uint32_t id;
1623
1624 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1625 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1626 if (len < rc->rc_cmplen)
1627 device_printf(sc->sc_dev,
1628 "RNDIS response %u too short: %u\n", id, len);
1629 else
1630 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1631 if (len > rc->rc_cmplen &&
1632 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1633 device_printf(sc->sc_dev,
1634 "RNDIS response %u too large: %u\n", id, len);
1635 else if (len > rc->rc_cmplen)
1636 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1637 len - rc->rc_cmplen);
1638 rc->rc_done = 1;
1639 wakeup(rc);
1640 } else {
1641 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1642 device_xname(sc->sc_dev), id);
1643 }
1644 }
1645
1646 static int
1647 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1648 {
1649 uint64_t rid = (uint64_t)txd->txd_id << 32;
1650 int rv;
1651
1652 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1653 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1654 if (rv) {
1655 DPRINTF("%s: RNDIS data send error %d\n",
1656 device_xname(sc->sc_dev), rv);
1657 return rv;
1658 }
1659 return 0;
1660 }
1661
1662 static void
1663 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1664 {
1665 struct ifnet *ifp = SC2IFP(sc);
1666 uint32_t status;
1667 int link_state = sc->sc_link_state;
1668
1669 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1670 switch (status) {
1671 case RNDIS_STATUS_MEDIA_CONNECT:
1672 sc->sc_link_state = LINK_STATE_UP;
1673 break;
1674 case RNDIS_STATUS_MEDIA_DISCONNECT:
1675 sc->sc_link_state = LINK_STATE_DOWN;
1676 break;
1677 /* Ignore these */
1678 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1679 return;
1680 default:
1681 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1682 status);
1683 return;
1684 }
1685 if (link_state != sc->sc_link_state)
1686 if_link_state_change(ifp, sc->sc_link_state);
1687 }
1688
/*
 * Issue REMOTE_NDIS_QUERY_MSG for "oid" and copy the returned info
 * buffer into "res".  On entry *length is the size of the caller's
 * buffer; on success it is updated to the number of bytes copied.
 * Returns 0, a command error, EINVAL if the response would overflow
 * the caller's buffer, or EIO on an RNDIS-level failure.
 */
static int
hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
{
	struct rndis_cmd *rc;
	struct rndis_query_req *req;
	struct rndis_query_comp *cmp;
	size_t olength = *length;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	/* Marshal the query; RNDIS offsets are header-relative. */
	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_QUERY_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;
	req->rm_oid = oid;
	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;

	rc->rc_cmplen = sizeof(*cmp);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: QUERY_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		return rv;
	}

	cmp = (struct rndis_query_comp *)&rc->rc_cmp;
	switch (cmp->rm_status) {
	case RNDIS_STATUS_SUCCESS:
		if (cmp->rm_infobuflen > olength) {
			/* Response larger than the caller's buffer. */
			rv = EINVAL;
			break;
		}
		memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
		*length = cmp->rm_infobuflen;
		break;
	default:
		*length = 0;
		rv = EIO;
		break;
	}

	hvn_free_cmd(sc, rc);
	return rv;
}
1743
/*
 * Issue REMOTE_NDIS_SET_MSG for "oid" with "length" bytes of info
 * buffer payload appended directly after the request header.
 * Returns 0, a command error, or EIO on an RNDIS-level failure.
 */
static int
hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
{
	struct rndis_cmd *rc;
	struct rndis_set_req *req;
	struct rndis_set_comp *cmp;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	/* Marshal the set request; RNDIS offsets are header-relative. */
	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_SET_MSG;
	req->rm_len = sizeof(*req) + length;
	req->rm_rid = rc->rc_id;
	req->rm_oid = oid;
	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;

	rc->rc_cmplen = sizeof(*cmp);

	if (length > 0) {
		/* The payload must fit in the single request page. */
		KASSERT(sizeof(*req) + length < PAGE_SIZE);
		req->rm_infobuflen = length;
		memcpy(req + 1, data, length);
	}

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: SET_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		return rv;
	}

	cmp = (struct rndis_set_comp *)&rc->rc_cmp;
	if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
		rv = EIO;

	hvn_free_cmd(sc, rc);
	return rv;
}
1791
1792 static int
1793 hvn_rndis_open(struct hvn_softc *sc)
1794 {
1795 uint32_t filter;
1796 int rv;
1797
1798 if (sc->sc_promisc)
1799 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1800 else
1801 filter = RNDIS_PACKET_TYPE_BROADCAST |
1802 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1803 RNDIS_PACKET_TYPE_DIRECTED;
1804
1805 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1806 &filter, sizeof(filter));
1807 if (rv) {
1808 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1809 device_xname(sc->sc_dev), filter);
1810 }
1811 return rv;
1812 }
1813
1814 static int
1815 hvn_rndis_close(struct hvn_softc *sc)
1816 {
1817 uint32_t filter = 0;
1818 int rv;
1819
1820 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1821 &filter, sizeof(filter));
1822 if (rv) {
1823 DPRINTF("%s: failed to clear RNDIS filter\n",
1824 device_xname(sc->sc_dev));
1825 }
1826 return rv;
1827 }
1828
/*
 * Shut down the RNDIS control layer: send REMOTE_NDIS_HALT_MSG to the
 * host (best effort; a failure is only logged) and destroy the control
 * queue locks.
 */
static void
hvn_rndis_detach(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;
	struct rndis_halt_req *req;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	/* Marshal the HALT request; it has no payload. */
	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_HALT_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: HALT_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
	}
	hvn_free_cmd(sc, rc);

	mutex_destroy(&sc->sc_cntl_sqlck);
	mutex_destroy(&sc->sc_cntl_cqlck);
	mutex_destroy(&sc->sc_cntl_fqlck);
}
1861