1 /*	$NetBSD: if_hvn.c,v 1.12 2019/12/10 11:19:25 nonaka Exp $	*/
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8  * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.12 2019/12/10 11:19:25 nonaka Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70
71 #define HVN_NVS_MSGSIZE 32
72 #define HVN_NVS_BUFSIZE PAGE_SIZE
73
74 /*
75 * RNDIS control interface
76 */
77 #define HVN_RNDIS_CTLREQS 4
78 #define HVN_RNDIS_BUFSIZE 512
79
80 struct rndis_cmd {
81 uint32_t rc_id;
82 struct hvn_nvs_rndis rc_msg;
83 void *rc_req;
84 bus_dmamap_t rc_dmap;
85 bus_dma_segment_t rc_segs;
86 int rc_nsegs;
87 uint64_t rc_gpa;
88 struct rndis_packet_msg rc_cmp;
89 uint32_t rc_cmplen;
90 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
91 int rc_done;
92 TAILQ_ENTRY(rndis_cmd) rc_entry;
93 };
94 TAILQ_HEAD(rndis_queue, rndis_cmd);
95
96 #define HVN_MAXMTU (9 * 1024)
97
98 #define HVN_RNDIS_XFER_SIZE 2048
99
100 /*
101 * Tx ring
102 */
103 #define HVN_TX_DESC 256
104 #define HVN_TX_FRAGS 15 /* 31 is the max */
105 #define HVN_TX_FRAG_SIZE PAGE_SIZE
106 #define HVN_TX_PKT_SIZE 16384
107
108 #define HVN_RNDIS_PKT_LEN \
109 (sizeof(struct rndis_packet_msg) + \
110 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
111 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
112
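/*
 * Transmit descriptor.  Each descriptor owns a preallocated RNDIS
 * packet message (txd_req, addressed by txd_gpa) and a DMA map for
 * the mbuf payload.  txd_sgl[0] points at the RNDIS message; the
 * remaining entries map the mbuf data segments (see hvn_encap()).
 */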
113 struct hvn_tx_desc {
114 uint32_t txd_id;
115 int txd_ready;
116 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
117 int txd_nsge;
118 struct mbuf *txd_buf;
119 bus_dmamap_t txd_dmap;
120 struct vmbus_gpa txd_gpa;
121 struct rndis_packet_msg *txd_req;
122 };
123
124 struct hvn_softc {
125 device_t sc_dev;
126
127 struct vmbus_softc *sc_vmbus;
128 struct vmbus_channel *sc_chan;
129 bus_dma_tag_t sc_dmat;
130
131 struct ethercom sc_ec;
132 struct ifmedia sc_media;
133 struct if_percpuq *sc_ipq;
134 int sc_link_state;
135 int sc_promisc;
136
137 uint32_t sc_flags;
138 #define HVN_SCF_ATTACHED __BIT(0)
139
140 /* NVS protocol */
141 int sc_proto;
142 uint32_t sc_nvstid;
143 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
144 uint8_t *sc_nvsbuf;
145 int sc_nvsdone;
146
147 /* RNDIS protocol */
148 int sc_ndisver;
149 uint32_t sc_rndisrid;
150 struct rndis_queue sc_cntl_sq; /* submission queue */
151 kmutex_t sc_cntl_sqlck;
152 struct rndis_queue sc_cntl_cq; /* completion queue */
153 kmutex_t sc_cntl_cqlck;
154 struct rndis_queue sc_cntl_fq; /* free queue */
155 kmutex_t sc_cntl_fqlck;
156 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
157 struct hvn_nvs_rndis sc_data_msg;
158
159 /* Rx ring */
160 uint8_t *sc_rx_ring;
161 int sc_rx_size;
162 uint32_t sc_rx_hndl;
163 struct hyperv_dma sc_rx_dma;
164
165 /* Tx ring */
166 uint32_t sc_tx_next;
167 uint32_t sc_tx_avail;
168 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
169 bus_dmamap_t sc_tx_rmap;
170 uint8_t *sc_tx_msgs;
171 bus_dma_segment_t sc_tx_mseg;
172 };
173
174 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
175 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
176
177
178 static int hvn_match(device_t, cfdata_t, void *);
179 static void hvn_attach(device_t, device_t, void *);
180 static int hvn_detach(device_t, int);
181
182 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
183 hvn_match, hvn_attach, hvn_detach, NULL);
184
185 static int hvn_ioctl(struct ifnet *, u_long, void *);
186 static int hvn_media_change(struct ifnet *);
187 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
188 static int hvn_iff(struct hvn_softc *);
189 static int hvn_init(struct ifnet *);
190 static void hvn_stop(struct ifnet *, int);
191 static void hvn_start(struct ifnet *);
192 static int hvn_encap(struct hvn_softc *, struct mbuf *,
193 struct hvn_tx_desc **);
194 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
195 static void hvn_txeof(struct hvn_softc *, uint64_t);
196 static int hvn_rx_ring_create(struct hvn_softc *);
197 static int hvn_rx_ring_destroy(struct hvn_softc *);
198 static int hvn_tx_ring_create(struct hvn_softc *);
199 static void hvn_tx_ring_destroy(struct hvn_softc *);
200 static int hvn_set_capabilities(struct hvn_softc *);
201 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
202 static void hvn_get_link_status(struct hvn_softc *);
203
204 /* NVSP */
205 static int hvn_nvs_attach(struct hvn_softc *);
206 static void hvn_nvs_intr(void *);
207 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
208 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
209 static void hvn_nvs_detach(struct hvn_softc *);
210
211 /* RNDIS */
212 static int hvn_rndis_attach(struct hvn_softc *);
213 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
214 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
215 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
216 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
217 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
218 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
219 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
220 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
221 static int hvn_rndis_open(struct hvn_softc *);
222 static int hvn_rndis_close(struct hvn_softc *);
223 static void hvn_rndis_detach(struct hvn_softc *);
224
225 static int
226 hvn_match(device_t parent, cfdata_t match, void *aux)
227 {
228 struct vmbus_attach_args *aa = aux;
229
230 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
231 return 0;
232 return 1;
233 }
234
235 static void
236 hvn_attach(device_t parent, device_t self, void *aux)
237 {
238 struct hvn_softc *sc = device_private(self);
239 struct vmbus_attach_args *aa = aux;
240 struct ifnet *ifp = SC2IFP(sc);
241 uint8_t enaddr[ETHER_ADDR_LEN];
242 int error;
243
244 sc->sc_dev = self;
245 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
246 sc->sc_chan = aa->aa_chan;
247 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
248
249 aprint_naive("\n");
250 aprint_normal(": Hyper-V NetVSC\n");
251
252 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
253
254 if (hvn_nvs_attach(sc)) {
255 aprint_error_dev(self, "failed to init NVSP\n");
256 return;
257 }
258
259 if (hvn_rx_ring_create(sc)) {
260 aprint_error_dev(self, "failed to create Rx ring\n");
261 goto fail1;
262 }
263
264 if (hvn_tx_ring_create(sc)) {
265 aprint_error_dev(self, "failed to create Tx ring\n");
266 goto fail1;
267 }
268
269 ifp->if_softc = sc;
270 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
271 ifp->if_ioctl = hvn_ioctl;
272 ifp->if_start = hvn_start;
273 ifp->if_init = hvn_init;
274 ifp->if_stop = hvn_stop;
275 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
276 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
277 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
278 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
279 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
280 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
281 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
282 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
283 }
284 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
285 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
286 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
287 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
288 }
289
290 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
291 IFQ_SET_READY(&ifp->if_snd);
292
293 /* Initialize ifmedia structures. */
294 sc->sc_ec.ec_ifmedia = &sc->sc_media;
295 ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
296 hvn_media_status);
297 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
298 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
299
300 error = if_initialize(ifp);
301 if (error) {
302 aprint_error_dev(self, "if_initialize failed(%d)\n", error);
303 goto fail2;
304 }
305 sc->sc_ipq = if_percpuq_create(ifp);
306 if_deferred_start_init(ifp, NULL);
307
308 if (hvn_rndis_attach(sc)) {
309 aprint_error_dev(self, "failed to init RNDIS\n");
310 goto fail1;
311 }
312
313 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
314 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
315 sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
316
317 if (hvn_set_capabilities(sc)) {
318 aprint_error_dev(self, "failed to setup offloading\n");
319 goto fail2;
320 }
321
322 if (hvn_get_lladdr(sc, enaddr)) {
323 aprint_error_dev(self,
324 "failed to obtain an ethernet address\n");
325 goto fail2;
326 }
327 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
328
329 ether_ifattach(ifp, enaddr);
330 if_register(ifp);
331
332 if (pmf_device_register(self, NULL, NULL))
333 pmf_class_network_register(self, ifp);
334 else
335 aprint_error_dev(self, "couldn't establish power handler\n");
336
337 SET(sc->sc_flags, HVN_SCF_ATTACHED);
338 return;
339
340 fail2: hvn_rndis_detach(sc);
341 fail1: hvn_rx_ring_destroy(sc);
342 hvn_tx_ring_destroy(sc);
343 hvn_nvs_detach(sc);
344 }
345
346 static int
347 hvn_detach(device_t self, int flags)
348 {
349 struct hvn_softc *sc = device_private(self);
350 struct ifnet *ifp = SC2IFP(sc);
351
352 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
353 return 0;
354
355 hvn_stop(ifp, 1);
356
357 pmf_device_deregister(self);
358
359 ether_ifdetach(ifp);
360 if_detach(ifp);
361 if_percpuq_destroy(sc->sc_ipq);
362
363 hvn_rndis_detach(sc);
364 hvn_rx_ring_destroy(sc);
365 hvn_tx_ring_destroy(sc);
366 hvn_nvs_detach(sc);
367
368 return 0;
369 }
370
371 static int
372 hvn_ioctl(struct ifnet *ifp, u_long command, void * data)
373 {
374 struct hvn_softc *sc = IFP2SC(ifp);
375 int s, error = 0;
376
377 s = splnet();
378
379 switch (command) {
380 case SIOCSIFFLAGS:
381 if (ifp->if_flags & IFF_UP) {
382 if (ifp->if_flags & IFF_RUNNING)
383 error = ENETRESET;
384 else {
385 error = hvn_init(ifp);
386 if (error)
387 ifp->if_flags &= ~IFF_UP;
388 }
389 } else {
390 if (ifp->if_flags & IFF_RUNNING)
391 hvn_stop(ifp, 1);
392 }
393 break;
394 default:
395 error = ether_ioctl(ifp, command, data);
396 break;
397 }
398
399 if (error == ENETRESET) {
400 if (ifp->if_flags & IFF_RUNNING)
401 hvn_iff(sc);
402 error = 0;
403 }
404
405 splx(s);
406
407 return error;
408 }
409
410 static int
411 hvn_media_change(struct ifnet *ifp)
412 {
413
414 return 0;
415 }
416
417 static void
418 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
419 {
420 struct hvn_softc *sc = IFP2SC(ifp);
421 int link_state;
422
423 link_state = sc->sc_link_state;
424 hvn_get_link_status(sc);
425 if (link_state != sc->sc_link_state)
426 if_link_state_change(ifp, sc->sc_link_state);
427
428 ifmr->ifm_status = IFM_AVALID;
429 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
430 if (sc->sc_link_state == LINK_STATE_UP)
431 ifmr->ifm_status |= IFM_ACTIVE;
432 }
433
434 static int
435 hvn_iff(struct hvn_softc *sc)
436 {
437
438 /* XXX */
439 sc->sc_promisc = 0;
440
441 return 0;
442 }
443
444 static int
445 hvn_init(struct ifnet *ifp)
446 {
447 struct hvn_softc *sc = IFP2SC(ifp);
448 int error;
449
450 hvn_stop(ifp, 0);
451
452 error = hvn_iff(sc);
453 if (error)
454 return error;
455
456 error = hvn_rndis_open(sc);
457 if (error == 0) {
458 ifp->if_flags |= IFF_RUNNING;
459 ifp->if_flags &= ~IFF_OACTIVE;
460 }
461 return error;
462 }
463
464 static void
465 hvn_stop(struct ifnet *ifp, int disable)
466 {
467 struct hvn_softc *sc = IFP2SC(ifp);
468
469 hvn_rndis_close(sc);
470
471 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
472 }
473
474 static void
475 hvn_start(struct ifnet *ifp)
476 {
477 struct hvn_softc *sc = IFP2SC(ifp);
478 struct hvn_tx_desc *txd;
479 struct mbuf *m;
480
481 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
482 return;
483
484 for (;;) {
485 if (!sc->sc_tx_avail) {
486 /* transient */
487 ifp->if_flags |= IFF_OACTIVE;
488 break;
489 }
490
491 IFQ_DEQUEUE(&ifp->if_snd, m);
492 if (m == NULL)
493 break;
494
495 if (hvn_encap(sc, m, &txd)) {
496 /* the chain is too large */
497 ifp->if_oerrors++;
498 m_freem(m);
499 continue;
500 }
501
502 bpf_mtap(ifp, m, BPF_D_OUT);
503
504 if (hvn_rndis_output(sc, txd)) {
505 hvn_decap(sc, txd);
506 ifp->if_oerrors++;
507 m_freem(m);
508 continue;
509 }
510
511 sc->sc_tx_next++;
512 }
513 }
514
515 static inline char *
516 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
517 size_t datalen, uint32_t type)
518 {
519 struct rndis_pktinfo *pi;
520 size_t pi_size = sizeof(*pi) + datalen;
521 char *cp;
522
523 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
524 pktsize);
525
526 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
527 pi = (struct rndis_pktinfo *)cp;
528 pi->rm_size = pi_size;
529 pi->rm_type = type;
530 pi->rm_pktinfooffset = sizeof(*pi);
531 pkt->rm_pktinfolen += pi_size;
532 pkt->rm_dataoffset += pi_size;
533 pkt->rm_len += pi_size;
534
535 return (char *)pi->rm_data;
536 }
537
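/*
 * Claim a free transmit descriptor, build the RNDIS packet message in
 * its preallocated buffer, DMA-load the mbuf (defragmenting once on
 * EFBIG), append VLAN and checksum packet info where requested, and
 * fill the scatter/gather list: slot 0 carries the RNDIS message, the
 * remaining slots carry the mbuf segments.
 */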
538 static int
539 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
540 {
541 struct hvn_tx_desc *txd;
542 struct rndis_packet_msg *pkt;
543 bus_dma_segment_t *seg;
544 size_t pktlen;
545 int i, rv;
546
547 do {
548 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
549 sc->sc_tx_next++;
550 } while (!txd->txd_ready);
551 txd->txd_ready = 0;
552
553 pkt = txd->txd_req;
554 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
555 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
556 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
557 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
558 pkt->rm_datalen = m->m_pkthdr.len;
559 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
560 pkt->rm_pktinfolen = 0;
561
562 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
563 BUS_DMA_NOWAIT);
564 switch (rv) {
565 case 0:
566 break;
567 case EFBIG:
568 if (m_defrag(m, M_NOWAIT) != NULL &&
569 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
570 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
571 break;
572 /* FALLTHROUGH */
573 default:
574 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
575 return -1;
576 }
577 txd->txd_buf = m;
578
579 if (m->m_flags & M_VLANTAG) {
580 uint32_t vlan;
581 char *cp;
582
583 vlan = NDIS_VLAN_INFO_MAKE(
584 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
585 EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
586 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
587 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
588 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
589 }
590
591 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
592 M_CSUM_TCPv4)) {
593 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
594 char *cp;
595
596 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
597 csum |= NDIS_TXCSUM_INFO_IPCS;
598 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
599 csum |= NDIS_TXCSUM_INFO_TCPCS;
600 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
601 csum |= NDIS_TXCSUM_INFO_UDPCS;
602 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
603 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
604 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
605 }
606
607 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
608 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
609
610 /* Attach an RNDIS message to the first slot */
611 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
612 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
613 txd->txd_sgl[0].gpa_len = pktlen;
614 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
615
616 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
617 seg = &txd->txd_dmap->dm_segs[i];
618 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
619 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
620 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
621 }
622
623 *txd0 = txd;
624
625 atomic_dec_uint(&sc->sc_tx_avail);
626
627 return 0;
628 }
629
630 static void
631 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
632 {
633 struct ifnet *ifp = SC2IFP(sc);
634
635 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
636 0, txd->txd_dmap->dm_mapsize,
637 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
638 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
639 txd->txd_buf = NULL;
640 txd->txd_nsge = 0;
641 txd->txd_ready = 1;
642 atomic_inc_uint(&sc->sc_tx_avail);
643 ifp->if_flags &= ~IFF_OACTIVE;
644 }
645
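/*
 * Transmit completion.  The transaction id carries the descriptor id
 * (offset by HVN_NVS_CHIM_SIG) in its upper 32 bits; reclaim the
 * descriptor, unload and free the mbuf, and mark the slot ready.
 */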
646 static void
647 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
648 {
649 struct ifnet *ifp = SC2IFP(sc);
650 struct hvn_tx_desc *txd;
651 struct mbuf *m;
652 uint32_t id = tid >> 32;
653
654 if ((tid & 0xffffffffU) != 0)
655 return;
656
657 id -= HVN_NVS_CHIM_SIG;
658 if (id >= HVN_TX_DESC) {
659 device_printf(sc->sc_dev, "tx packet index too large: %u\n", id);
660 return;
661 }
662
663 txd = &sc->sc_tx_desc[id];
664
665 if ((m = txd->txd_buf) == NULL) {
666 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
667 return;
668 }
669 txd->txd_buf = NULL;
670
671 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
672 0, txd->txd_dmap->dm_mapsize,
673 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
674 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
675 m_freem(m);
676 ifp->if_opackets++;
677
678 txd->txd_ready = 1;
679
680 atomic_inc_uint(&sc->sc_tx_avail);
681 ifp->if_flags &= ~IFF_OACTIVE;
682 }
683
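/*
 * Allocate the receive buffer (15MB for NVS protocol 2 and older,
 * 16MB otherwise), establish a GPADL handle for it on the channel and
 * announce it to the host with an NVS RXBUF_CONN command.
 */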
684 static int
685 hvn_rx_ring_create(struct hvn_softc *sc)
686 {
687 struct hvn_nvs_rxbuf_conn cmd;
688 struct hvn_nvs_rxbuf_conn_resp *rsp;
689 uint64_t tid;
690
691 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
692 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
693 else
694 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
695 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
696 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE);
697 if (sc->sc_rx_ring == NULL) {
698 DPRINTF("%s: failed to allocate Rx ring buffer\n",
699 device_xname(sc->sc_dev));
700 return -1;
701 }
702 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
703 &sc->sc_rx_hndl)) {
704 DPRINTF("%s: failed to obtain a PA handle\n",
705 device_xname(sc->sc_dev));
706 goto errout;
707 }
708
709 memset(&cmd, 0, sizeof(cmd));
710 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
711 cmd.nvs_gpadl = sc->sc_rx_hndl;
712 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
713
714 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
715 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
716 goto errout;
717
718 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
719 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
720 DPRINTF("%s: failed to set up the Rx ring\n",
721 device_xname(sc->sc_dev));
722 goto errout;
723 }
724 if (rsp->nvs_nsect > 1) {
725 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
726 device_xname(sc->sc_dev), rsp->nvs_nsect);
727 hvn_rx_ring_destroy(sc);
728 return -1;
729 }
730 return 0;
731
732 errout:
733 if (sc->sc_rx_hndl) {
734 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
735 sc->sc_rx_hndl = 0;
736 }
737 if (sc->sc_rx_ring) {
738 kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
739 sc->sc_rx_ring = NULL;
740 }
741 return -1;
742 }
743
744 static int
745 hvn_rx_ring_destroy(struct hvn_softc *sc)
746 {
747 struct hvn_nvs_rxbuf_disconn cmd;
748 uint64_t tid;
749
750 if (sc->sc_rx_ring == NULL)
751 return 0;
752
753 memset(&cmd, 0, sizeof(cmd));
754 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
755 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
756
757 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
758 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
759 return -1;
760
761 delay(100);
762
763 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
764
765 sc->sc_rx_hndl = 0;
766
767 kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
768 sc->sc_rx_ring = NULL;
769
770 return 0;
771 }
772
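/*
 * Set up the transmit side: one DMA area provides a per-descriptor
 * RNDIS message slot (HVN_RNDIS_PKT_LEN rounded up to 128 bytes), and
 * each descriptor additionally gets its own DMA map for mbuf payloads.
 */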
773 static int
774 hvn_tx_ring_create(struct hvn_softc *sc)
775 {
776 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
777 struct hvn_tx_desc *txd;
778 bus_dma_segment_t *seg;
779 size_t msgsize;
780 int i, rsegs;
781 paddr_t pa;
782
783 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
784
785 /* Allocate memory to store RNDIS messages */
786 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
787 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
788 DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
789 device_xname(sc->sc_dev));
790 goto errout;
791 }
792 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
793 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
794 DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
795 device_xname(sc->sc_dev));
796 goto errout;
797 }
798 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
799 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
800 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
801 DPRINTF("%s: failed to create map for RDNIS messages\n",
802 device_xname(sc->sc_dev));
803 goto errout;
804 }
805 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
806 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
807 DPRINTF("%s: failed to create map for RDNIS messages\n",
808 device_xname(sc->sc_dev));
809 goto errout;
810 }
811
812 for (i = 0; i < HVN_TX_DESC; i++) {
813 txd = &sc->sc_tx_desc[i];
814 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
815 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
816 &txd->txd_dmap)) {
817 DPRINTF("%s: failed to create map for TX descriptors\n",
818 device_xname(sc->sc_dev));
819 goto errout;
820 }
821 seg = &sc->sc_tx_rmap->dm_segs[0];
822 pa = seg->ds_addr + (msgsize * i);
823 txd->txd_gpa.gpa_page = atop(pa);
824 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
825 txd->txd_gpa.gpa_len = msgsize;
826 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
827 txd->txd_id = i + HVN_NVS_CHIM_SIG;
828 txd->txd_ready = 1;
829 }
830 sc->sc_tx_avail = HVN_TX_DESC;
831
832 return 0;
833
834 errout:
835 hvn_tx_ring_destroy(sc);
836 return -1;
837 }
838
839 static void
840 hvn_tx_ring_destroy(struct hvn_softc *sc)
841 {
842 struct hvn_tx_desc *txd;
843 int i;
844
845 for (i = 0; i < HVN_TX_DESC; i++) {
846 txd = &sc->sc_tx_desc[i];
847 if (txd->txd_dmap == NULL)
848 continue;
849 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
850 0, txd->txd_dmap->dm_mapsize,
851 BUS_DMASYNC_POSTWRITE);
852 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
853 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
854 txd->txd_dmap = NULL;
855 if (txd->txd_buf == NULL)
856 continue;
857 m_free(txd->txd_buf);
858 txd->txd_buf = NULL;
859 }
860 if (sc->sc_tx_rmap) {
861 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
862 0, sc->sc_tx_rmap->dm_mapsize,
863 BUS_DMASYNC_POSTWRITE);
864 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
865 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
866 }
867 if (sc->sc_tx_msgs) {
868 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
869
870 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
871 msgsize * HVN_TX_DESC);
872 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
873 }
874 sc->sc_tx_rmap = NULL;
875 sc->sc_tx_msgs = NULL;
876 }
877
878 static int
879 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
880 {
881 size_t addrlen = ETHER_ADDR_LEN;
882 int rv;
883
884 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
885 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
886 rv = -1;
887 return rv;
888 }
889
890 static void
891 hvn_get_link_status(struct hvn_softc *sc)
892 {
893 uint32_t state;
894 size_t len = sizeof(state);
895
896 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
897 &state, &len) == 0)
898 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
899 LINK_STATE_UP : LINK_STATE_DOWN;
900 }
901
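/*
 * Open the VMBus channel with a ring sized for the worst case of
 * outstanding RNDIS control and data messages, negotiate the NVS
 * protocol version from newest to oldest, configure VLAN support and
 * MTU where the protocol allows it, and initialize NDIS at 6.1 or
 * 6.30 depending on the negotiated NVS version.
 */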
902 static int
903 hvn_nvs_attach(struct hvn_softc *sc)
904 {
905 static const uint32_t protos[] = {
906 HVN_NVS_PROTO_VERSION_5,
907 HVN_NVS_PROTO_VERSION_4,
908 HVN_NVS_PROTO_VERSION_2,
909 HVN_NVS_PROTO_VERSION_1
910 };
911 struct hvn_nvs_init cmd;
912 struct hvn_nvs_init_resp *rsp;
913 struct hvn_nvs_ndis_init ncmd;
914 struct hvn_nvs_ndis_conf ccmd;
915 uint32_t ndisver, ringsize;
916 uint64_t tid;
917 int i;
918
919 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);
920
921 /* We need to be able to fit all RNDIS control and data messages */
922 ringsize = HVN_RNDIS_CTLREQS *
923 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
924 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
925 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
926
927 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
928
929 /* Associate our interrupt handler with the channel */
930 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
931 hvn_nvs_intr, sc)) {
932 DPRINTF("%s: failed to open channel\n",
933 device_xname(sc->sc_dev));
934 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
935 return -1;
936 }
937
938 memset(&cmd, 0, sizeof(cmd));
939 cmd.nvs_type = HVN_NVS_TYPE_INIT;
940 for (i = 0; i < __arraycount(protos); i++) {
941 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
942 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
943 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
944 return -1;
945
946 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
947 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
948 sc->sc_proto = protos[i];
949 break;
950 }
951 }
952 if (i == __arraycount(protos)) {
953 DPRINTF("%s: failed to negotiate NVSP version\n",
954 device_xname(sc->sc_dev));
955 return -1;
956 }
957
958 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
959 memset(&ccmd, 0, sizeof(ccmd));
960 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
961 ccmd.nvs_mtu = HVN_MAXMTU;
962 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
963
964 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
965 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
966 return -1;
967 }
968
969 memset(&ncmd, 0, sizeof(ncmd));
970 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
971 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
972 ndisver = NDIS_VERSION_6_1;
973 else
974 ndisver = NDIS_VERSION_6_30;
975 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
976 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
977
978 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
979 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
980 return -1;
981
982 sc->sc_ndisver = ndisver;
983
984 return 0;
985 }
986
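/*
 * Channel interrupt handler: drain the channel and dispatch packets
 * by type -- command completions are copied into sc_nvsrsp, RNDIS
 * acks complete transmit descriptors, and receive-buffer packets are
 * handed to hvn_rndis_input().
 */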
987 static void
988 hvn_nvs_intr(void *arg)
989 {
990 struct hvn_softc *sc = arg;
991 struct ifnet *ifp = SC2IFP(sc);
992 struct vmbus_chanpkt_hdr *cph;
993 const struct hvn_nvs_hdr *nvs;
994 uint64_t rid;
995 uint32_t rlen;
996 int rv;
997 bool dotx = false;
998
999 for (;;) {
1000 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
1001 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
1002 if (rv != 0 || rlen == 0) {
1003 if (rv != EAGAIN)
1004 device_printf(sc->sc_dev,
1005 "failed to receive an NVSP packet\n");
1006 break;
1007 }
1008 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
1009 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
1010
1011 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1012 switch (nvs->nvs_type) {
1013 case HVN_NVS_TYPE_INIT_RESP:
1014 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1015 case HVN_NVS_TYPE_CHIM_CONNRESP:
1016 case HVN_NVS_TYPE_SUBCH_RESP:
1017 /* copy the response back */
1018 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1019 sc->sc_nvsdone = 1;
1020 wakeup(&sc->sc_nvsrsp);
1021 break;
1022 case HVN_NVS_TYPE_RNDIS_ACK:
1023 dotx = true;
1024 hvn_txeof(sc, cph->cph_tid);
1025 break;
1026 default:
1027 device_printf(sc->sc_dev,
1028 "unhandled NVSP packet type %u "
1029 "on completion\n", nvs->nvs_type);
1030 break;
1031 }
1032 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1033 switch (nvs->nvs_type) {
1034 case HVN_NVS_TYPE_RNDIS:
1035 hvn_rndis_input(sc, cph->cph_tid, cph);
1036 break;
1037 default:
1038 device_printf(sc->sc_dev,
1039 "unhandled NVSP packet type %u "
1040 "on receive\n", nvs->nvs_type);
1041 break;
1042 }
1043 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
1044 switch (nvs->nvs_type) {
1045 case HVN_NVS_TYPE_TXTBL_NOTE:
1046 /* Useless; ignore */
1047 break;
1048 default:
1049 device_printf(sc->sc_dev,
1050 "got notify, nvs type %u\n", nvs->nvs_type);
1051 break;
1052 }
1053 } else
1054 device_printf(sc->sc_dev,
1055 "unknown NVSP packet type %u\n", cph->cph_type);
1056 }
1057
1058 if (dotx)
1059 if_schedule_deferred_start(ifp);
1060 }
1061
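/*
 * Send an inband NVS command, retrying while the channel ring is
 * full.  If a timeout is given, wait for the completion (copied into
 * sc_nvsrsp by hvn_nvs_intr()), polling the channel while the kernel
 * is still cold and sleeping otherwise.
 */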
1062 static int
1063 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1064 int timo)
1065 {
1066 struct hvn_nvs_hdr *hdr = cmd;
1067 int tries = 10;
1068 int rv, s;
1069
1070 sc->sc_nvsdone = 0;
1071
1072 do {
1073 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1074 tid, VMBUS_CHANPKT_TYPE_INBAND,
1075 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1076 if (rv == EAGAIN) {
1077 if (cold)
1078 delay(1000);
1079 else
1080 tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
1081 } else if (rv) {
1082 DPRINTF("%s: NVSP operation %u send error %d\n",
1083 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1084 return rv;
1085 }
1086 } while (rv != 0 && --tries > 0);
1087
1088 if (tries == 0 && rv != 0) {
1089 device_printf(sc->sc_dev,
1090 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1091 return rv;
1092 }
1093
1094 if (timo == 0)
1095 return 0;
1096
1097 do {
1098 if (cold) {
1099 delay(1000);
1100 s = splnet();
1101 hvn_nvs_intr(sc);
1102 splx(s);
1103 } else
1104 tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
1105 mstohz(1));
1106 } while (--timo > 0 && sc->sc_nvsdone != 1);
1107
1108 if (timo == 0 && sc->sc_nvsdone != 1) {
1109 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1110 hdr->nvs_type);
1111 return ETIMEDOUT;
1112 }
1113 return 0;
1114 }
1115
1116 static int
1117 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1118 {
1119 struct hvn_nvs_rndis_ack cmd;
1120 int tries = 5;
1121 int rv;
1122
1123 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1124 cmd.nvs_status = HVN_NVS_STATUS_OK;
1125 do {
1126 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1127 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1128 if (rv == EAGAIN)
1129 delay(10);
1130 else if (rv) {
1131 DPRINTF("%s: NVSP acknowledgement error %d\n",
1132 device_xname(sc->sc_dev), rv);
1133 return rv;
1134 }
1135 } while (rv != 0 && --tries > 0);
1136 return rv;
1137 }
1138
1139 static void
1140 hvn_nvs_detach(struct hvn_softc *sc)
1141 {
1142
1143 if (vmbus_channel_close(sc->sc_chan) == 0) {
1144 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1145 sc->sc_nvsbuf = NULL;
1146 }
1147 }
1148
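/*
 * RNDIS control requests move through three queues: allocated from
 * the free queue, placed on the submission queue while the host works
 * on them, moved to the completion queue by hvn_complete_cmd() and
 * finally returned to the free queue by hvn_free_cmd().
 */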
1149 static inline struct rndis_cmd *
1150 hvn_alloc_cmd(struct hvn_softc *sc)
1151 {
1152 struct rndis_cmd *rc;
1153
1154 mutex_enter(&sc->sc_cntl_fqlck);
1155 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1156 /* XXX use condvar(9) instead of mtsleep */
1157 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1158 &sc->sc_cntl_fqlck);
1159 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1160 mutex_exit(&sc->sc_cntl_fqlck);
1161 return rc;
1162 }
1163
1164 static inline void
1165 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1166 {
1167
1168 mutex_enter(&sc->sc_cntl_sqlck);
1169 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1170 mutex_exit(&sc->sc_cntl_sqlck);
1171 }
1172
1173 static inline struct rndis_cmd *
1174 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1175 {
1176 struct rndis_cmd *rc;
1177
1178 mutex_enter(&sc->sc_cntl_sqlck);
1179 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1180 if (rc->rc_id == id) {
1181 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1182 break;
1183 }
1184 }
1185 mutex_exit(&sc->sc_cntl_sqlck);
1186 if (rc != NULL) {
1187 mutex_enter(&sc->sc_cntl_cqlck);
1188 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1189 mutex_exit(&sc->sc_cntl_cqlck);
1190 }
1191 return rc;
1192 }
1193
1194 static inline void
1195 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1196 {
1197
1198 mutex_enter(&sc->sc_cntl_cqlck);
1199 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1200 mutex_exit(&sc->sc_cntl_cqlck);
1201 }
1202
1203 static inline int
1204 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1205 {
1206 struct rndis_cmd *rn;
1207
1208 mutex_enter(&sc->sc_cntl_sqlck);
1209 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1210 if (rn == rc) {
1211 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1212 mutex_exit(&sc->sc_cntl_sqlck);
1213 return 0;
1214 }
1215 }
1216 mutex_exit(&sc->sc_cntl_sqlck);
1217 return -1;
1218 }
1219
1220 static inline void
1221 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1222 {
1223
1224 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1225 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1226 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1227 mutex_enter(&sc->sc_cntl_fqlck);
1228 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1229 mutex_exit(&sc->sc_cntl_fqlck);
1230 wakeup(&sc->sc_cntl_fq);
1231 }
1232
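/*
 * Allocate a DMA-able page for each RNDIS control request, issue
 * REMOTE_NDIS_INITIALIZE_MSG and prepare the NVS header used for all
 * outgoing data packets.
 */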
1233 static int
1234 hvn_rndis_attach(struct hvn_softc *sc)
1235 {
1236 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1237 struct rndis_init_req *req;
1238 struct rndis_init_comp *cmp;
1239 struct rndis_cmd *rc;
1240 int i, rv;
1241
1242 /* RNDIS control message queues */
1243 TAILQ_INIT(&sc->sc_cntl_sq);
1244 TAILQ_INIT(&sc->sc_cntl_cq);
1245 TAILQ_INIT(&sc->sc_cntl_fq);
1246 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1247 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1248 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1249
1250 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1251 rc = &sc->sc_cntl_msgs[i];
1252 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1253 dmaflags, &rc->rc_dmap)) {
1254 DPRINTF("%s: failed to create RNDIS command map\n",
1255 device_xname(sc->sc_dev));
1256 goto errout;
1257 }
1258 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1259 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1260 DPRINTF("%s: failed to allocate RNDIS command\n",
1261 device_xname(sc->sc_dev));
1262 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1263 goto errout;
1264 }
1265 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1266 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1267 DPRINTF("%s: failed to allocate RNDIS command\n",
1268 device_xname(sc->sc_dev));
1269 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1270 rc->rc_nsegs);
1271 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1272 goto errout;
1273 }
1274 memset(rc->rc_req, 0, PAGE_SIZE);
1275 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1276 PAGE_SIZE, NULL, dmaflags)) {
1277 DPRINTF("%s: failed to load RNDIS command map\n",
1278 device_xname(sc->sc_dev));
1279 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1280 rc->rc_nsegs);
1281 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1282 goto errout;
1283 }
1284 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1285 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1286 }
1287
1288 rc = hvn_alloc_cmd(sc);
1289
1290 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1291 BUS_DMASYNC_PREREAD);
1292
1293 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1294
1295 req = rc->rc_req;
1296 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1297 req->rm_len = sizeof(*req);
1298 req->rm_rid = rc->rc_id;
1299 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1300 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1301 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1302
1303 rc->rc_cmplen = sizeof(*cmp);
1304
1305 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1306 BUS_DMASYNC_PREWRITE);
1307
1308 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1309 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1310 device_xname(sc->sc_dev), rv);
1311 hvn_free_cmd(sc, rc);
1312 goto errout;
1313 }
1314 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1315 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1316 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1317 device_xname(sc->sc_dev), cmp->rm_status);
1318 hvn_free_cmd(sc, rc);
1319 goto errout;
1320 }
1321
1322 hvn_free_cmd(sc, rc);
1323
1324 /* Initialize RNDIS Data command */
1325 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1326 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1327 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1328 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1329
1330 return 0;
1331
1332 errout:
1333 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1334 rc = &sc->sc_cntl_msgs[i];
1335 if (rc->rc_req == NULL)
1336 continue;
1337 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1338 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1339 rc->rc_req = NULL;
1340 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1341 }
1342 return -1;
1343 }
1344
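/*
 * Enable IPv4/TCP (and, with NDIS >= 6.30, UDP) checksum offload in
 * both directions via OID_TCP_OFFLOAD_PARAMETERS, using the structure
 * revision that matches the negotiated NDIS version.
 */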
1345 static int
1346 hvn_set_capabilities(struct hvn_softc *sc)
1347 {
1348 struct ndis_offload_params params;
1349 size_t len = sizeof(params);
1350
1351 memset(&params, 0, sizeof(params));
1352
1353 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1354 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1355 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1356 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1357 } else {
1358 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1359 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1360 }
1361
1362 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1363 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1364 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1365 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1366 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1367 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1368 }
1369
1370 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1371 }
1372
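/*
 * Send an RNDIS control request as a single-entry SGL pointing at the
 * request page and wait for hvn_rndis_complete() to mark it done,
 * rolling the request back off the submission queue on failure or
 * timeout.
 */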
1373 static int
1374 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1375 {
1376 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1377 struct rndis_msghdr *hdr = rc->rc_req;
1378 struct vmbus_gpa sgl[1];
1379 int tries = 10;
1380 int rv, s;
1381
1382 KASSERT(timo > 0);
1383
1384 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1385 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1386 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1387
1388 sgl[0].gpa_page = rc->rc_gpa;
1389 sgl[0].gpa_len = hdr->rm_len;
1390 sgl[0].gpa_ofs = 0;
1391
1392 rc->rc_done = 0;
1393
1394 hvn_submit_cmd(sc, rc);
1395
1396 do {
1397 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1398 sizeof(*msg), rc->rc_id);
1399 if (rv == EAGAIN) {
1400 if (cold)
1401 delay(1000);
1402 else
1403 tsleep(rc, PRIBIO, "rndisout", mstohz(1));
1404 } else if (rv) {
1405 DPRINTF("%s: RNDIS operation %u send error %d\n",
1406 device_xname(sc->sc_dev), hdr->rm_type, rv);
1407 hvn_rollback_cmd(sc, rc);
1408 return rv;
1409 }
1410 } while (rv != 0 && --tries > 0);
1411
1412 if (tries == 0 && rv != 0) {
1413 device_printf(sc->sc_dev,
1414 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1415 return rv;
1416 }
1417
1418 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1419 BUS_DMASYNC_POSTWRITE);
1420
1421 do {
1422 if (cold) {
1423 delay(1000);
1424 s = splnet();
1425 hvn_nvs_intr(sc);
1426 splx(s);
1427 } else
1428 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
1429 } while (--timo > 0 && rc->rc_done != 1);
1430
1431 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1432 BUS_DMASYNC_POSTREAD);
1433
1434 if (rc->rc_done != 1) {
1435 rv = timo == 0 ? ETIMEDOUT : EINTR;
1436 if (hvn_rollback_cmd(sc, rc)) {
1437 hvn_release_cmd(sc, rc);
1438 rv = 0;
1439 } else if (rv == ETIMEDOUT) {
1440 device_printf(sc->sc_dev,
1441 "RNDIS operation %u timed out\n", hdr->rm_type);
1442 }
1443 return rv;
1444 }
1445
1446 hvn_release_cmd(sc, rc);
1447 return 0;
1448 }
1449
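/*
 * A receive-buffer packet describes one or more ranges within the
 * shared receive buffer; walk them, dispatch each embedded RNDIS
 * message by type and acknowledge the packet to the host.
 */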
1450 static void
1451 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1452 {
1453 struct vmbus_chanpkt_prplist *cp = arg;
1454 uint32_t off, len, type;
1455 int i;
1456
1457 if (sc->sc_rx_ring == NULL) {
1458 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1459 return;
1460 }
1461
1462 for (i = 0; i < cp->cp_range_cnt; i++) {
1463 off = cp->cp_range[i].gpa_ofs;
1464 len = cp->cp_range[i].gpa_len;
1465
1466 KASSERT(off + len <= sc->sc_rx_size);
1467 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1468
1469 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1470 switch (type) {
1471 /* data message */
1472 case REMOTE_NDIS_PACKET_MSG:
1473 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1474 break;
1475 /* completion messages */
1476 case REMOTE_NDIS_INITIALIZE_CMPLT:
1477 case REMOTE_NDIS_QUERY_CMPLT:
1478 case REMOTE_NDIS_SET_CMPLT:
1479 case REMOTE_NDIS_RESET_CMPLT:
1480 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1481 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1482 break;
1483 /* notification message */
1484 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1485 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1486 break;
1487 default:
1488 device_printf(sc->sc_dev,
1489 "unhandled RNDIS message type %u\n", type);
1490 break;
1491 }
1492 }
1493
1494 hvn_nvs_ack(sc, tid);
1495 }
1496
1497 static inline struct mbuf *
1498 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1499 {
1500 struct ifnet *ifp = SC2IFP(sc);
1501 struct mbuf *m;
1502 size_t size = len + ETHER_ALIGN;
1503
1504 MGETHDR(m, M_NOWAIT, MT_DATA);
1505 if (m == NULL)
1506 return NULL;
1507
1508 if (size > MHLEN) {
1509 if (size <= MCLBYTES)
1510 MCLGET(m, M_NOWAIT);
1511 else
1512 MEXTMALLOC(m, size, M_NOWAIT);
1513 if ((m->m_flags & M_EXT) == 0) {
1514 m_freem(m);
1515 return NULL;
1516 }
1517 }
1518
1519 m->m_len = m->m_pkthdr.len = size;
1520 m_adj(m, ETHER_ALIGN);
1521 m_copyback(m, 0, len, buf);
1522 m_set_rcvif(m, ifp);
1523 return m;
1524 }
1525
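/*
 * Parse an RNDIS data message: validate the data and packet-info
 * areas against the received length, copy the frame into an mbuf and
 * translate checksum and VLAN packet info into mbuf metadata before
 * handing it to the network stack.
 */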
1526 static void
1527 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1528 {
1529 struct ifnet *ifp = SC2IFP(sc);
1530 struct rndis_packet_msg *pkt;
1531 struct rndis_pktinfo *pi;
1532 uint32_t csum, vlan;
1533 struct mbuf *m;
1534
1535 if (!(ifp->if_flags & IFF_RUNNING))
1536 return;
1537
1538 if (len < sizeof(*pkt)) {
1539 device_printf(sc->sc_dev, "data packet too short: %u\n",
1540 len);
1541 return;
1542 }
1543
1544 pkt = (struct rndis_packet_msg *)buf;
1545 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1546 device_printf(sc->sc_dev,
1547 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1548 pkt->rm_datalen);
1549 return;
1550 }
1551
1552 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1553 pkt->rm_datalen)) == NULL) {
1554 ifp->if_ierrors++;
1555 return;
1556 }
1557
1558 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1559 device_printf(sc->sc_dev,
1560 "pktinfo is out of bounds: %u@%u vs %u\n",
1561 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1562 goto done;
1563 }
1564
1565 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1566 pkt->rm_pktinfooffset);
1567 while (pkt->rm_pktinfolen > 0) {
1568 if (pi->rm_size > pkt->rm_pktinfolen) {
1569 device_printf(sc->sc_dev,
1570 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1571 pkt->rm_pktinfolen);
1572 break;
1573 }
1574
1575 switch (pi->rm_type) {
1576 case NDIS_PKTINFO_TYPE_CSUM:
1577 memcpy(&csum, pi->rm_data, sizeof(csum));
1578 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1579 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1580 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1581 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1582 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1583 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1584 break;
1585 case NDIS_PKTINFO_TYPE_VLAN:
1586 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1587 if (vlan != 0xffffffff) {
1588 m->m_pkthdr.ether_vtag =
1589 NDIS_VLAN_INFO_ID(vlan) |
1590 (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1591 m->m_flags |= M_VLANTAG;
1592 }
1593 break;
1594 default:
1595 DPRINTF("%s: unhandled pktinfo type %u\n",
1596 device_xname(sc->sc_dev), pi->rm_type);
1597 break;
1598 }
1599
1600 pkt->rm_pktinfolen -= pi->rm_size;
1601 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1602 }
1603
1604 done:
1605 if_percpuq_enqueue(sc->sc_ipq, m);
1606 }
1607
1608 static void
1609 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1610 {
1611 struct rndis_cmd *rc;
1612 uint32_t id;
1613
1614 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1615 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1616 if (len < rc->rc_cmplen)
1617 device_printf(sc->sc_dev,
1618 "RNDIS response %u too short: %u\n", id, len);
1619 else
1620 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1621 if (len > rc->rc_cmplen &&
1622 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1623 device_printf(sc->sc_dev,
1624 "RNDIS response %u too large: %u\n", id, len);
1625 else if (len > rc->rc_cmplen)
1626 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1627 len - rc->rc_cmplen);
1628 rc->rc_done = 1;
1629 wakeup(rc);
1630 } else {
1631 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1632 device_xname(sc->sc_dev), id);
1633 }
1634 }
1635
1636 static int
1637 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1638 {
1639 uint64_t rid = (uint64_t)txd->txd_id << 32;
1640 int rv;
1641
1642 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1643 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1644 if (rv) {
1645 DPRINTF("%s: RNDIS data send error %d\n",
1646 device_xname(sc->sc_dev), rv);
1647 return rv;
1648 }
1649 return 0;
1650 }
1651
1652 static void
1653 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1654 {
1655 struct ifnet *ifp = SC2IFP(sc);
1656 uint32_t status;
1657 int link_state = sc->sc_link_state;
1658
1659 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1660 switch (status) {
1661 case RNDIS_STATUS_MEDIA_CONNECT:
1662 sc->sc_link_state = LINK_STATE_UP;
1663 break;
1664 case RNDIS_STATUS_MEDIA_DISCONNECT:
1665 sc->sc_link_state = LINK_STATE_DOWN;
1666 break;
1667 /* Ignore these */
1668 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1669 return;
1670 default:
1671 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1672 status);
1673 return;
1674 }
1675 if (link_state != sc->sc_link_state)
1676 if_link_state_change(ifp, sc->sc_link_state);
1677 }
1678
1679 static int
1680 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1681 {
1682 struct rndis_cmd *rc;
1683 struct rndis_query_req *req;
1684 struct rndis_query_comp *cmp;
1685 size_t olength = *length;
1686 int rv;
1687
1688 rc = hvn_alloc_cmd(sc);
1689
1690 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1691 BUS_DMASYNC_PREREAD);
1692
1693 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1694
1695 req = rc->rc_req;
1696 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1697 req->rm_len = sizeof(*req);
1698 req->rm_rid = rc->rc_id;
1699 req->rm_oid = oid;
1700 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1701
1702 rc->rc_cmplen = sizeof(*cmp);
1703
1704 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1705 BUS_DMASYNC_PREWRITE);
1706
1707 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1708 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1709 device_xname(sc->sc_dev), rv);
1710 hvn_free_cmd(sc, rc);
1711 return rv;
1712 }
1713
1714 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1715 switch (cmp->rm_status) {
1716 case RNDIS_STATUS_SUCCESS:
1717 if (cmp->rm_infobuflen > olength) {
1718 rv = EINVAL;
1719 break;
1720 }
1721 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1722 *length = cmp->rm_infobuflen;
1723 break;
1724 default:
1725 *length = 0;
1726 rv = EIO;
1727 break;
1728 }
1729
1730 hvn_free_cmd(sc, rc);
1731 return rv;
1732 }
1733
1734 static int
1735 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1736 {
1737 struct rndis_cmd *rc;
1738 struct rndis_set_req *req;
1739 struct rndis_set_comp *cmp;
1740 int rv;
1741
1742 rc = hvn_alloc_cmd(sc);
1743
1744 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1745 BUS_DMASYNC_PREREAD);
1746
1747 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1748
1749 req = rc->rc_req;
1750 req->rm_type = REMOTE_NDIS_SET_MSG;
1751 req->rm_len = sizeof(*req) + length;
1752 req->rm_rid = rc->rc_id;
1753 req->rm_oid = oid;
1754 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1755
1756 rc->rc_cmplen = sizeof(*cmp);
1757
1758 if (length > 0) {
1759 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1760 req->rm_infobuflen = length;
1761 memcpy(req + 1, data, length);
1762 }
1763
1764 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1765 BUS_DMASYNC_PREWRITE);
1766
1767 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1768 DPRINTF("%s: SET_MSG failed, error %d\n",
1769 device_xname(sc->sc_dev), rv);
1770 hvn_free_cmd(sc, rc);
1771 return rv;
1772 }
1773
1774 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1775 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1776 rv = EIO;
1777
1778 hvn_free_cmd(sc, rc);
1779 return rv;
1780 }
1781
1782 static int
1783 hvn_rndis_open(struct hvn_softc *sc)
1784 {
1785 uint32_t filter;
1786 int rv;
1787
1788 if (sc->sc_promisc)
1789 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1790 else
1791 filter = RNDIS_PACKET_TYPE_BROADCAST |
1792 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1793 RNDIS_PACKET_TYPE_DIRECTED;
1794
1795 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1796 &filter, sizeof(filter));
1797 if (rv) {
1798 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1799 device_xname(sc->sc_dev), filter);
1800 }
1801 return rv;
1802 }
1803
1804 static int
1805 hvn_rndis_close(struct hvn_softc *sc)
1806 {
1807 uint32_t filter = 0;
1808 int rv;
1809
1810 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1811 &filter, sizeof(filter));
1812 if (rv) {
1813 DPRINTF("%s: failed to clear RNDIS filter\n",
1814 device_xname(sc->sc_dev));
1815 }
1816 return rv;
1817 }
1818
1819 static void
1820 hvn_rndis_detach(struct hvn_softc *sc)
1821 {
1822 struct rndis_cmd *rc;
1823 struct rndis_halt_req *req;
1824 int rv;
1825
1826 rc = hvn_alloc_cmd(sc);
1827
1828 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1829 BUS_DMASYNC_PREREAD);
1830
1831 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1832
1833 req = rc->rc_req;
1834 req->rm_type = REMOTE_NDIS_HALT_MSG;
1835 req->rm_len = sizeof(*req);
1836 req->rm_rid = rc->rc_id;
1837
1838 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1839 BUS_DMASYNC_PREWRITE);
1840
1841 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1842 DPRINTF("%s: HALT_MSG failed, error %d\n",
1843 device_xname(sc->sc_dev), rv);
1844 }
1845 hvn_free_cmd(sc, rc);
1846 }
1847