1 /*	$NetBSD: if_hvn.c,v 1.2.2.8 2020/12/11 15:48:02 martin Exp $	*/
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.2.2.8 2020/12/11 15:48:02 martin Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70
71 #ifndef ETHER_ALIGN
72 #define ETHER_ALIGN 2
73 #endif
74
75 #define HVN_NVS_MSGSIZE 32
76 #define HVN_NVS_BUFSIZE PAGE_SIZE
77
78 /*
79 * RNDIS control interface
80 */
81 #define HVN_RNDIS_CTLREQS 4
82 #define HVN_RNDIS_BUFSIZE 512
83
84 struct rndis_cmd {
85 uint32_t rc_id;
86 struct hvn_nvs_rndis rc_msg;
87 void *rc_req;
88 bus_dmamap_t rc_dmap;
89 bus_dma_segment_t rc_segs;
90 int rc_nsegs;
91 uint64_t rc_gpa;
92 struct rndis_packet_msg rc_cmp;
93 uint32_t rc_cmplen;
94 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
95 int rc_done;
96 TAILQ_ENTRY(rndis_cmd) rc_entry;
97 };
98 TAILQ_HEAD(rndis_queue, rndis_cmd);
99
100 #define HVN_MAXMTU (9 * 1024)
101
102 #define HVN_RNDIS_XFER_SIZE 2048
103
104 /*
105 * Tx ring
106 */
107 #define HVN_TX_DESC 256
108 #define HVN_TX_FRAGS 15 /* 31 is the max */
109 #define HVN_TX_FRAG_SIZE PAGE_SIZE
110 #define HVN_TX_PKT_SIZE 16384
111
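/*
 * Worst-case size of the RNDIS message that is prepended to every Tx
 * packet: the packet header plus one per-packet-info record each for
 * VLAN and Tx checksum offload, the only two hvn_encap() ever appends.
 */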
112 #define HVN_RNDIS_PKT_LEN \
113 (sizeof(struct rndis_packet_msg) + \
114 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
115 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
116
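/*
 * A Tx descriptor owns one slot of the shared RNDIS message area
 * (txd_req, described to the host by txd_gpa) and a DMA map for the
 * mbuf chain being sent.  txd_sgl[0] covers the RNDIS message itself,
 * txd_sgl[1..n] the mbuf's DMA segments.
 */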
117 struct hvn_tx_desc {
118 uint32_t txd_id;
119 int txd_ready;
120 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
121 int txd_nsge;
122 struct mbuf *txd_buf;
123 bus_dmamap_t txd_dmap;
124 struct vmbus_gpa txd_gpa;
125 struct rndis_packet_msg *txd_req;
126 };
127
128 struct hvn_softc {
129 device_t sc_dev;
130
131 struct vmbus_softc *sc_vmbus;
132 struct vmbus_channel *sc_chan;
133 bus_dma_tag_t sc_dmat;
134
135 struct ethercom sc_ec;
136 struct ifmedia sc_media;
137 struct if_percpuq *sc_ipq;
138 int sc_link_state;
139 int sc_promisc;
140
141 uint32_t sc_flags;
142 #define HVN_SCF_ATTACHED __BIT(0)
143
144 /* NVS protocol */
145 int sc_proto;
146 uint32_t sc_nvstid;
147 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
148 uint8_t *sc_nvsbuf;
149 int sc_nvsdone;
150
151 /* RNDIS protocol */
152 int sc_ndisver;
153 uint32_t sc_rndisrid;
154 struct rndis_queue sc_cntl_sq; /* submission queue */
155 kmutex_t sc_cntl_sqlck;
156 struct rndis_queue sc_cntl_cq; /* completion queue */
157 kmutex_t sc_cntl_cqlck;
158 struct rndis_queue sc_cntl_fq; /* free queue */
159 kmutex_t sc_cntl_fqlck;
160 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
161 struct hvn_nvs_rndis sc_data_msg;
162
163 /* Rx ring */
164 uint8_t *sc_rx_ring;
165 int sc_rx_size;
166 uint32_t sc_rx_hndl;
167 struct hyperv_dma sc_rx_dma;
168
169 /* Tx ring */
170 uint32_t sc_tx_next;
171 uint32_t sc_tx_avail;
172 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
173 bus_dmamap_t sc_tx_rmap;
174 uint8_t *sc_tx_msgs;
175 bus_dma_segment_t sc_tx_mseg;
176 };
177
178 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
179 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
180
181
182 static int hvn_match(device_t, cfdata_t, void *);
183 static void hvn_attach(device_t, device_t, void *);
184 static int hvn_detach(device_t, int);
185
186 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
187 hvn_match, hvn_attach, hvn_detach, NULL);
188
189 static int hvn_ioctl(struct ifnet *, u_long, void *);
190 static int hvn_media_change(struct ifnet *);
191 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
192 static int hvn_iff(struct hvn_softc *);
193 static int hvn_init(struct ifnet *);
194 static void hvn_stop(struct ifnet *, int);
195 static void hvn_start(struct ifnet *);
196 static int hvn_encap(struct hvn_softc *, struct mbuf *,
197 struct hvn_tx_desc **);
198 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
199 static void hvn_txeof(struct hvn_softc *, uint64_t);
200 static int hvn_rx_ring_create(struct hvn_softc *);
201 static int hvn_rx_ring_destroy(struct hvn_softc *);
202 static int hvn_tx_ring_create(struct hvn_softc *);
203 static void hvn_tx_ring_destroy(struct hvn_softc *);
204 static int hvn_set_capabilities(struct hvn_softc *);
205 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
206 static void hvn_get_link_status(struct hvn_softc *);
207
208 /* NVSP */
209 static int hvn_nvs_attach(struct hvn_softc *);
210 static void hvn_nvs_intr(void *);
211 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
212 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
213 static void hvn_nvs_detach(struct hvn_softc *);
214
215 /* RNDIS */
216 static int hvn_rndis_attach(struct hvn_softc *);
217 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
218 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
219 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
220 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
221 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
222 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
223 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
224 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
225 static int hvn_rndis_open(struct hvn_softc *);
226 static int hvn_rndis_close(struct hvn_softc *);
227 static void hvn_rndis_detach(struct hvn_softc *);
228
229 static int
230 hvn_match(device_t parent, cfdata_t match, void *aux)
231 {
232 struct vmbus_attach_args *aa = aux;
233
234 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
235 return 0;
236 return 1;
237 }
238
239 static void
240 hvn_attach(device_t parent, device_t self, void *aux)
241 {
242 struct hvn_softc *sc = device_private(self);
243 struct vmbus_attach_args *aa = aux;
244 struct ifnet *ifp = SC2IFP(sc);
245 uint8_t enaddr[ETHER_ADDR_LEN];
246 int error;
247
248 sc->sc_dev = self;
249 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
250 sc->sc_chan = aa->aa_chan;
251 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
252
253 aprint_naive("\n");
254 aprint_normal(": Hyper-V NetVSC\n");
255
256 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
257
258 if (hvn_nvs_attach(sc)) {
259 aprint_error_dev(self, "failed to init NVSP\n");
260 return;
261 }
262
263 if (hvn_rx_ring_create(sc)) {
264 aprint_error_dev(self, "failed to create Rx ring\n");
265 goto fail1;
266 }
267
268 if (hvn_tx_ring_create(sc)) {
269 aprint_error_dev(self, "failed to create Tx ring\n");
270 goto fail1;
271 }
272
273 ifp->if_softc = sc;
274 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
275 ifp->if_ioctl = hvn_ioctl;
276 ifp->if_start = hvn_start;
277 ifp->if_init = hvn_init;
278 ifp->if_stop = hvn_stop;
279 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
280 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
281 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
282 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
283 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
284 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
285 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
286 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
287 }
288 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
289 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
290 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
291 }
292
293 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
294 IFQ_SET_READY(&ifp->if_snd);
295
296 ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
297 hvn_media_status);
298 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
299 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
300
301 error = if_initialize(ifp);
302 if (error) {
303 aprint_error_dev(self, "if_initialize failed(%d)\n", error);
304 goto fail2;
305 }
306 sc->sc_ipq = if_percpuq_create(ifp);
307 if_deferred_start_init(ifp, NULL);
308
309 if (hvn_rndis_attach(sc)) {
310 aprint_error_dev(self, "failed to init RNDIS\n");
311 goto fail1;
312 }
313
314 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
315 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
316 sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
317
318 if (hvn_set_capabilities(sc)) {
319 aprint_error_dev(self, "failed to setup offloading\n");
320 goto fail2;
321 }
322
323 if (hvn_get_lladdr(sc, enaddr)) {
324 aprint_error_dev(self,
325 "failed to obtain an ethernet address\n");
326 goto fail2;
327 }
328 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
329
330 ether_ifattach(ifp, enaddr);
331 if_register(ifp);
332
333 if (pmf_device_register(self, NULL, NULL))
334 pmf_class_network_register(self, ifp);
335 else
336 aprint_error_dev(self, "couldn't establish power handler\n");
337
338 SET(sc->sc_flags, HVN_SCF_ATTACHED);
339 return;
340
341 fail2: hvn_rndis_detach(sc);
342 fail1: hvn_rx_ring_destroy(sc);
343 hvn_tx_ring_destroy(sc);
344 hvn_nvs_detach(sc);
345 }
346
347 static int
348 hvn_detach(device_t self, int flags)
349 {
350 struct hvn_softc *sc = device_private(self);
351 struct ifnet *ifp = SC2IFP(sc);
352
353 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
354 return 0;
355
356 hvn_stop(ifp, 1);
357
358 pmf_device_deregister(self);
359
360 ether_ifdetach(ifp);
361 if_detach(ifp);
362 if_percpuq_destroy(sc->sc_ipq);
363
364 hvn_rndis_detach(sc);
365 hvn_rx_ring_destroy(sc);
366 hvn_tx_ring_destroy(sc);
367 hvn_nvs_detach(sc);
368
369 return 0;
370 }
371
372 static int
373 hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
374 {
375 struct hvn_softc *sc = IFP2SC(ifp);
376 struct ifreq *ifr = (struct ifreq *)data;
377 int s, error = 0;
378
379 s = splnet();
380
381 switch (command) {
382 case SIOCGIFMEDIA:
383 case SIOCSIFMEDIA:
384 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
385 break;
386 default:
387 error = ether_ioctl(ifp, command, data);
388 break;
389 }
390
391 if (error == ENETRESET) {
392 if (ifp->if_flags & IFF_RUNNING)
393 hvn_iff(sc);
394 error = 0;
395 }
396
397 splx(s);
398
399 return error;
400 }
401
402 static int
403 hvn_media_change(struct ifnet *ifp)
404 {
405
406 return 0;
407 }
408
409 static void
410 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
411 {
412 struct hvn_softc *sc = IFP2SC(ifp);
413 int link_state;
414
415 link_state = sc->sc_link_state;
416 hvn_get_link_status(sc);
417 if (link_state != sc->sc_link_state)
418 if_link_state_change(ifp, sc->sc_link_state);
419
420 ifmr->ifm_status = IFM_AVALID;
421 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
422 if (sc->sc_link_state == LINK_STATE_UP)
423 ifmr->ifm_status |= IFM_ACTIVE;
424 }
425
426 static int
427 hvn_iff(struct hvn_softc *sc)
428 {
429
430 /* XXX */
431 sc->sc_promisc = 0;
432
433 return 0;
434 }
435
436 static int
437 hvn_init(struct ifnet *ifp)
438 {
439 struct hvn_softc *sc = IFP2SC(ifp);
440 int error;
441
442 hvn_stop(ifp, 0);
443
444 error = hvn_iff(sc);
445 if (error)
446 return error;
447
448 error = hvn_rndis_open(sc);
449 if (error == 0) {
450 ifp->if_flags |= IFF_RUNNING;
451 ifp->if_flags &= ~IFF_OACTIVE;
452 }
453 return error;
454 }
455
456 static void
457 hvn_stop(struct ifnet *ifp, int disable)
458 {
459 struct hvn_softc *sc = IFP2SC(ifp);
460
461 hvn_rndis_close(sc);
462
463 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
464 }
465
466 static void
467 hvn_start(struct ifnet *ifp)
468 {
469 struct hvn_softc *sc = IFP2SC(ifp);
470 struct hvn_tx_desc *txd;
471 struct mbuf *m;
472
473 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
474 return;
475
476 for (;;) {
477 if (!sc->sc_tx_avail) {
478 /* transient */
479 ifp->if_flags |= IFF_OACTIVE;
480 break;
481 }
482
483 IFQ_DEQUEUE(&ifp->if_snd, m);
484 if (m == NULL)
485 break;
486
487 if (hvn_encap(sc, m, &txd)) {
488 /* the chain is too large */
489 ifp->if_oerrors++;
490 m_freem(m);
491 continue;
492 }
493
494 bpf_mtap(ifp, m);
495
496 if (hvn_rndis_output(sc, txd)) {
497 hvn_decap(sc, txd);
498 ifp->if_oerrors++;
499 m_freem(m);
500 continue;
501 }
502
503 sc->sc_tx_next++;
504 }
505 }
506
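/*
 * Append a per-packet-info record of "datalen" bytes to an RNDIS packet
 * message, growing rm_pktinfolen, rm_dataoffset and rm_len accordingly.
 * "pktsize" is the size of the buffer holding the message and is only
 * used to assert that the record still fits.  Returns a pointer to the
 * record's data area for the caller to fill in.
 */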
507 static inline char *
508 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
509 size_t datalen, uint32_t type)
510 {
511 struct rndis_pktinfo *pi;
512 size_t pi_size = sizeof(*pi) + datalen;
513 char *cp;
514
515 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
516 pktsize);
517
518 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
519 pi = (struct rndis_pktinfo *)cp;
520 pi->rm_size = pi_size;
521 pi->rm_type = type;
522 pi->rm_pktinfooffset = sizeof(*pi);
523 pkt->rm_pktinfolen += pi_size;
524 pkt->rm_dataoffset += pi_size;
525 pkt->rm_len += pi_size;
526
527 return (char *)pi->rm_data;
528 }
529
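/*
 * Encapsulate an outgoing mbuf chain: claim a free Tx descriptor, build
 * the RNDIS packet message in its slot (appending VLAN and checksum
 * per-packet-info where requested), load the mbuf for DMA and fill the
 * scatter-gather list.  The resulting message looks roughly like this
 * (a sketch; the frame data is carried by separate GPA entries rather
 * than being contiguous with the header):
 *
 *	+---------------------------+  txd_sgl[0] (txd_req slot)
 *	| rndis_packet_msg header   |
 *	| pktinfo: VLAN (optional)  |
 *	| pktinfo: CSUM (optional)  |
 *	+---------------------------+
 *	| frame data                |  txd_sgl[1..n] (mbuf segments)
 *	+---------------------------+
 *
 * On success the descriptor is consumed (sc_tx_avail is decremented)
 * and returned through txd0; hvn_decap() or hvn_txeof() releases it.
 */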
530 static int
531 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
532 {
533 struct hvn_tx_desc *txd;
534 struct rndis_packet_msg *pkt;
535 bus_dma_segment_t *seg;
536 size_t pktlen;
537 int i, rv;
538
539 do {
540 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
541 sc->sc_tx_next++;
542 } while (!txd->txd_ready);
543 txd->txd_ready = 0;
544
545 pkt = txd->txd_req;
546 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
547 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
548 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
549 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
550 pkt->rm_datalen = m->m_pkthdr.len;
551 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
552 pkt->rm_pktinfolen = 0;
553
554 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
555 BUS_DMA_NOWAIT);
556 switch (rv) {
557 case 0:
558 break;
559 case EFBIG:
560 if (m_defrag(m, M_NOWAIT) != NULL &&
561 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
562 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
563 break;
564 /* FALLTHROUGH */
565 default:
566 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
567 return -1;
568 }
569 txd->txd_buf = m;
570
571 if (m->m_flags & M_VLANTAG) {
572 uint32_t vlan;
573 char *cp;
574
575 vlan = NDIS_VLAN_INFO_MAKE(
576 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
577 EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
578 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
579 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
580 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
581 }
582
583 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
584 M_CSUM_TCPv4)) {
585 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
586 char *cp;
587
588 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
589 csum |= NDIS_TXCSUM_INFO_IPCS;
590 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
591 csum |= NDIS_TXCSUM_INFO_TCPCS;
592 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
593 csum |= NDIS_TXCSUM_INFO_UDPCS;
594 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
595 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
596 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
597 }
598
599 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
600 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
601
602 /* Attach an RNDIS message to the first slot */
603 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
604 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
605 txd->txd_sgl[0].gpa_len = pktlen;
606 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
607
608 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
609 seg = &txd->txd_dmap->dm_segs[i];
610 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
611 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
612 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
613 }
614
615 *txd0 = txd;
616
617 atomic_dec_uint(&sc->sc_tx_avail);
618
619 return 0;
620 }
621
622 static void
623 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
624 {
625 struct ifnet *ifp = SC2IFP(sc);
626
627 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
628 0, txd->txd_dmap->dm_mapsize,
629 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
630 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
631 txd->txd_buf = NULL;
632 txd->txd_nsge = 0;
633 txd->txd_ready = 1;
634 atomic_inc_uint(&sc->sc_tx_avail);
635 ifp->if_flags &= ~IFF_OACTIVE;
636 }
637
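/*
 * Transmit completion: the upper 32 bits of the transaction id carry
 * the descriptor id (ring index offset by HVN_NVS_CHIM_SIG).  Unload
 * the DMA map, free the mbuf and return the descriptor to circulation.
 */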
638 static void
639 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
640 {
641 struct ifnet *ifp = SC2IFP(sc);
642 struct hvn_tx_desc *txd;
643 struct mbuf *m;
644 uint32_t id = tid >> 32;
645
646 if ((tid & 0xffffffffU) != 0)
647 return;
648
649 id -= HVN_NVS_CHIM_SIG;
650 if (id >= HVN_TX_DESC) {
651 device_printf(sc->sc_dev, "tx packet index too large: %u\n", id);
652 return;
653 }
654
655 txd = &sc->sc_tx_desc[id];
656
657 if ((m = txd->txd_buf) == NULL) {
658 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
659 return;
660 }
661 txd->txd_buf = NULL;
662
663 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
664 0, txd->txd_dmap->dm_mapsize,
665 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
666 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
667 m_freem(m);
668 ifp->if_opackets++;
669
670 txd->txd_ready = 1;
671
672 atomic_inc_uint(&sc->sc_tx_avail);
673 ifp->if_flags &= ~IFF_OACTIVE;
674 }
675
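/*
 * Allocate the host receive buffer (15MB with NVS 2 and older, 16MB
 * otherwise), create a GPADL handle for it and announce it to the host
 * with an RXBUF_CONN request; the response is expected to describe a
 * single buffer section.
 */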
676 static int
677 hvn_rx_ring_create(struct hvn_softc *sc)
678 {
679 struct hvn_nvs_rxbuf_conn cmd;
680 struct hvn_nvs_rxbuf_conn_resp *rsp;
681 uint64_t tid;
682
683 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
684 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
685 else
686 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
687 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
688 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE);
689 if (sc->sc_rx_ring == NULL) {
690 DPRINTF("%s: failed to allocate Rx ring buffer\n",
691 device_xname(sc->sc_dev));
692 return -1;
693 }
694 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
695 &sc->sc_rx_hndl)) {
696 DPRINTF("%s: failed to obtain a PA handle\n",
697 device_xname(sc->sc_dev));
698 goto errout;
699 }
700
701 memset(&cmd, 0, sizeof(cmd));
702 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
703 cmd.nvs_gpadl = sc->sc_rx_hndl;
704 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
705
706 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
707 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
708 goto errout;
709
710 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
711 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
712 DPRINTF("%s: failed to set up the Rx ring\n",
713 device_xname(sc->sc_dev));
714 goto errout;
715 }
716 if (rsp->nvs_nsect > 1) {
717 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
718 device_xname(sc->sc_dev), rsp->nvs_nsect);
719 hvn_rx_ring_destroy(sc);
720 return -1;
721 }
722 return 0;
723
724 errout:
725 if (sc->sc_rx_hndl) {
726 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
727 sc->sc_rx_hndl = 0;
728 }
729 if (sc->sc_rx_ring) {
730 kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
731 sc->sc_rx_ring = NULL;
732 }
733 return -1;
734 }
735
736 static int
737 hvn_rx_ring_destroy(struct hvn_softc *sc)
738 {
739 struct hvn_nvs_rxbuf_disconn cmd;
740 uint64_t tid;
741
742 if (sc->sc_rx_ring == NULL)
743 return 0;
744
745 memset(&cmd, 0, sizeof(cmd));
746 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
747 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
748
749 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
750 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
751 return -1;
752
753 delay(100);
754
755 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
756
757 sc->sc_rx_hndl = 0;
758
759 kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
760 sc->sc_rx_ring = NULL;
761
762 return 0;
763 }
764
765 static int
766 hvn_tx_ring_create(struct hvn_softc *sc)
767 {
768 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
769 struct hvn_tx_desc *txd;
770 bus_dma_segment_t *seg;
771 size_t msgsize;
772 int i, rsegs;
773 paddr_t pa;
774
775 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
776
777 /* Allocate memory to store RNDIS messages */
778 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
779 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
780 DPRINTF("%s: failed to allocate memory for RNDIS messages\n",
781 device_xname(sc->sc_dev));
782 goto errout;
783 }
784 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
785 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
786 DPRINTF("%s: failed to establish mapping for RNDIS messages\n",
787 device_xname(sc->sc_dev));
788 goto errout;
789 }
790 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
791 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
792 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
793 DPRINTF("%s: failed to create map for RNDIS messages\n",
794 device_xname(sc->sc_dev));
795 goto errout;
796 }
797 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
798 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
799 DPRINTF("%s: failed to load map for RNDIS messages\n",
800 device_xname(sc->sc_dev));
801 goto errout;
802 }
803
804 for (i = 0; i < HVN_TX_DESC; i++) {
805 txd = &sc->sc_tx_desc[i];
806 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
807 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
808 &txd->txd_dmap)) {
809 DPRINTF("%s: failed to create map for TX descriptors\n",
810 device_xname(sc->sc_dev));
811 goto errout;
812 }
813 seg = &sc->sc_tx_rmap->dm_segs[0];
814 pa = seg->ds_addr + (msgsize * i);
815 txd->txd_gpa.gpa_page = atop(pa);
816 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
817 txd->txd_gpa.gpa_len = msgsize;
818 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
819 txd->txd_id = i + HVN_NVS_CHIM_SIG;
820 txd->txd_ready = 1;
821 }
822 sc->sc_tx_avail = HVN_TX_DESC;
823
824 return 0;
825
826 errout:
827 hvn_tx_ring_destroy(sc);
828 return -1;
829 }
830
831 static void
832 hvn_tx_ring_destroy(struct hvn_softc *sc)
833 {
834 struct hvn_tx_desc *txd;
835 int i;
836
837 for (i = 0; i < HVN_TX_DESC; i++) {
838 txd = &sc->sc_tx_desc[i];
839 if (txd->txd_dmap == NULL)
840 continue;
841 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
842 0, txd->txd_dmap->dm_mapsize,
843 BUS_DMASYNC_POSTWRITE);
844 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
845 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
846 txd->txd_dmap = NULL;
847 if (txd->txd_buf == NULL)
848 continue;
849 m_free(txd->txd_buf);
850 txd->txd_buf = NULL;
851 }
852 if (sc->sc_tx_rmap) {
853 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
854 0, sc->sc_tx_rmap->dm_mapsize,
855 BUS_DMASYNC_POSTWRITE);
856 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
857 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
858 }
859 if (sc->sc_tx_msgs) {
860 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
861
862 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
863 msgsize * HVN_TX_DESC);
864 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
865 }
866 sc->sc_tx_rmap = NULL;
867 sc->sc_tx_msgs = NULL;
868 }
869
870 static int
871 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
872 {
873 size_t addrlen = ETHER_ADDR_LEN;
874 int rv;
875
876 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
877 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
878 rv = -1;
879 return rv;
880 }
881
882 static void
883 hvn_get_link_status(struct hvn_softc *sc)
884 {
885 uint32_t state;
886 size_t len = sizeof(state);
887
888 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
889 &state, &len) == 0)
890 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
891 LINK_STATE_UP : LINK_STATE_DOWN;
892 }
893
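/*
 * Open the VMBus channel with a ring sized for the worst case of
 * outstanding RNDIS control and data messages, then negotiate the NVS
 * protocol version starting from the newest one we support.  On NVS 2
 * and later the NDIS configuration (maximum MTU, VLAN) is sent as well,
 * and finally the NDIS version is selected: 6.1 for NVS 4 and older,
 * 6.30 otherwise.
 */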
894 static int
895 hvn_nvs_attach(struct hvn_softc *sc)
896 {
897 static const uint32_t protos[] = {
898 HVN_NVS_PROTO_VERSION_5,
899 HVN_NVS_PROTO_VERSION_4,
900 HVN_NVS_PROTO_VERSION_2,
901 HVN_NVS_PROTO_VERSION_1
902 };
903 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP;
904 struct hvn_nvs_init cmd;
905 struct hvn_nvs_init_resp *rsp;
906 struct hvn_nvs_ndis_init ncmd;
907 struct hvn_nvs_ndis_conf ccmd;
908 uint32_t ndisver, ringsize;
909 uint64_t tid;
910 int i;
911
912 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, kmemflags);
913 if (sc->sc_nvsbuf == NULL) {
914 DPRINTF("%s: failed to allocate channel data buffer\n",
915 device_xname(sc->sc_dev));
916 return -1;
917 }
918
919 /* We need to be able to fit all RNDIS control and data messages */
920 ringsize = HVN_RNDIS_CTLREQS *
921 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
922 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
923 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
924
925 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
926
927 /* Associate our interrupt handler with the channel */
928 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
929 hvn_nvs_intr, sc)) {
930 DPRINTF("%s: failed to open channel\n",
931 device_xname(sc->sc_dev));
932 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
933 return -1;
934 }
935
936 memset(&cmd, 0, sizeof(cmd));
937 cmd.nvs_type = HVN_NVS_TYPE_INIT;
938 for (i = 0; i < __arraycount(protos); i++) {
939 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
940 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
941 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
942 return -1;
943
944 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
945 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
946 sc->sc_proto = protos[i];
947 break;
948 }
949 }
950 if (i == __arraycount(protos)) {
951 DPRINTF("%s: failed to negotiate NVSP version\n",
952 device_xname(sc->sc_dev));
953 return -1;
954 }
955
956 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
957 memset(&ccmd, 0, sizeof(ccmd));
958 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
959 ccmd.nvs_mtu = HVN_MAXMTU;
960 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
961
962 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
963 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
964 return -1;
965 }
966
967 memset(&ncmd, 0, sizeof(ncmd));
968 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
969 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
970 ndisver = NDIS_VERSION_6_1;
971 else
972 ndisver = NDIS_VERSION_6_30;
973 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
974 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
975
976 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
977 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
978 return -1;
979
980 sc->sc_ndisver = ndisver;
981
982 return 0;
983 }
984
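/*
 * Channel callback: drain the channel ring.  Completion packets either
 * carry an NVS response (copied into sc_nvsrsp for hvn_nvs_cmd()) or an
 * RNDIS_ACK that completes a transmit; receive-buffer packets carry
 * inbound RNDIS messages and are handed to hvn_rndis_input().
 */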
985 static void
986 hvn_nvs_intr(void *arg)
987 {
988 struct hvn_softc *sc = arg;
989 struct ifnet *ifp = SC2IFP(sc);
990 struct vmbus_chanpkt_hdr *cph;
991 const struct hvn_nvs_hdr *nvs;
992 uint64_t rid;
993 uint32_t rlen;
994 int rv;
995 bool dotx = false;
996
997 for (;;) {
998 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
999 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
1000 if (rv != 0 || rlen == 0) {
1001 if (rv != EAGAIN)
1002 device_printf(sc->sc_dev,
1003 "failed to receive an NVSP packet\n");
1004 break;
1005 }
1006 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
1007 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
1008
1009 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1010 switch (nvs->nvs_type) {
1011 case HVN_NVS_TYPE_INIT_RESP:
1012 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1013 case HVN_NVS_TYPE_CHIM_CONNRESP:
1014 case HVN_NVS_TYPE_SUBCH_RESP:
1015 /* copy the response back */
1016 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1017 sc->sc_nvsdone = 1;
1018 wakeup(&sc->sc_nvsrsp);
1019 break;
1020 case HVN_NVS_TYPE_RNDIS_ACK:
1021 dotx = true;
1022 hvn_txeof(sc, cph->cph_tid);
1023 break;
1024 default:
1025 device_printf(sc->sc_dev,
1026 "unhandled NVSP packet type %u "
1027 "on completion\n", nvs->nvs_type);
1028 break;
1029 }
1030 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1031 switch (nvs->nvs_type) {
1032 case HVN_NVS_TYPE_RNDIS:
1033 hvn_rndis_input(sc, cph->cph_tid, cph);
1034 break;
1035 default:
1036 device_printf(sc->sc_dev,
1037 "unhandled NVSP packet type %u "
1038 "on receive\n", nvs->nvs_type);
1039 break;
1040 }
1041 } else
1042 device_printf(sc->sc_dev,
1043 "unknown NVSP packet type %u\n", cph->cph_type);
1044 }
1045
1046 if (dotx)
1047 if_schedule_deferred_start(ifp);
1048 }
1049
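/*
 * Send an in-band NVS request.  A completion is requested whenever
 * "timo" is non-zero; the response is copied into sc_nvsrsp by
 * hvn_nvs_intr(), which is polled directly while cold and slept on
 * otherwise, giving up after roughly "timo" milliseconds.
 */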
1050 static int
1051 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1052 int timo)
1053 {
1054 struct hvn_nvs_hdr *hdr = cmd;
1055 int tries = 10;
1056 int rv, s;
1057
1058 sc->sc_nvsdone = 0;
1059
1060 do {
1061 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1062 tid, VMBUS_CHANPKT_TYPE_INBAND,
1063 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1064 if (rv == EAGAIN) {
1065 if (cold)
1066 delay(1000);
1067 else
1068 tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
1069 } else if (rv) {
1070 DPRINTF("%s: NVSP operation %u send error %d\n",
1071 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1072 return rv;
1073 }
1074 } while (rv != 0 && --tries > 0);
1075
1076 if (tries == 0 && rv != 0) {
1077 device_printf(sc->sc_dev,
1078 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1079 return rv;
1080 }
1081
1082 if (timo == 0)
1083 return 0;
1084
1085 do {
1086 if (cold) {
1087 delay(1000);
1088 s = splnet();
1089 hvn_nvs_intr(sc);
1090 splx(s);
1091 } else
1092 tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
1093 mstohz(1));
1094 } while (--timo > 0 && sc->sc_nvsdone != 1);
1095
1096 if (timo == 0 && sc->sc_nvsdone != 1) {
1097 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1098 hdr->nvs_type);
1099 return ETIMEDOUT;
1100 }
1101 return 0;
1102 }
1103
1104 static int
1105 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1106 {
1107 struct hvn_nvs_rndis_ack cmd;
1108 int tries = 5;
1109 int rv;
1110
1111 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1112 cmd.nvs_status = HVN_NVS_STATUS_OK;
1113 do {
1114 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1115 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1116 if (rv == EAGAIN)
1117 delay(10);
1118 else if (rv) {
1119 DPRINTF("%s: NVSP acknowledgement error %d\n",
1120 device_xname(sc->sc_dev), rv);
1121 return rv;
1122 }
1123 } while (rv != 0 && --tries > 0);
1124 return rv;
1125 }
1126
1127 static void
1128 hvn_nvs_detach(struct hvn_softc *sc)
1129 {
1130
1131 if (vmbus_channel_close(sc->sc_chan) == 0) {
1132 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1133 sc->sc_nvsbuf = NULL;
1134 }
1135 }
1136
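/*
 * RNDIS control requests cycle through three queues: the free list
 * (sc_cntl_fq), the submission queue (sc_cntl_sq) while a request is
 * waiting for its completion, and the completion queue (sc_cntl_cq)
 * between hvn_complete_cmd() and hvn_release_cmd().
 */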
1137 static inline struct rndis_cmd *
1138 hvn_alloc_cmd(struct hvn_softc *sc)
1139 {
1140 struct rndis_cmd *rc;
1141
1142 mutex_enter(&sc->sc_cntl_fqlck);
1143 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1144 /* XXX use condvar(9) instead of mtsleep */
1145 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1146 &sc->sc_cntl_fqlck);
1147 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1148 mutex_exit(&sc->sc_cntl_fqlck);
1149 return rc;
1150 }
1151
1152 static inline void
1153 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1154 {
1155
1156 mutex_enter(&sc->sc_cntl_sqlck);
1157 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1158 mutex_exit(&sc->sc_cntl_sqlck);
1159 }
1160
1161 static inline struct rndis_cmd *
1162 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1163 {
1164 struct rndis_cmd *rc;
1165
1166 mutex_enter(&sc->sc_cntl_sqlck);
1167 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1168 if (rc->rc_id == id) {
1169 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1170 break;
1171 }
1172 }
1173 mutex_exit(&sc->sc_cntl_sqlck);
1174 if (rc != NULL) {
1175 mutex_enter(&sc->sc_cntl_cqlck);
1176 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1177 mutex_exit(&sc->sc_cntl_cqlck);
1178 }
1179 return rc;
1180 }
1181
1182 static inline void
1183 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1184 {
1185
1186 mutex_enter(&sc->sc_cntl_cqlck);
1187 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1188 mutex_exit(&sc->sc_cntl_cqlck);
1189 }
1190
1191 static inline int
1192 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1193 {
1194 struct rndis_cmd *rn;
1195
1196 mutex_enter(&sc->sc_cntl_sqlck);
1197 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1198 if (rn == rc) {
1199 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1200 mutex_exit(&sc->sc_cntl_sqlck);
1201 return 0;
1202 }
1203 }
1204 mutex_exit(&sc->sc_cntl_sqlck);
1205 return -1;
1206 }
1207
1208 static inline void
1209 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1210 {
1211
1212 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1213 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1214 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1215 mutex_enter(&sc->sc_cntl_fqlck);
1216 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1217 mutex_exit(&sc->sc_cntl_fqlck);
1218 wakeup(&sc->sc_cntl_fq);
1219 }
1220
1221 static int
1222 hvn_rndis_attach(struct hvn_softc *sc)
1223 {
1224 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1225 struct rndis_init_req *req;
1226 struct rndis_init_comp *cmp;
1227 struct rndis_cmd *rc;
1228 int i, rv;
1229
1230 /* RNDIS control message queues */
1231 TAILQ_INIT(&sc->sc_cntl_sq);
1232 TAILQ_INIT(&sc->sc_cntl_cq);
1233 TAILQ_INIT(&sc->sc_cntl_fq);
1234 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1235 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1236 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1237
1238 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1239 rc = &sc->sc_cntl_msgs[i];
1240 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1241 dmaflags, &rc->rc_dmap)) {
1242 DPRINTF("%s: failed to create RNDIS command map\n",
1243 device_xname(sc->sc_dev));
1244 goto errout;
1245 }
1246 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1247 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1248 DPRINTF("%s: failed to allocate RNDIS command\n",
1249 device_xname(sc->sc_dev));
1250 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1251 goto errout;
1252 }
1253 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1254 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1255 DPRINTF("%s: failed to allocate RNDIS command\n",
1256 device_xname(sc->sc_dev));
1257 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1258 rc->rc_nsegs);
1259 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1260 goto errout;
1261 }
1262 memset(rc->rc_req, 0, PAGE_SIZE);
1263 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1264 PAGE_SIZE, NULL, dmaflags)) {
1265 DPRINTF("%s: failed to load RNDIS command map\n",
1266 device_xname(sc->sc_dev));
1267 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1268 rc->rc_nsegs);
1269 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1270 goto errout;
1271 }
1272 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1273 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1274 }
1275
1276 rc = hvn_alloc_cmd(sc);
1277
1278 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1279 BUS_DMASYNC_PREREAD);
1280
1281 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1282
1283 req = rc->rc_req;
1284 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1285 req->rm_len = sizeof(*req);
1286 req->rm_rid = rc->rc_id;
1287 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1288 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1289 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1290
1291 rc->rc_cmplen = sizeof(*cmp);
1292
1293 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1294 BUS_DMASYNC_PREWRITE);
1295
1296 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1297 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1298 device_xname(sc->sc_dev), rv);
1299 hvn_free_cmd(sc, rc);
1300 goto errout;
1301 }
1302 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1303 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1304 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1305 device_xname(sc->sc_dev), cmp->rm_status);
1306 hvn_free_cmd(sc, rc);
1307 goto errout;
1308 }
1309
1310 hvn_free_cmd(sc, rc);
1311
1312 /* Initialize RNDIS Data command */
1313 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1314 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1315 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1316 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1317
1318 return 0;
1319
1320 errout:
1321 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1322 rc = &sc->sc_cntl_msgs[i];
1323 if (rc->rc_req == NULL)
1324 continue;
1325 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1326 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1327 rc->rc_req = NULL;
1328 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1329 }
1330 return -1;
1331 }
1332
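/*
 * Advertise the checksum offloads we are willing to use through
 * OID_TCP_OFFLOAD_PARAMETERS.  The structure revision and size depend
 * on the negotiated NDIS version; UDP checksum offload is only
 * requested on NDIS 6.30 and newer.
 */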
1333 static int
1334 hvn_set_capabilities(struct hvn_softc *sc)
1335 {
1336 struct ndis_offload_params params;
1337 size_t len = sizeof(params);
1338
1339 memset(&params, 0, sizeof(params));
1340
1341 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1342 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1343 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1344 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1345 } else {
1346 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1347 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1348 }
1349
1350 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1351 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1352 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1353 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1354 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1355 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1356 }
1357
1358 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1359 }
1360
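/*
 * Issue an RNDIS control request: the message is sent as a single-page
 * GPA together with an NVS RNDIS header, then we wait up to "timo"
 * milliseconds (polling hvn_nvs_intr() while cold) for
 * hvn_rndis_complete() to mark it done.  If no completion arrives the
 * request is pulled back off the submission queue; if that fails the
 * completion raced us and the result is used anyway.
 */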
1361 static int
1362 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1363 {
1364 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1365 struct rndis_msghdr *hdr = rc->rc_req;
1366 struct vmbus_gpa sgl[1];
1367 int tries = 10;
1368 int rv, s;
1369
1370 KASSERT(timo > 0);
1371
1372 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1373 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1374 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1375
1376 sgl[0].gpa_page = rc->rc_gpa;
1377 sgl[0].gpa_len = hdr->rm_len;
1378 sgl[0].gpa_ofs = 0;
1379
1380 rc->rc_done = 0;
1381
1382 hvn_submit_cmd(sc, rc);
1383
1384 do {
1385 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1386 sizeof(*msg), rc->rc_id);
1387 if (rv == EAGAIN) {
1388 if (cold)
1389 delay(1000);
1390 else
1391 tsleep(rc, PRIBIO, "rndisout", mstohz(1));
1392 } else if (rv) {
1393 DPRINTF("%s: RNDIS operation %u send error %d\n",
1394 device_xname(sc->sc_dev), hdr->rm_type, rv);
1395 hvn_rollback_cmd(sc, rc);
1396 return rv;
1397 }
1398 } while (rv != 0 && --tries > 0);
1399
1400 if (tries == 0 && rv != 0) {
1401 device_printf(sc->sc_dev,
1402 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1403 return rv;
1404 }
1405
1406 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1407 BUS_DMASYNC_POSTWRITE);
1408
1409 do {
1410 if (cold) {
1411 delay(1000);
1412 s = splnet();
1413 hvn_nvs_intr(sc);
1414 splx(s);
1415 } else
1416 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
1417 } while (--timo > 0 && rc->rc_done != 1);
1418
1419 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1420 BUS_DMASYNC_POSTREAD);
1421
1422 if (rc->rc_done != 1) {
1423 rv = timo == 0 ? ETIMEDOUT : EINTR;
1424 if (hvn_rollback_cmd(sc, rc)) {
1425 hvn_release_cmd(sc, rc);
1426 rv = 0;
1427 } else if (rv == ETIMEDOUT) {
1428 device_printf(sc->sc_dev,
1429 "RNDIS operation %u timed out\n", hdr->rm_type);
1430 }
1431 return rv;
1432 }
1433
1434 hvn_release_cmd(sc, rc);
1435 return 0;
1436 }
1437
1438 static void
1439 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1440 {
1441 struct vmbus_chanpkt_prplist *cp = arg;
1442 uint32_t off, len, type;
1443 int i;
1444
1445 if (sc->sc_rx_ring == NULL) {
1446 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1447 return;
1448 }
1449
1450 for (i = 0; i < cp->cp_range_cnt; i++) {
1451 off = cp->cp_range[i].gpa_ofs;
1452 len = cp->cp_range[i].gpa_len;
1453
1454 KASSERT(off + len <= sc->sc_rx_size);
1455 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1456
1457 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1458 switch (type) {
1459 /* data message */
1460 case REMOTE_NDIS_PACKET_MSG:
1461 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1462 break;
1463 /* completion messages */
1464 case REMOTE_NDIS_INITIALIZE_CMPLT:
1465 case REMOTE_NDIS_QUERY_CMPLT:
1466 case REMOTE_NDIS_SET_CMPLT:
1467 case REMOTE_NDIS_RESET_CMPLT:
1468 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1469 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1470 break;
1471 /* notification message */
1472 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1473 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1474 break;
1475 default:
1476 device_printf(sc->sc_dev,
1477 "unhandled RNDIS message type %u\n", type);
1478 break;
1479 }
1480 }
1481
1482 hvn_nvs_ack(sc, tid);
1483 }
1484
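/*
 * Allocate an mbuf large enough for the received frame (a cluster or
 * external storage when it does not fit in a packet header mbuf),
 * reserve ETHER_ALIGN bytes so the IP header ends up aligned, and copy
 * the frame in.
 */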
1485 static inline struct mbuf *
1486 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1487 {
1488 struct ifnet *ifp = SC2IFP(sc);
1489 struct mbuf *m;
1490 size_t size = len + ETHER_ALIGN;
1491
1492 MGETHDR(m, M_NOWAIT, MT_DATA);
1493 if (m == NULL)
1494 return NULL;
1495
1496 if (size > MHLEN) {
1497 if (size <= MCLBYTES)
1498 MCLGET(m, M_NOWAIT);
1499 else
1500 MEXTMALLOC(m, size, M_NOWAIT);
1501 if ((m->m_flags & M_EXT) == 0) {
1502 m_freem(m);
1503 return NULL;
1504 }
1505 }
1506
1507 m->m_len = m->m_pkthdr.len = size;
1508 m_adj(m, ETHER_ALIGN);
1509 m_copyback(m, 0, len, buf);
1510 m_set_rcvif(m, ifp);
1511 return m;
1512 }
1513
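/*
 * Parse a REMOTE_NDIS_PACKET_MSG out of the receive buffer: copy the
 * payload into an mbuf and translate checksum and VLAN per-packet-info
 * into mbuf metadata before passing it to the network stack.
 */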
1514 static void
1515 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1516 {
1517 struct ifnet *ifp = SC2IFP(sc);
1518 struct rndis_packet_msg *pkt;
1519 struct rndis_pktinfo *pi;
1520 uint32_t csum, vlan;
1521 struct mbuf *m;
1522
1523 if (!(ifp->if_flags & IFF_RUNNING))
1524 return;
1525
1526 if (len < sizeof(*pkt)) {
1527 device_printf(sc->sc_dev, "data packet too short: %u\n",
1528 len);
1529 return;
1530 }
1531
1532 pkt = (struct rndis_packet_msg *)buf;
1533 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1534 device_printf(sc->sc_dev,
1535 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1536 pkt->rm_datalen);
1537 return;
1538 }
1539
1540 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1541 pkt->rm_datalen)) == NULL) {
1542 ifp->if_ierrors++;
1543 return;
1544 }
1545
1546 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1547 device_printf(sc->sc_dev,
1548 "pktinfo is out of bounds: %u@%u vs %u\n",
1549 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1550 goto done;
1551 }
1552
1553 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1554 pkt->rm_pktinfooffset);
1555 while (pkt->rm_pktinfolen > 0) {
1556 if (pi->rm_size > pkt->rm_pktinfolen) {
1557 device_printf(sc->sc_dev,
1558 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1559 pkt->rm_pktinfolen);
1560 break;
1561 }
1562
1563 switch (pi->rm_type) {
1564 case NDIS_PKTINFO_TYPE_CSUM:
1565 memcpy(&csum, pi->rm_data, sizeof(csum));
1566 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1567 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1568 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1569 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1570 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1571 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1572 break;
1573 case NDIS_PKTINFO_TYPE_VLAN:
1574 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1575 if (vlan != 0xffffffff) {
1576 m->m_pkthdr.ether_vtag =
1577 NDIS_VLAN_INFO_ID(vlan) |
1578 (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1579 m->m_flags |= M_VLANTAG;
1580 }
1581 break;
1582 default:
1583 DPRINTF("%s: unhandled pktinfo type %u\n",
1584 device_xname(sc->sc_dev), pi->rm_type);
1585 break;
1586 }
1587
1588 pkt->rm_pktinfolen -= pi->rm_size;
1589 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1590 }
1591
1592 done:
1593 if_percpuq_enqueue(sc->sc_ipq, m);
1594 }
1595
1596 static void
1597 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1598 {
1599 struct rndis_cmd *rc;
1600 uint32_t id;
1601
1602 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1603 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1604 if (len < rc->rc_cmplen)
1605 device_printf(sc->sc_dev,
1606 "RNDIS response %u too short: %u\n", id, len);
1607 else
1608 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1609 if (len > rc->rc_cmplen &&
1610 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1611 device_printf(sc->sc_dev,
1612 "RNDIS response %u too large: %u\n", id, len);
1613 else if (len > rc->rc_cmplen)
1614 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1615 len - rc->rc_cmplen);
1616 rc->rc_done = 1;
1617 wakeup(rc);
1618 } else {
1619 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1620 device_xname(sc->sc_dev), id);
1621 }
1622 }
1623
1624 static int
1625 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1626 {
1627 uint64_t rid = (uint64_t)txd->txd_id << 32;
1628 int rv;
1629
1630 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1631 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1632 if (rv) {
1633 DPRINTF("%s: RNDIS data send error %d\n",
1634 device_xname(sc->sc_dev), rv);
1635 return rv;
1636 }
1637 return 0;
1638 }
1639
1640 static void
1641 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1642 {
1643 struct ifnet *ifp = SC2IFP(sc);
1644 uint32_t status;
1645 int link_state = sc->sc_link_state;
1646
1647 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1648 switch (status) {
1649 case RNDIS_STATUS_MEDIA_CONNECT:
1650 sc->sc_link_state = LINK_STATE_UP;
1651 break;
1652 case RNDIS_STATUS_MEDIA_DISCONNECT:
1653 sc->sc_link_state = LINK_STATE_DOWN;
1654 break;
1655 /* Ignore these */
1656 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1657 return;
1658 default:
1659 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1660 status);
1661 return;
1662 }
1663 if (link_state != sc->sc_link_state)
1664 if_link_state_change(ifp, sc->sc_link_state);
1665 }
1666
1667 static int
1668 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1669 {
1670 struct rndis_cmd *rc;
1671 struct rndis_query_req *req;
1672 struct rndis_query_comp *cmp;
1673 size_t olength = *length;
1674 int rv;
1675
1676 rc = hvn_alloc_cmd(sc);
1677
1678 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1679 BUS_DMASYNC_PREREAD);
1680
1681 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1682
1683 req = rc->rc_req;
1684 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1685 req->rm_len = sizeof(*req);
1686 req->rm_rid = rc->rc_id;
1687 req->rm_oid = oid;
1688 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1689
1690 rc->rc_cmplen = sizeof(*cmp);
1691
1692 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1693 BUS_DMASYNC_PREWRITE);
1694
1695 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1696 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1697 device_xname(sc->sc_dev), rv);
1698 hvn_free_cmd(sc, rc);
1699 return rv;
1700 }
1701
1702 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1703 switch (cmp->rm_status) {
1704 case RNDIS_STATUS_SUCCESS:
1705 if (cmp->rm_infobuflen > olength) {
1706 rv = EINVAL;
1707 break;
1708 }
1709 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1710 *length = cmp->rm_infobuflen;
1711 break;
1712 default:
1713 *length = 0;
1714 rv = EIO;
1715 break;
1716 }
1717
1718 hvn_free_cmd(sc, rc);
1719 return rv;
1720 }
1721
1722 static int
1723 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1724 {
1725 struct rndis_cmd *rc;
1726 struct rndis_set_req *req;
1727 struct rndis_set_comp *cmp;
1728 int rv;
1729
1730 rc = hvn_alloc_cmd(sc);
1731
1732 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1733 BUS_DMASYNC_PREREAD);
1734
1735 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1736
1737 req = rc->rc_req;
1738 req->rm_type = REMOTE_NDIS_SET_MSG;
1739 req->rm_len = sizeof(*req) + length;
1740 req->rm_rid = rc->rc_id;
1741 req->rm_oid = oid;
1742 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1743
1744 rc->rc_cmplen = sizeof(*cmp);
1745
1746 if (length > 0) {
1747 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1748 req->rm_infobuflen = length;
1749 memcpy(req + 1, data, length);
1750 }
1751
1752 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1753 BUS_DMASYNC_PREWRITE);
1754
1755 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1756 DPRINTF("%s: SET_MSG failed, error %d\n",
1757 device_xname(sc->sc_dev), rv);
1758 hvn_free_cmd(sc, rc);
1759 return rv;
1760 }
1761
1762 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1763 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1764 rv = EIO;
1765
1766 hvn_free_cmd(sc, rc);
1767 return rv;
1768 }
1769
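/*
 * Program the host packet filter: promiscuous mode passes everything,
 * otherwise directed, broadcast and all multicast frames are accepted
 * (hvn_iff() does not build a precise multicast filter yet).
 */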
1770 static int
1771 hvn_rndis_open(struct hvn_softc *sc)
1772 {
1773 uint32_t filter;
1774 int rv;
1775
1776 if (sc->sc_promisc)
1777 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1778 else
1779 filter = RNDIS_PACKET_TYPE_BROADCAST |
1780 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1781 RNDIS_PACKET_TYPE_DIRECTED;
1782
1783 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1784 &filter, sizeof(filter));
1785 if (rv) {
1786 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1787 device_xname(sc->sc_dev), filter);
1788 }
1789 return rv;
1790 }
1791
1792 static int
1793 hvn_rndis_close(struct hvn_softc *sc)
1794 {
1795 uint32_t filter = 0;
1796 int rv;
1797
1798 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1799 &filter, sizeof(filter));
1800 if (rv) {
1801 DPRINTF("%s: failed to clear RNDIS filter\n",
1802 device_xname(sc->sc_dev));
1803 }
1804 return rv;
1805 }
1806
1807 static void
1808 hvn_rndis_detach(struct hvn_softc *sc)
1809 {
1810 struct rndis_cmd *rc;
1811 struct rndis_halt_req *req;
1812 int rv;
1813
1814 rc = hvn_alloc_cmd(sc);
1815
1816 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1817 BUS_DMASYNC_PREREAD);
1818
1819 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1820
1821 req = rc->rc_req;
1822 req->rm_type = REMOTE_NDIS_HALT_MSG;
1823 req->rm_len = sizeof(*req);
1824 req->rm_rid = rc->rc_id;
1825
1826 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1827 BUS_DMASYNC_PREWRITE);
1828
1829 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1830 DPRINTF("%s: HALT_MSG failed, error %d\n",
1831 device_xname(sc->sc_dev), rv);
1832 }
1833 hvn_free_cmd(sc, rc);
1834 }
1835