1 /* $NetBSD: if_hvn.c,v 1.4.2.6 2020/12/11 15:43:16 martin Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.4.2.6 2020/12/11 15:43:16 martin Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70
71 #define HVN_NVS_MSGSIZE 32
72 #define HVN_NVS_BUFSIZE PAGE_SIZE
73
74 /*
75 * RNDIS control interface
76 */
77 #define HVN_RNDIS_CTLREQS 4
78 #define HVN_RNDIS_BUFSIZE 512
79
80 struct rndis_cmd {
81 uint32_t rc_id;
82 struct hvn_nvs_rndis rc_msg;
83 void *rc_req;
84 bus_dmamap_t rc_dmap;
85 bus_dma_segment_t rc_segs;
86 int rc_nsegs;
87 uint64_t rc_gpa;
88 struct rndis_packet_msg rc_cmp;
89 uint32_t rc_cmplen;
90 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
91 int rc_done;
92 TAILQ_ENTRY(rndis_cmd) rc_entry;
93 };
94 TAILQ_HEAD(rndis_queue, rndis_cmd);
95
96 #define HVN_MAXMTU (9 * 1024)
97
98 #define HVN_RNDIS_XFER_SIZE 2048
99
100 /*
101 * Tx ring
102 */
103 #define HVN_TX_DESC 256
104 #define HVN_TX_FRAGS 15 /* 31 is the max */
105 #define HVN_TX_FRAG_SIZE PAGE_SIZE
106 #define HVN_TX_PKT_SIZE 16384
107
108 #define HVN_RNDIS_PKT_LEN \
109 (sizeof(struct rndis_packet_msg) + \
110 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
111 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
112
113 struct hvn_tx_desc {
114 uint32_t txd_id;
115 int txd_ready;
116 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
117 int txd_nsge;
118 struct mbuf *txd_buf;
119 bus_dmamap_t txd_dmap;
120 struct vmbus_gpa txd_gpa;
121 struct rndis_packet_msg *txd_req;
122 };
123
124 struct hvn_softc {
125 device_t sc_dev;
126
127 struct vmbus_softc *sc_vmbus;
128 struct vmbus_channel *sc_chan;
129 bus_dma_tag_t sc_dmat;
130
131 struct ethercom sc_ec;
132 struct ifmedia sc_media;
133 struct if_percpuq *sc_ipq;
134 int sc_link_state;
135 int sc_promisc;
136
137 uint32_t sc_flags;
138 #define HVN_SCF_ATTACHED __BIT(0)
139
140 /* NVS protocol */
141 int sc_proto;
142 uint32_t sc_nvstid;
143 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
144 uint8_t *sc_nvsbuf;
145 int sc_nvsdone;
146
147 /* RNDIS protocol */
148 int sc_ndisver;
149 uint32_t sc_rndisrid;
150 struct rndis_queue sc_cntl_sq; /* submission queue */
151 kmutex_t sc_cntl_sqlck;
152 struct rndis_queue sc_cntl_cq; /* completion queue */
153 kmutex_t sc_cntl_cqlck;
154 struct rndis_queue sc_cntl_fq; /* free queue */
155 kmutex_t sc_cntl_fqlck;
156 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
157 struct hvn_nvs_rndis sc_data_msg;
158
159 /* Rx ring */
160 uint8_t *sc_rx_ring;
161 int sc_rx_size;
162 uint32_t sc_rx_hndl;
163 struct hyperv_dma sc_rx_dma;
164
165 /* Tx ring */
166 uint32_t sc_tx_next;
167 uint32_t sc_tx_avail;
168 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
169 bus_dmamap_t sc_tx_rmap;
170 uint8_t *sc_tx_msgs;
171 bus_dma_segment_t sc_tx_mseg;
172 };
173
174 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
175 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
176
177
178 static int hvn_match(device_t, cfdata_t, void *);
179 static void hvn_attach(device_t, device_t, void *);
180 static int hvn_detach(device_t, int);
181
182 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
183 hvn_match, hvn_attach, hvn_detach, NULL);
184
185 static int hvn_ioctl(struct ifnet *, u_long, void *);
186 static int hvn_media_change(struct ifnet *);
187 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
188 static int hvn_iff(struct hvn_softc *);
189 static int hvn_init(struct ifnet *);
190 static void hvn_stop(struct ifnet *, int);
191 static void hvn_start(struct ifnet *);
192 static int hvn_encap(struct hvn_softc *, struct mbuf *,
193 struct hvn_tx_desc **);
194 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
195 static void hvn_txeof(struct hvn_softc *, uint64_t);
196 static int hvn_rx_ring_create(struct hvn_softc *);
197 static int hvn_rx_ring_destroy(struct hvn_softc *);
198 static int hvn_tx_ring_create(struct hvn_softc *);
199 static void hvn_tx_ring_destroy(struct hvn_softc *);
200 static int hvn_set_capabilities(struct hvn_softc *);
201 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
202 static void hvn_get_link_status(struct hvn_softc *);
203
204 /* NVSP */
205 static int hvn_nvs_attach(struct hvn_softc *);
206 static void hvn_nvs_intr(void *);
207 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
208 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
209 static void hvn_nvs_detach(struct hvn_softc *);
210
211 /* RNDIS */
212 static int hvn_rndis_attach(struct hvn_softc *);
213 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
214 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
215 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
216 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
217 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
218 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
219 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
220 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
221 static int hvn_rndis_open(struct hvn_softc *);
222 static int hvn_rndis_close(struct hvn_softc *);
223 static void hvn_rndis_detach(struct hvn_softc *);
224
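/*
 * Match Hyper-V synthetic network devices by comparing the VMBus
 * channel type GUID against hyperv_guid_network.
 */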
225 static int
226 hvn_match(device_t parent, cfdata_t match, void *aux)
227 {
228 struct vmbus_attach_args *aa = aux;
229
230 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
231 return 0;
232 return 1;
233 }
234
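/*
 * Attach sequence: negotiate the NVS protocol over the VMBus channel,
 * set up the receive buffer and transmit descriptors, initialize the
 * RNDIS control path, then query the MAC address and register the
 * Ethernet interface.
 */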
235 static void
236 hvn_attach(device_t parent, device_t self, void *aux)
237 {
238 struct hvn_softc *sc = device_private(self);
239 struct vmbus_attach_args *aa = aux;
240 struct ifnet *ifp = SC2IFP(sc);
241 uint8_t enaddr[ETHER_ADDR_LEN];
242 int error;
243
244 sc->sc_dev = self;
245 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
246 sc->sc_chan = aa->aa_chan;
247 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
248
249 aprint_naive("\n");
250 aprint_normal(": Hyper-V NetVSC\n");
251
252 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
253
254 if (hvn_nvs_attach(sc)) {
255 aprint_error_dev(self, "failed to init NVSP\n");
256 return;
257 }
258
259 if (hvn_rx_ring_create(sc)) {
260 aprint_error_dev(self, "failed to create Rx ring\n");
261 goto fail1;
262 }
263
264 if (hvn_tx_ring_create(sc)) {
265 aprint_error_dev(self, "failed to create Tx ring\n");
266 goto fail1;
267 }
268
269 ifp->if_softc = sc;
270 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
271 ifp->if_ioctl = hvn_ioctl;
272 ifp->if_start = hvn_start;
273 ifp->if_init = hvn_init;
274 ifp->if_stop = hvn_stop;
275 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
276 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
277 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
278 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
279 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
280 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
281 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
282 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
283 }
284 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
285 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
286 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
287 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
288 }
289
290 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
291 IFQ_SET_READY(&ifp->if_snd);
292
293 /* Initialize ifmedia structures. */
294 sc->sc_ec.ec_ifmedia = &sc->sc_media;
295 ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
296 hvn_media_status);
297 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
298 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
299
300 error = if_initialize(ifp);
301 if (error) {
302 aprint_error_dev(self, "if_initialize failed(%d)\n", error);
303 goto fail1;
304 }
305 sc->sc_ipq = if_percpuq_create(ifp);
306 if_deferred_start_init(ifp, NULL);
307
308 if (hvn_rndis_attach(sc)) {
309 aprint_error_dev(self, "failed to init RNDIS\n");
310 goto fail1;
311 }
312
313 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
314 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
315 sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
316
317 if (hvn_set_capabilities(sc)) {
318 aprint_error_dev(self, "failed to setup offloading\n");
319 goto fail2;
320 }
321
322 if (hvn_get_lladdr(sc, enaddr)) {
323 aprint_error_dev(self,
324 "failed to obtain an ethernet address\n");
325 goto fail2;
326 }
327 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
328
329 ether_ifattach(ifp, enaddr);
330 if_register(ifp);
331
332 if (pmf_device_register(self, NULL, NULL))
333 pmf_class_network_register(self, ifp);
334 else
335 aprint_error_dev(self, "couldn't establish power handler\n");
336
337 SET(sc->sc_flags, HVN_SCF_ATTACHED);
338 return;
339
340 fail2: hvn_rndis_detach(sc);
341 fail1: hvn_rx_ring_destroy(sc);
342 hvn_tx_ring_destroy(sc);
343 hvn_nvs_detach(sc);
344 }
345
346 static int
347 hvn_detach(device_t self, int flags)
348 {
349 struct hvn_softc *sc = device_private(self);
350 struct ifnet *ifp = SC2IFP(sc);
351
352 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
353 return 0;
354
355 hvn_stop(ifp, 1);
356
357 pmf_device_deregister(self);
358
359 ether_ifdetach(ifp);
360 if_detach(ifp);
361 if_percpuq_destroy(sc->sc_ipq);
362
363 hvn_rndis_detach(sc);
364 hvn_rx_ring_destroy(sc);
365 hvn_tx_ring_destroy(sc);
366 hvn_nvs_detach(sc);
367
368 return 0;
369 }
370
371 static int
372 hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
373 {
374 struct hvn_softc *sc = IFP2SC(ifp);
375 int s, error = 0;
376
377 s = splnet();
378
379 error = ether_ioctl(ifp, command, data);
380 if (error == ENETRESET) {
381 if (ifp->if_flags & IFF_RUNNING)
382 hvn_iff(sc);
383 error = 0;
384 }
385
386 splx(s);
387
388 return error;
389 }
390
391 static int
392 hvn_media_change(struct ifnet *ifp)
393 {
394
395 return 0;
396 }
397
398 static void
399 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
400 {
401 struct hvn_softc *sc = IFP2SC(ifp);
402 int link_state;
403
404 link_state = sc->sc_link_state;
405 hvn_get_link_status(sc);
406 if (link_state != sc->sc_link_state)
407 if_link_state_change(ifp, sc->sc_link_state);
408
409 ifmr->ifm_status = IFM_AVALID;
410 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
411 if (sc->sc_link_state == LINK_STATE_UP)
412 ifmr->ifm_status |= IFM_ACTIVE;
413 }
414
415 static int
416 hvn_iff(struct hvn_softc *sc)
417 {
418
419 /* XXX */
420 sc->sc_promisc = 0;
421
422 return 0;
423 }
424
425 static int
426 hvn_init(struct ifnet *ifp)
427 {
428 struct hvn_softc *sc = IFP2SC(ifp);
429 int error;
430
431 hvn_stop(ifp, 0);
432
433 error = hvn_iff(sc);
434 if (error)
435 return error;
436
437 error = hvn_rndis_open(sc);
438 if (error == 0) {
439 ifp->if_flags |= IFF_RUNNING;
440 ifp->if_flags &= ~IFF_OACTIVE;
441 }
442 return error;
443 }
444
445 static void
446 hvn_stop(struct ifnet *ifp, int disable)
447 {
448 struct hvn_softc *sc = IFP2SC(ifp);
449
450 hvn_rndis_close(sc);
451
452 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
453 }
454
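/*
 * Transmit path: dequeue packets, wrap each one in an RNDIS packet
 * message (hvn_encap) and hand the resulting scatter/gather list to
 * the host over the VMBus channel (hvn_rndis_output).
 */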
455 static void
456 hvn_start(struct ifnet *ifp)
457 {
458 struct hvn_softc *sc = IFP2SC(ifp);
459 struct hvn_tx_desc *txd;
460 struct mbuf *m;
461
462 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
463 return;
464
465 for (;;) {
466 if (!sc->sc_tx_avail) {
467 /* transient */
468 ifp->if_flags |= IFF_OACTIVE;
469 break;
470 }
471
472 IFQ_DEQUEUE(&ifp->if_snd, m);
473 if (m == NULL)
474 break;
475
476 if (hvn_encap(sc, m, &txd)) {
477 /* the chain is too large */
478 ifp->if_oerrors++;
479 m_freem(m);
480 continue;
481 }
482
483 bpf_mtap(ifp, m, BPF_D_OUT);
484
485 if (hvn_rndis_output(sc, txd)) {
486 hvn_decap(sc, txd);
487 ifp->if_oerrors++;
488 m_freem(m);
489 continue;
490 }
491
492 sc->sc_tx_next++;
493 }
494 }
495
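/*
 * Append a per-packet info element (e.g. VLAN or checksum offload
 * metadata) to an RNDIS packet message and return a pointer to its
 * data area for the caller to fill in.
 */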
496 static inline char *
497 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
498 size_t datalen, uint32_t type)
499 {
500 struct rndis_pktinfo *pi;
501 size_t pi_size = sizeof(*pi) + datalen;
502 char *cp;
503
504 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
505 pktsize);
506
507 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
508 pi = (struct rndis_pktinfo *)cp;
509 pi->rm_size = pi_size;
510 pi->rm_type = type;
511 pi->rm_pktinfooffset = sizeof(*pi);
512 pkt->rm_pktinfolen += pi_size;
513 pkt->rm_dataoffset += pi_size;
514 pkt->rm_len += pi_size;
515
516 return (char *)pi->rm_data;
517 }
518
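/*
 * Claim a free transmit descriptor, build the RNDIS packet header in
 * its preallocated message area, DMA-load the mbuf chain and describe
 * both as a guest physical address list for hvn_rndis_output().
 */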
519 static int
520 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
521 {
522 struct hvn_tx_desc *txd;
523 struct rndis_packet_msg *pkt;
524 bus_dma_segment_t *seg;
525 size_t pktlen;
526 int i, rv;
527
528 do {
529 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
530 sc->sc_tx_next++;
531 } while (!txd->txd_ready);
532 txd->txd_ready = 0;
533
534 pkt = txd->txd_req;
535 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
536 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
537 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
538 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
539 pkt->rm_datalen = m->m_pkthdr.len;
540 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
541 pkt->rm_pktinfolen = 0;
542
543 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
544 BUS_DMA_NOWAIT);
545 switch (rv) {
546 case 0:
547 break;
548 case EFBIG:
549 if (m_defrag(m, M_NOWAIT) != NULL &&
550 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
551 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
552 break;
553 /* FALLTHROUGH */
554 default:
555 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
556 return -1;
557 }
558 txd->txd_buf = m;
559
560 if (m->m_flags & M_VLANTAG) {
561 uint32_t vlan;
562 char *cp;
563
564 vlan = NDIS_VLAN_INFO_MAKE(
565 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
566 EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
567 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
568 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
569 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
570 }
571
572 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
573 M_CSUM_TCPv4)) {
574 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
575 char *cp;
576
577 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
578 csum |= NDIS_TXCSUM_INFO_IPCS;
579 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
580 csum |= NDIS_TXCSUM_INFO_TCPCS;
581 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
582 csum |= NDIS_TXCSUM_INFO_UDPCS;
583 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
584 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
585 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
586 }
587
588 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
589 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
590
591 /* Attach an RNDIS message to the first slot */
592 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
593 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
594 txd->txd_sgl[0].gpa_len = pktlen;
595 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
596
597 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
598 seg = &txd->txd_dmap->dm_segs[i];
599 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
600 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
601 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
602 }
603
604 *txd0 = txd;
605
606 atomic_dec_uint(&sc->sc_tx_avail);
607
608 return 0;
609 }
610
611 static void
612 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
613 {
614 struct ifnet *ifp = SC2IFP(sc);
615
616 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
617 0, txd->txd_dmap->dm_mapsize,
618 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
619 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
620 txd->txd_buf = NULL;
621 txd->txd_nsge = 0;
622 txd->txd_ready = 1;
623 atomic_inc_uint(&sc->sc_tx_avail);
624 ifp->if_flags &= ~IFF_OACTIVE;
625 }
626
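/*
 * Transmit completion: the descriptor index is carried in the upper
 * 32 bits of the transaction id (offset by HVN_NVS_CHIM_SIG).
 */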
627 static void
628 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
629 {
630 struct ifnet *ifp = SC2IFP(sc);
631 struct hvn_tx_desc *txd;
632 struct mbuf *m;
633 uint32_t id = tid >> 32;
634
635 if ((tid & 0xffffffffU) != 0)
636 return;
637
638 id -= HVN_NVS_CHIM_SIG;
639 if (id >= HVN_TX_DESC) {
640 device_printf(sc->sc_dev, "tx packet index too large: %u\n", id);
641 return;
642 }
643
644 txd = &sc->sc_tx_desc[id];
645
646 if ((m = txd->txd_buf) == NULL) {
647 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
648 return;
649 }
650 txd->txd_buf = NULL;
651
652 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
653 0, txd->txd_dmap->dm_mapsize,
654 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
655 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
656 m_freem(m);
657 ifp->if_opackets++;
658
659 txd->txd_ready = 1;
660
661 atomic_inc_uint(&sc->sc_tx_avail);
662 ifp->if_flags &= ~IFF_OACTIVE;
663 }
664
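/*
 * Allocate the receive buffer shared with the host, register it with
 * the channel (vmbus_handle_alloc) and announce it with an NVS
 * RXBUF_CONN command.
 */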
665 static int
666 hvn_rx_ring_create(struct hvn_softc *sc)
667 {
668 struct hvn_nvs_rxbuf_conn cmd;
669 struct hvn_nvs_rxbuf_conn_resp *rsp;
670 uint64_t tid;
671
672 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
673 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
674 else
675 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
676 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
677 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE);
678 if (sc->sc_rx_ring == NULL) {
679 DPRINTF("%s: failed to allocate Rx ring buffer\n",
680 device_xname(sc->sc_dev));
681 return -1;
682 }
683 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
684 &sc->sc_rx_hndl)) {
685 DPRINTF("%s: failed to obtain a PA handle\n",
686 device_xname(sc->sc_dev));
687 goto errout;
688 }
689
690 memset(&cmd, 0, sizeof(cmd));
691 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
692 cmd.nvs_gpadl = sc->sc_rx_hndl;
693 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
694
695 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
696 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
697 goto errout;
698
699 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
700 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
701 DPRINTF("%s: failed to set up the Rx ring\n",
702 device_xname(sc->sc_dev));
703 goto errout;
704 }
705 if (rsp->nvs_nsect > 1) {
706 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
707 device_xname(sc->sc_dev), rsp->nvs_nsect);
708 hvn_rx_ring_destroy(sc);
709 return -1;
710 }
711 return 0;
712
713 errout:
714 if (sc->sc_rx_hndl) {
715 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
716 sc->sc_rx_hndl = 0;
717 }
718 if (sc->sc_rx_ring) {
719 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
720 sc->sc_rx_ring = NULL;
721 }
722 return -1;
723 }
724
725 static int
726 hvn_rx_ring_destroy(struct hvn_softc *sc)
727 {
728 struct hvn_nvs_rxbuf_disconn cmd;
729 uint64_t tid;
730
731 if (sc->sc_rx_ring == NULL)
732 return 0;
733
734 memset(&cmd, 0, sizeof(cmd));
735 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
736 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
737
738 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
739 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
740 return -1;
741
742 delay(100);
743
744 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
745
746 sc->sc_rx_hndl = 0;
747
748 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
749 sc->sc_rx_ring = NULL;
750
751 return 0;
752 }
753
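/*
 * Set up the transmit descriptors: one DMA-able RNDIS message slot
 * and one mbuf DMA map per descriptor.
 */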
754 static int
755 hvn_tx_ring_create(struct hvn_softc *sc)
756 {
757 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
758 struct hvn_tx_desc *txd;
759 bus_dma_segment_t *seg;
760 size_t msgsize;
761 int i, rsegs;
762 paddr_t pa;
763
764 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
765
766 /* Allocate memory to store RNDIS messages */
767 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
768 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
769 DPRINTF("%s: failed to allocate memory for RNDIS messages\n",
770 device_xname(sc->sc_dev));
771 goto errout;
772 }
773 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
774 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
775 DPRINTF("%s: failed to establish mapping for RNDIS messages\n",
776 device_xname(sc->sc_dev));
777 goto errout;
778 }
779 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
780 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
781 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
782 DPRINTF("%s: failed to create map for RNDIS messages\n",
783 device_xname(sc->sc_dev));
784 goto errout;
785 }
786 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
787 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
788 DPRINTF("%s: failed to load map for RNDIS messages\n",
789 device_xname(sc->sc_dev));
790 goto errout;
791 }
792
793 for (i = 0; i < HVN_TX_DESC; i++) {
794 txd = &sc->sc_tx_desc[i];
795 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
796 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
797 &txd->txd_dmap)) {
798 DPRINTF("%s: failed to create map for TX descriptors\n",
799 device_xname(sc->sc_dev));
800 goto errout;
801 }
802 seg = &sc->sc_tx_rmap->dm_segs[0];
803 pa = seg->ds_addr + (msgsize * i);
804 txd->txd_gpa.gpa_page = atop(pa);
805 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
806 txd->txd_gpa.gpa_len = msgsize;
807 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
808 txd->txd_id = i + HVN_NVS_CHIM_SIG;
809 txd->txd_ready = 1;
810 }
811 sc->sc_tx_avail = HVN_TX_DESC;
812
813 return 0;
814
815 errout:
816 hvn_tx_ring_destroy(sc);
817 return -1;
818 }
819
820 static void
821 hvn_tx_ring_destroy(struct hvn_softc *sc)
822 {
823 struct hvn_tx_desc *txd;
824 int i;
825
826 for (i = 0; i < HVN_TX_DESC; i++) {
827 txd = &sc->sc_tx_desc[i];
828 if (txd->txd_dmap == NULL)
829 continue;
830 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
831 0, txd->txd_dmap->dm_mapsize,
832 BUS_DMASYNC_POSTWRITE);
833 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
834 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
835 txd->txd_dmap = NULL;
836 if (txd->txd_buf == NULL)
837 continue;
838 m_freem(txd->txd_buf);
839 txd->txd_buf = NULL;
840 }
841 if (sc->sc_tx_rmap) {
842 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
843 0, sc->sc_tx_rmap->dm_mapsize,
844 BUS_DMASYNC_POSTWRITE);
845 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
846 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
847 }
848 if (sc->sc_tx_msgs) {
849 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
850
851 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
852 msgsize * HVN_TX_DESC);
853 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
854 }
855 sc->sc_tx_rmap = NULL;
856 sc->sc_tx_msgs = NULL;
857 }
858
859 static int
860 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
861 {
862 size_t addrlen = ETHER_ADDR_LEN;
863 int rv;
864
865 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
866 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
867 rv = -1;
868 return rv;
869 }
870
871 static void
872 hvn_get_link_status(struct hvn_softc *sc)
873 {
874 uint32_t state;
875 size_t len = sizeof(state);
876
877 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
878 &state, &len) == 0)
879 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
880 LINK_STATE_UP : LINK_STATE_DOWN;
881 }
882
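/*
 * Open the VMBus channel, negotiate the newest NVS protocol version
 * the host accepts and initialize the NDIS layer to match.
 */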
883 static int
884 hvn_nvs_attach(struct hvn_softc *sc)
885 {
886 static const uint32_t protos[] = {
887 HVN_NVS_PROTO_VERSION_5,
888 HVN_NVS_PROTO_VERSION_4,
889 HVN_NVS_PROTO_VERSION_2,
890 HVN_NVS_PROTO_VERSION_1
891 };
892 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP;
893 struct hvn_nvs_init cmd;
894 struct hvn_nvs_init_resp *rsp;
895 struct hvn_nvs_ndis_init ncmd;
896 struct hvn_nvs_ndis_conf ccmd;
897 uint32_t ndisver, ringsize;
898 uint64_t tid;
899 int i;
900
901 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, kmemflags);
902 if (sc->sc_nvsbuf == NULL) {
903 DPRINTF("%s: failed to allocate channel data buffer\n",
904 device_xname(sc->sc_dev));
905 return -1;
906 }
907
908 /* We need to be able to fit all RNDIS control and data messages */
909 ringsize = HVN_RNDIS_CTLREQS *
910 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
911 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
912 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
913
914 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
915
916 /* Associate our interrupt handler with the channel */
917 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
918 hvn_nvs_intr, sc)) {
919 DPRINTF("%s: failed to open channel\n",
920 device_xname(sc->sc_dev));
921 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
922 return -1;
923 }
924
925 memset(&cmd, 0, sizeof(cmd));
926 cmd.nvs_type = HVN_NVS_TYPE_INIT;
927 for (i = 0; i < __arraycount(protos); i++) {
928 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
929 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
930 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
931 return -1;
932
933 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
934 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
935 sc->sc_proto = protos[i];
936 break;
937 }
938 }
939 if (i == __arraycount(protos)) {
940 DPRINTF("%s: failed to negotiate NVSP version\n",
941 device_xname(sc->sc_dev));
942 return -1;
943 }
944
945 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
946 memset(&ccmd, 0, sizeof(ccmd));
947 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
948 ccmd.nvs_mtu = HVN_MAXMTU;
949 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
950
951 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
952 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
953 return -1;
954 }
955
956 memset(&ncmd, 0, sizeof(ncmd));
957 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
958 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
959 ndisver = NDIS_VERSION_6_1;
960 else
961 ndisver = NDIS_VERSION_6_30;
962 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
963 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
964
965 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
966 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
967 return -1;
968
969 sc->sc_ndisver = ndisver;
970
971 return 0;
972 }
973
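/*
 * Channel callback: drain the ring buffer and dispatch NVS completion
 * packets (command responses, transmit acknowledgements) and inbound
 * RNDIS data packets.
 */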
974 static void
975 hvn_nvs_intr(void *arg)
976 {
977 struct hvn_softc *sc = arg;
978 struct ifnet *ifp = SC2IFP(sc);
979 struct vmbus_chanpkt_hdr *cph;
980 const struct hvn_nvs_hdr *nvs;
981 uint64_t rid;
982 uint32_t rlen;
983 int rv;
984 bool dotx = false;
985
986 for (;;) {
987 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
988 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
989 if (rv != 0 || rlen == 0) {
990 if (rv != EAGAIN)
991 device_printf(sc->sc_dev,
992 "failed to receive an NVSP packet\n");
993 break;
994 }
995 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
996 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
997
998 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
999 switch (nvs->nvs_type) {
1000 case HVN_NVS_TYPE_INIT_RESP:
1001 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1002 case HVN_NVS_TYPE_CHIM_CONNRESP:
1003 case HVN_NVS_TYPE_SUBCH_RESP:
1004 /* copy the response back */
1005 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1006 sc->sc_nvsdone = 1;
1007 wakeup(&sc->sc_nvsrsp);
1008 break;
1009 case HVN_NVS_TYPE_RNDIS_ACK:
1010 dotx = true;
1011 hvn_txeof(sc, cph->cph_tid);
1012 break;
1013 default:
1014 device_printf(sc->sc_dev,
1015 "unhandled NVSP packet type %u "
1016 "on completion\n", nvs->nvs_type);
1017 break;
1018 }
1019 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1020 switch (nvs->nvs_type) {
1021 case HVN_NVS_TYPE_RNDIS:
1022 hvn_rndis_input(sc, cph->cph_tid, cph);
1023 break;
1024 default:
1025 device_printf(sc->sc_dev,
1026 "unhandled NVSP packet type %u "
1027 "on receive\n", nvs->nvs_type);
1028 break;
1029 }
1030 } else
1031 device_printf(sc->sc_dev,
1032 "unknown NVSP packet type %u\n", cph->cph_type);
1033 }
1034
1035 if (dotx)
1036 if_schedule_deferred_start(ifp);
1037 }
1038
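/*
 * Send an NVS command on the channel.  With a non-zero timeout, wait
 * for the response, polling the channel directly while the system is
 * still cold.
 */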
1039 static int
1040 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1041 int timo)
1042 {
1043 struct hvn_nvs_hdr *hdr = cmd;
1044 int tries = 10;
1045 int rv, s;
1046
1047 sc->sc_nvsdone = 0;
1048
1049 do {
1050 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1051 tid, VMBUS_CHANPKT_TYPE_INBAND,
1052 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1053 if (rv == EAGAIN) {
1054 if (cold)
1055 delay(1000);
1056 else
1057 tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
1058 } else if (rv) {
1059 DPRINTF("%s: NVSP operation %u send error %d\n",
1060 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1061 return rv;
1062 }
1063 } while (rv != 0 && --tries > 0);
1064
1065 if (tries == 0 && rv != 0) {
1066 device_printf(sc->sc_dev,
1067 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1068 return rv;
1069 }
1070
1071 if (timo == 0)
1072 return 0;
1073
1074 do {
1075 if (cold) {
1076 delay(1000);
1077 s = splnet();
1078 hvn_nvs_intr(sc);
1079 splx(s);
1080 } else
1081 tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
1082 mstohz(1));
1083 } while (--timo > 0 && sc->sc_nvsdone != 1);
1084
1085 if (timo == 0 && sc->sc_nvsdone != 1) {
1086 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1087 hdr->nvs_type);
1088 return ETIMEDOUT;
1089 }
1090 return 0;
1091 }
1092
1093 static int
1094 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1095 {
1096 struct hvn_nvs_rndis_ack cmd;
1097 int tries = 5;
1098 int rv;
1099
1100 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1101 cmd.nvs_status = HVN_NVS_STATUS_OK;
1102 do {
1103 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1104 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1105 if (rv == EAGAIN)
1106 delay(10);
1107 else if (rv) {
1108 DPRINTF("%s: NVSP acknowledgement error %d\n",
1109 device_xname(sc->sc_dev), rv);
1110 return rv;
1111 }
1112 } while (rv != 0 && --tries > 0);
1113 return rv;
1114 }
1115
1116 static void
1117 hvn_nvs_detach(struct hvn_softc *sc)
1118 {
1119
1120 if (vmbus_channel_close(sc->sc_chan) == 0) {
1121 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1122 sc->sc_nvsbuf = NULL;
1123 }
1124 }
1125
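/*
 * RNDIS control requests cycle through three queues: free ->
 * submitted -> completed, each protected by its own mutex.
 */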
1126 static inline struct rndis_cmd *
1127 hvn_alloc_cmd(struct hvn_softc *sc)
1128 {
1129 struct rndis_cmd *rc;
1130
1131 mutex_enter(&sc->sc_cntl_fqlck);
1132 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1133 /* XXX use condvar(9) instead of mtsleep */
1134 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1135 &sc->sc_cntl_fqlck);
1136 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1137 mutex_exit(&sc->sc_cntl_fqlck);
1138 return rc;
1139 }
1140
1141 static inline void
1142 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1143 {
1144
1145 mutex_enter(&sc->sc_cntl_sqlck);
1146 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1147 mutex_exit(&sc->sc_cntl_sqlck);
1148 }
1149
1150 static inline struct rndis_cmd *
1151 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1152 {
1153 struct rndis_cmd *rc;
1154
1155 mutex_enter(&sc->sc_cntl_sqlck);
1156 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1157 if (rc->rc_id == id) {
1158 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1159 break;
1160 }
1161 }
1162 mutex_exit(&sc->sc_cntl_sqlck);
1163 if (rc != NULL) {
1164 mutex_enter(&sc->sc_cntl_cqlck);
1165 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1166 mutex_exit(&sc->sc_cntl_cqlck);
1167 }
1168 return rc;
1169 }
1170
1171 static inline void
1172 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1173 {
1174
1175 mutex_enter(&sc->sc_cntl_cqlck);
1176 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1177 mutex_exit(&sc->sc_cntl_cqlck);
1178 }
1179
1180 static inline int
1181 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1182 {
1183 struct rndis_cmd *rn;
1184
1185 mutex_enter(&sc->sc_cntl_sqlck);
1186 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1187 if (rn == rc) {
1188 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1189 mutex_exit(&sc->sc_cntl_sqlck);
1190 return 0;
1191 }
1192 }
1193 mutex_exit(&sc->sc_cntl_sqlck);
1194 return -1;
1195 }
1196
1197 static inline void
1198 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1199 {
1200
1201 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1202 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1203 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1204 mutex_enter(&sc->sc_cntl_fqlck);
1205 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1206 mutex_exit(&sc->sc_cntl_fqlck);
1207 wakeup(&sc->sc_cntl_fq);
1208 }
1209
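/*
 * Allocate DMA memory for the RNDIS control requests and bring up the
 * control path with a REMOTE_NDIS_INITIALIZE_MSG exchange.
 */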
1210 static int
1211 hvn_rndis_attach(struct hvn_softc *sc)
1212 {
1213 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1214 struct rndis_init_req *req;
1215 struct rndis_init_comp *cmp;
1216 struct rndis_cmd *rc;
1217 int i, rv;
1218
1219 /* RNDIS control message queues */
1220 TAILQ_INIT(&sc->sc_cntl_sq);
1221 TAILQ_INIT(&sc->sc_cntl_cq);
1222 TAILQ_INIT(&sc->sc_cntl_fq);
1223 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1224 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1225 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1226
1227 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1228 rc = &sc->sc_cntl_msgs[i];
1229 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1230 dmaflags, &rc->rc_dmap)) {
1231 DPRINTF("%s: failed to create RNDIS command map\n",
1232 device_xname(sc->sc_dev));
1233 goto errout;
1234 }
1235 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1236 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1237 DPRINTF("%s: failed to allocate RNDIS command\n",
1238 device_xname(sc->sc_dev));
1239 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1240 goto errout;
1241 }
1242 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1243 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1244 DPRINTF("%s: failed to allocate RNDIS command\n",
1245 device_xname(sc->sc_dev));
1246 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1247 rc->rc_nsegs);
1248 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1249 goto errout;
1250 }
1251 memset(rc->rc_req, 0, PAGE_SIZE);
1252 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1253 PAGE_SIZE, NULL, dmaflags)) {
1254 DPRINTF("%s: failed to load RNDIS command map\n",
1255 device_xname(sc->sc_dev));
1256 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1257 rc->rc_nsegs);
1258 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1259 goto errout;
1260 }
1261 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1262 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1263 }
1264
1265 rc = hvn_alloc_cmd(sc);
1266
1267 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1268 BUS_DMASYNC_PREREAD);
1269
1270 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1271
1272 req = rc->rc_req;
1273 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1274 req->rm_len = sizeof(*req);
1275 req->rm_rid = rc->rc_id;
1276 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1277 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1278 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1279
1280 rc->rc_cmplen = sizeof(*cmp);
1281
1282 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1283 BUS_DMASYNC_PREWRITE);
1284
1285 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1286 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1287 device_xname(sc->sc_dev), rv);
1288 hvn_free_cmd(sc, rc);
1289 goto errout;
1290 }
1291 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1292 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1293 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1294 device_xname(sc->sc_dev), cmp->rm_status);
1295 hvn_free_cmd(sc, rc);
1296 goto errout;
1297 }
1298
1299 hvn_free_cmd(sc, rc);
1300
1301 /* Initialize RNDIS Data command */
1302 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1303 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1304 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1305 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1306
1307 return 0;
1308
1309 errout:
1310 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1311 rc = &sc->sc_cntl_msgs[i];
1312 if (rc->rc_req == NULL)
1313 continue;
1314 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1315 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1316 rc->rc_req = NULL;
1317 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1318 }
1319 return -1;
1320 }
1321
1322 static int
1323 hvn_set_capabilities(struct hvn_softc *sc)
1324 {
1325 struct ndis_offload_params params;
1326 size_t len = sizeof(params);
1327
1328 memset(&params, 0, sizeof(params));
1329
1330 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1331 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1332 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1333 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1334 } else {
1335 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1336 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1337 }
1338
1339 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1340 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1341 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1342 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1343 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1344 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1345 }
1346
1347 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1348 }
1349
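/*
 * Send an RNDIS control request as a scatter/gather element and wait
 * for the matching completion; poll the channel while cold.
 */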
1350 static int
1351 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1352 {
1353 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1354 struct rndis_msghdr *hdr = rc->rc_req;
1355 struct vmbus_gpa sgl[1];
1356 int tries = 10;
1357 int rv, s;
1358
1359 KASSERT(timo > 0);
1360
1361 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1362 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1363 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1364
1365 sgl[0].gpa_page = rc->rc_gpa;
1366 sgl[0].gpa_len = hdr->rm_len;
1367 sgl[0].gpa_ofs = 0;
1368
1369 rc->rc_done = 0;
1370
1371 hvn_submit_cmd(sc, rc);
1372
1373 do {
1374 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1375 sizeof(*msg), rc->rc_id);
1376 if (rv == EAGAIN) {
1377 if (cold)
1378 delay(1000);
1379 else
1380 tsleep(rc, PRIBIO, "rndisout", mstohz(1));
1381 } else if (rv) {
1382 DPRINTF("%s: RNDIS operation %u send error %d\n",
1383 device_xname(sc->sc_dev), hdr->rm_type, rv);
1384 hvn_rollback_cmd(sc, rc);
1385 return rv;
1386 }
1387 } while (rv != 0 && --tries > 0);
1388
1389 if (tries == 0 && rv != 0) {
1390 device_printf(sc->sc_dev,
1391 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1392 return rv;
1393 }
1394
1395 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1396 BUS_DMASYNC_POSTWRITE);
1397
1398 do {
1399 if (cold) {
1400 delay(1000);
1401 s = splnet();
1402 hvn_nvs_intr(sc);
1403 splx(s);
1404 } else
1405 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
1406 } while (--timo > 0 && rc->rc_done != 1);
1407
1408 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1409 BUS_DMASYNC_POSTREAD);
1410
1411 if (rc->rc_done != 1) {
1412 rv = timo == 0 ? ETIMEDOUT : EINTR;
1413 if (hvn_rollback_cmd(sc, rc)) {
1414 hvn_release_cmd(sc, rc);
1415 rv = 0;
1416 } else if (rv == ETIMEDOUT) {
1417 device_printf(sc->sc_dev,
1418 "RNDIS operation %u timed out\n", hdr->rm_type);
1419 }
1420 return rv;
1421 }
1422
1423 hvn_release_cmd(sc, rc);
1424 return 0;
1425 }
1426
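/*
 * Walk the receive buffer ranges referenced by an inbound NVS RNDIS
 * packet and dispatch each RNDIS message by type, acknowledging the
 * transfer when done.
 */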
1427 static void
1428 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1429 {
1430 struct vmbus_chanpkt_prplist *cp = arg;
1431 uint32_t off, len, type;
1432 int i;
1433
1434 if (sc->sc_rx_ring == NULL) {
1435 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1436 return;
1437 }
1438
1439 for (i = 0; i < cp->cp_range_cnt; i++) {
1440 off = cp->cp_range[i].gpa_ofs;
1441 len = cp->cp_range[i].gpa_len;
1442
1443 KASSERT(off + len <= sc->sc_rx_size);
1444 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1445
1446 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1447 switch (type) {
1448 /* data message */
1449 case REMOTE_NDIS_PACKET_MSG:
1450 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1451 break;
1452 /* completion messages */
1453 case REMOTE_NDIS_INITIALIZE_CMPLT:
1454 case REMOTE_NDIS_QUERY_CMPLT:
1455 case REMOTE_NDIS_SET_CMPLT:
1456 case REMOTE_NDIS_RESET_CMPLT:
1457 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1458 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1459 break;
1460 /* notification message */
1461 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1462 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1463 break;
1464 default:
1465 device_printf(sc->sc_dev,
1466 "unhandled RNDIS message type %u\n", type);
1467 break;
1468 }
1469 }
1470
1471 hvn_nvs_ack(sc, tid);
1472 }
1473
1474 static inline struct mbuf *
1475 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1476 {
1477 struct ifnet *ifp = SC2IFP(sc);
1478 struct mbuf *m;
1479 size_t size = len + ETHER_ALIGN;
1480
1481 MGETHDR(m, M_NOWAIT, MT_DATA);
1482 if (m == NULL)
1483 return NULL;
1484
1485 if (size > MHLEN) {
1486 if (size <= MCLBYTES)
1487 MCLGET(m, M_NOWAIT);
1488 else
1489 MEXTMALLOC(m, size, M_NOWAIT);
1490 if ((m->m_flags & M_EXT) == 0) {
1491 m_freem(m);
1492 return NULL;
1493 }
1494 }
1495
1496 m->m_len = m->m_pkthdr.len = size;
1497 m_adj(m, ETHER_ALIGN);
1498 m_copyback(m, 0, len, buf);
1499 m_set_rcvif(m, ifp);
1500 return m;
1501 }
1502
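/*
 * Receive path: validate the RNDIS packet message, copy the payload
 * into an mbuf and translate the attached per-packet info (checksum
 * offload results, VLAN tag) before handing it to the network stack.
 */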
1503 static void
1504 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1505 {
1506 struct ifnet *ifp = SC2IFP(sc);
1507 struct rndis_packet_msg *pkt;
1508 struct rndis_pktinfo *pi;
1509 uint32_t csum, vlan;
1510 struct mbuf *m;
1511
1512 if (!(ifp->if_flags & IFF_RUNNING))
1513 return;
1514
1515 if (len < sizeof(*pkt)) {
1516 device_printf(sc->sc_dev, "data packet too short: %u\n",
1517 len);
1518 return;
1519 }
1520
1521 pkt = (struct rndis_packet_msg *)buf;
1522 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1523 device_printf(sc->sc_dev,
1524 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1525 pkt->rm_datalen);
1526 return;
1527 }
1528
1529 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1530 pkt->rm_datalen)) == NULL) {
1531 ifp->if_ierrors++;
1532 return;
1533 }
1534
1535 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1536 device_printf(sc->sc_dev,
1537 "pktinfo is out of bounds: %u@%u vs %u\n",
1538 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1539 goto done;
1540 }
1541
1542 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1543 pkt->rm_pktinfooffset);
1544 while (pkt->rm_pktinfolen > 0) {
1545 if (pi->rm_size > pkt->rm_pktinfolen) {
1546 device_printf(sc->sc_dev,
1547 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1548 pkt->rm_pktinfolen);
1549 break;
1550 }
1551
1552 switch (pi->rm_type) {
1553 case NDIS_PKTINFO_TYPE_CSUM:
1554 memcpy(&csum, pi->rm_data, sizeof(csum));
1555 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1556 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1557 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1558 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1559 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1560 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1561 break;
1562 case NDIS_PKTINFO_TYPE_VLAN:
1563 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1564 if (vlan != 0xffffffff) {
1565 m->m_pkthdr.ether_vtag =
1566 NDIS_VLAN_INFO_ID(vlan) |
1567 (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1568 m->m_flags |= M_VLANTAG;
1569 }
1570 break;
1571 default:
1572 DPRINTF("%s: unhandled pktinfo type %u\n",
1573 device_xname(sc->sc_dev), pi->rm_type);
1574 break;
1575 }
1576
1577 pkt->rm_pktinfolen -= pi->rm_size;
1578 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1579 }
1580
1581 done:
1582 if_percpuq_enqueue(sc->sc_ipq, m);
1583 }
1584
1585 static void
1586 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1587 {
1588 struct rndis_cmd *rc;
1589 uint32_t id;
1590
1591 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1592 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1593 if (len < rc->rc_cmplen)
1594 device_printf(sc->sc_dev,
1595 "RNDIS response %u too short: %u\n", id, len);
1596 else
1597 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1598 if (len > rc->rc_cmplen &&
1599 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1600 device_printf(sc->sc_dev,
1601 "RNDIS response %u too large: %u\n", id, len);
1602 else if (len > rc->rc_cmplen)
1603 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1604 len - rc->rc_cmplen);
1605 rc->rc_done = 1;
1606 wakeup(rc);
1607 } else {
1608 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1609 device_xname(sc->sc_dev), id);
1610 }
1611 }
1612
1613 static int
1614 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1615 {
1616 uint64_t rid = (uint64_t)txd->txd_id << 32;
1617 int rv;
1618
1619 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1620 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1621 if (rv) {
1622 DPRINTF("%s: RNDIS data send error %d\n",
1623 device_xname(sc->sc_dev), rv);
1624 return rv;
1625 }
1626 return 0;
1627 }
1628
1629 static void
1630 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1631 {
1632 struct ifnet *ifp = SC2IFP(sc);
1633 uint32_t status;
1634 int link_state = sc->sc_link_state;
1635
1636 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1637 switch (status) {
1638 case RNDIS_STATUS_MEDIA_CONNECT:
1639 sc->sc_link_state = LINK_STATE_UP;
1640 break;
1641 case RNDIS_STATUS_MEDIA_DISCONNECT:
1642 sc->sc_link_state = LINK_STATE_DOWN;
1643 break;
1644 /* Ignore these */
1645 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1646 return;
1647 default:
1648 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1649 status);
1650 return;
1651 }
1652 if (link_state != sc->sc_link_state)
1653 if_link_state_change(ifp, sc->sc_link_state);
1654 }
1655
1656 static int
1657 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1658 {
1659 struct rndis_cmd *rc;
1660 struct rndis_query_req *req;
1661 struct rndis_query_comp *cmp;
1662 size_t olength = *length;
1663 int rv;
1664
1665 rc = hvn_alloc_cmd(sc);
1666
1667 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1668 BUS_DMASYNC_PREREAD);
1669
1670 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1671
1672 req = rc->rc_req;
1673 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1674 req->rm_len = sizeof(*req);
1675 req->rm_rid = rc->rc_id;
1676 req->rm_oid = oid;
1677 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1678
1679 rc->rc_cmplen = sizeof(*cmp);
1680
1681 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1682 BUS_DMASYNC_PREWRITE);
1683
1684 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1685 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1686 device_xname(sc->sc_dev), rv);
1687 hvn_free_cmd(sc, rc);
1688 return rv;
1689 }
1690
1691 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1692 switch (cmp->rm_status) {
1693 case RNDIS_STATUS_SUCCESS:
1694 if (cmp->rm_infobuflen > olength) {
1695 rv = EINVAL;
1696 break;
1697 }
1698 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1699 *length = cmp->rm_infobuflen;
1700 break;
1701 default:
1702 *length = 0;
1703 rv = EIO;
1704 break;
1705 }
1706
1707 hvn_free_cmd(sc, rc);
1708 return rv;
1709 }
1710
1711 static int
1712 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1713 {
1714 struct rndis_cmd *rc;
1715 struct rndis_set_req *req;
1716 struct rndis_set_comp *cmp;
1717 int rv;
1718
1719 rc = hvn_alloc_cmd(sc);
1720
1721 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1722 BUS_DMASYNC_PREREAD);
1723
1724 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1725
1726 req = rc->rc_req;
1727 req->rm_type = REMOTE_NDIS_SET_MSG;
1728 req->rm_len = sizeof(*req) + length;
1729 req->rm_rid = rc->rc_id;
1730 req->rm_oid = oid;
1731 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1732
1733 rc->rc_cmplen = sizeof(*cmp);
1734
1735 if (length > 0) {
1736 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1737 req->rm_infobuflen = length;
1738 memcpy(req + 1, data, length);
1739 }
1740
1741 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1742 BUS_DMASYNC_PREWRITE);
1743
1744 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1745 DPRINTF("%s: SET_MSG failed, error %d\n",
1746 device_xname(sc->sc_dev), rv);
1747 hvn_free_cmd(sc, rc);
1748 return rv;
1749 }
1750
1751 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1752 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1753 rv = EIO;
1754
1755 hvn_free_cmd(sc, rc);
1756 return rv;
1757 }
1758
1759 static int
1760 hvn_rndis_open(struct hvn_softc *sc)
1761 {
1762 uint32_t filter;
1763 int rv;
1764
1765 if (sc->sc_promisc)
1766 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1767 else
1768 filter = RNDIS_PACKET_TYPE_BROADCAST |
1769 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1770 RNDIS_PACKET_TYPE_DIRECTED;
1771
1772 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1773 &filter, sizeof(filter));
1774 if (rv) {
1775 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1776 device_xname(sc->sc_dev), filter);
1777 }
1778 return rv;
1779 }
1780
1781 static int
1782 hvn_rndis_close(struct hvn_softc *sc)
1783 {
1784 uint32_t filter = 0;
1785 int rv;
1786
1787 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1788 &filter, sizeof(filter));
1789 if (rv) {
1790 DPRINTF("%s: failed to clear RNDIS filter\n",
1791 device_xname(sc->sc_dev));
1792 }
1793 return rv;
1794 }
1795
1796 static void
1797 hvn_rndis_detach(struct hvn_softc *sc)
1798 {
1799 struct rndis_cmd *rc;
1800 struct rndis_halt_req *req;
1801 int rv;
1802
1803 rc = hvn_alloc_cmd(sc);
1804
1805 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1806 BUS_DMASYNC_PREREAD);
1807
1808 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1809
1810 req = rc->rc_req;
1811 req->rm_type = REMOTE_NDIS_HALT_MSG;
1812 req->rm_len = sizeof(*req);
1813 req->rm_rid = rc->rc_id;
1814
1815 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1816 BUS_DMASYNC_PREWRITE);
1817
1818 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1819 DPRINTF("%s: HALT_MSG failed, error %d\n",
1820 device_xname(sc->sc_dev), rv);
1821 }
1822 hvn_free_cmd(sc, rc);
1823 }
1824