if_hvn.c revision 1.19 1 /* $NetBSD: if_hvn.c,v 1.19 2020/12/11 08:13:08 nonaka Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.19 2020/12/11 08:13:08 nonaka Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
/*
 * Fallback definitions for the 802.1Q tag bit positions when the
 * system headers do not provide them: priority (PCP) starts at
 * bit 13, CFI/DEI at bit 12.
 */
#ifndef EVL_PRIO_BITS
#define EVL_PRIO_BITS		13
#endif
#ifndef EVL_CFI_BITS
#define EVL_CFI_BITS		12
#endif

/* Fixed NVS message size and size of the NVS receive buffer. */
#define HVN_NVS_MSGSIZE		32
#define HVN_NVS_BUFSIZE		PAGE_SIZE

/*
 * RNDIS control interface
 */
#define HVN_RNDIS_CTLREQS	4	/* pre-allocated control requests */
#define HVN_RNDIS_BUFSIZE	512	/* per-request completion buffer */
82
/*
 * One in-flight RNDIS control transaction: the request message, the
 * DMA resources backing it, and room for the host's completion data.
 */
struct rndis_cmd {
	uint32_t		 rc_id;		/* request id matched against replies */
	struct hvn_nvs_rndis	 rc_msg;	/* NVS header wrapping the request */
	void			*rc_req;	/* RNDIS request message buffer */
	bus_dmamap_t		 rc_dmap;	/* DMA map for rc_req */
	bus_dma_segment_t	 rc_segs;
	int			 rc_nsegs;
	uint64_t		 rc_gpa;	/* guest physical page of rc_req */
	struct rndis_packet_msg	 rc_cmp;	/* copy of the completion header */
	uint32_t		 rc_cmplen;	/* length of the completion payload */
	uint8_t			 rc_cmpbuf[HVN_RNDIS_BUFSIZE]; /* completion payload */
	int			 rc_done;	/* set once the completion arrived */
	TAILQ_ENTRY(rndis_cmd)	 rc_entry;
};
TAILQ_HEAD(rndis_queue, rndis_cmd);
98
#define HVN_MAXMTU		(9 * 1024)	/* largest MTU advertised to the host */

#define HVN_RNDIS_XFER_SIZE	2048

/*
 * Tx ring
 */
#define HVN_TX_DESC		256	/* number of Tx descriptors */
#define HVN_TX_FRAGS		15	/* 31 is the max */
#define HVN_TX_FRAG_SIZE	PAGE_SIZE
#define HVN_TX_PKT_SIZE		16384	/* max DMA-mapped packet size */

/*
 * Largest RNDIS packet message we ever build: the header plus the
 * per-packet info records for a VLAN tag and Tx checksum offload.
 */
#define HVN_RNDIS_PKT_LEN					\
	(sizeof(struct rndis_packet_msg) +			\
	 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE +	\
	 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
115
/*
 * Per-packet Tx descriptor: the scatter/gather list handed to vmbus,
 * the mbuf being transmitted and the pre-allocated RNDIS message.
 */
struct hvn_tx_desc {
	uint32_t		 txd_id;	/* HVN_NVS_CHIM_SIG + array index */
	int			 txd_ready;	/* non-zero when free for reuse */
	struct vmbus_gpa	 txd_sgl[HVN_TX_FRAGS + 1]; /* slot 0 is the RNDIS msg */
	int			 txd_nsge;	/* valid entries in txd_sgl */
	struct mbuf		*txd_buf;	/* mbuf in flight, if any */
	bus_dmamap_t		 txd_dmap;	/* payload DMA map */
	struct vmbus_gpa	 txd_gpa;	/* guest physical address of txd_req */
	struct rndis_packet_msg	*txd_req;	/* pre-allocated RNDIS message */
};
126
/*
 * Per-instance driver state for a Hyper-V NetVSC device.
 */
struct hvn_softc {
	device_t		 sc_dev;

	struct vmbus_softc	*sc_vmbus;	/* parent bus */
	struct vmbus_channel	*sc_chan;	/* primary channel */
	bus_dma_tag_t		 sc_dmat;

	struct ethercom		 sc_ec;
	struct ifmedia		 sc_media;
	kmutex_t		 sc_media_lock;	/* XXX */
	struct if_percpuq	*sc_ipq;	/* per-CPU input queue */
	int			 sc_link_state;	/* cached LINK_STATE_* */
	int			 sc_promisc;

	uint32_t		 sc_flags;
#define HVN_SCF_ATTACHED	__BIT(0)	/* attach completed; detach may run */

	/* NVS protocol */
	int			 sc_proto;	/* negotiated NVS version */
	uint32_t		 sc_nvstid;	/* transaction id counter */
	uint8_t			 sc_nvsrsp[HVN_NVS_MSGSIZE]; /* last NVS response */
	uint8_t			*sc_nvsbuf;	/* channel receive buffer */
	int			 sc_nvsdone;	/* response arrived flag */

	/* RNDIS protocol */
	int			 sc_ndisver;	/* negotiated NDIS version */
	uint32_t		 sc_rndisrid;	/* RNDIS request id counter */
	struct rndis_queue	 sc_cntl_sq;	/* submission queue */
	kmutex_t		 sc_cntl_sqlck;
	struct rndis_queue	 sc_cntl_cq;	/* completion queue */
	kmutex_t		 sc_cntl_cqlck;
	struct rndis_queue	 sc_cntl_fq;	/* free queue */
	kmutex_t		 sc_cntl_fqlck;
	struct rndis_cmd	 sc_cntl_msgs[HVN_RNDIS_CTLREQS];
	struct hvn_nvs_rndis	 sc_data_msg;

	/* Rx ring */
	uint8_t			*sc_rx_ring;	/* host-shared receive buffer */
	int			 sc_rx_size;
	uint32_t		 sc_rx_hndl;	/* GPADL handle for the buffer */
	struct hyperv_dma	 sc_rx_dma;

	/* Tx ring */
	uint32_t		 sc_tx_next;	/* next descriptor candidate */
	uint32_t		 sc_tx_avail;	/* free descriptor count (atomic) */
	struct hvn_tx_desc	 sc_tx_desc[HVN_TX_DESC];
	bus_dmamap_t		 sc_tx_rmap;	/* map of the RNDIS message area */
	uint8_t			*sc_tx_msgs;	/* backing storage for txd_req's */
	bus_dma_segment_t	 sc_tx_mseg;
};

#define SC2IFP(_sc_)	(&(_sc_)->sc_ec.ec_if)
#define IFP2SC(_ifp_)	((_ifp_)->if_softc)
180
181
/* Autoconf glue */
static int	hvn_match(device_t, cfdata_t, void *);
static void	hvn_attach(device_t, device_t, void *);
static int	hvn_detach(device_t, int);

CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
    hvn_match, hvn_attach, hvn_detach, NULL);

/* Network interface */
static int	hvn_ioctl(struct ifnet *, u_long, void *);
static int	hvn_media_change(struct ifnet *);
static void	hvn_media_status(struct ifnet *, struct ifmediareq *);
static int	hvn_iff(struct hvn_softc *);
static int	hvn_init(struct ifnet *);
static void	hvn_stop(struct ifnet *, int);
static void	hvn_start(struct ifnet *);
static int	hvn_encap(struct hvn_softc *, struct mbuf *,
		    struct hvn_tx_desc **);
static void	hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
static void	hvn_txeof(struct hvn_softc *, uint64_t);
static int	hvn_rx_ring_create(struct hvn_softc *);
static int	hvn_rx_ring_destroy(struct hvn_softc *);
static int	hvn_tx_ring_create(struct hvn_softc *);
static void	hvn_tx_ring_destroy(struct hvn_softc *);
static int	hvn_set_capabilities(struct hvn_softc *);
static int	hvn_get_lladdr(struct hvn_softc *, uint8_t *);
static void	hvn_get_link_status(struct hvn_softc *);

/* NVSP */
static int	hvn_nvs_attach(struct hvn_softc *);
static void	hvn_nvs_intr(void *);
static int	hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
static int	hvn_nvs_ack(struct hvn_softc *, uint64_t);
static void	hvn_nvs_detach(struct hvn_softc *);

/* RNDIS */
static int	hvn_rndis_attach(struct hvn_softc *);
static int	hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
static void	hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
static void	hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
static void	hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
static int	hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
static void	hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
static int	hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
static int	hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
static int	hvn_rndis_open(struct hvn_softc *);
static int	hvn_rndis_close(struct hvn_softc *);
static void	hvn_rndis_detach(struct hvn_softc *);
228
229 static int
230 hvn_match(device_t parent, cfdata_t match, void *aux)
231 {
232 struct vmbus_attach_args *aa = aux;
233
234 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
235 return 0;
236 return 1;
237 }
238
/*
 * Attach the NetVSC instance: open the NVS channel, allocate the
 * Rx/Tx rings, bring up RNDIS and register the ethernet interface.
 * On error everything set up so far is unwound via the fail labels.
 */
static void
hvn_attach(device_t parent, device_t self, void *aux)
{
	struct hvn_softc *sc = device_private(self);
	struct vmbus_attach_args *aa = aux;
	struct ifnet *ifp = SC2IFP(sc);
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error;

	sc->sc_dev = self;
	sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
	sc->sc_chan = aa->aa_chan;
	sc->sc_dmat = sc->sc_vmbus->sc_dmat;

	aprint_naive("\n");
	aprint_normal(": Hyper-V NetVSC\n");

	/* Open the channel and negotiate NVS/NDIS versions first. */
	if (hvn_nvs_attach(sc)) {
		aprint_error_dev(self, "failed to init NVSP\n");
		return;
	}

	if (hvn_rx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Rx ring\n");
		goto fail1;
	}

	if (hvn_tx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Tx ring\n");
		goto fail2;
	}

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hvn_ioctl;
	ifp->if_start = hvn_start;
	ifp->if_init = hvn_init;
	ifp->if_stop = hvn_stop;
	/* IPv4/TCP checksum offload is always advertised. */
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
	/* UDP checksum offload only on NDIS newer than 6.30. */
	if (sc->sc_ndisver > NDIS_VERSION_6_30) {
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
	}
	/* VLAN hardware tagging needs NVS protocol 2 or newer. */
	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
		sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
	}

	IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	/* XXX media locking needs revisiting */
	mutex_init(&sc->sc_media_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK,
	    hvn_media_change, hvn_media_status, &sc->sc_media_lock);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	error = if_initialize(ifp);
	if (error) {
		aprint_error_dev(self, "if_initialize failed(%d)\n", error);
		goto fail3;
	}
	sc->sc_ipq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);

	if (hvn_rndis_attach(sc)) {
		aprint_error_dev(self, "failed to init RNDIS\n");
		goto fail3;
	}

	aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
	    sc->sc_proto >> 16, sc->sc_proto & 0xffff,
	    sc->sc_ndisver >> 16 , sc->sc_ndisver & 0xffff);

	if (hvn_set_capabilities(sc)) {
		aprint_error_dev(self, "failed to setup offloading\n");
		goto fail4;
	}

	if (hvn_get_lladdr(sc, enaddr)) {
		aprint_error_dev(self,
		    "failed to obtain an ethernet address\n");
		goto fail4;
	}
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Mark attach as complete so hvn_detach() knows it may tear down. */
	SET(sc->sc_flags, HVN_SCF_ATTACHED);
	return;

	/* Unwind in the reverse order of construction. */
fail4:	hvn_rndis_detach(sc);
	if_percpuq_destroy(sc->sc_ipq);
fail3:	ifmedia_fini(&sc->sc_media);
	mutex_destroy(&sc->sc_media_lock);
	hvn_tx_ring_destroy(sc);
fail2:	hvn_rx_ring_destroy(sc);
fail1:	hvn_nvs_detach(sc);
}
353
/*
 * Detach the interface: stop traffic, unregister from the network
 * stack and release all ring, RNDIS and NVS resources.
 */
static int
hvn_detach(device_t self, int flags)
{
	struct hvn_softc *sc = device_private(self);
	struct ifnet *ifp = SC2IFP(sc);

	/* Nothing to undo if attach never completed. */
	if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
		return 0;

	if (ifp->if_flags & IFF_RUNNING)
		hvn_stop(ifp, 1);

	pmf_device_deregister(self);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	mutex_destroy(&sc->sc_media_lock);
	if_percpuq_destroy(sc->sc_ipq);

	/* Tear down in the reverse order of attachment. */
	hvn_rndis_detach(sc);
	hvn_rx_ring_destroy(sc);
	hvn_tx_ring_destroy(sc);
	hvn_nvs_detach(sc);

	return 0;
}
381
382 static int
383 hvn_ioctl(struct ifnet *ifp, u_long command, void * data)
384 {
385 struct hvn_softc *sc = IFP2SC(ifp);
386 int s, error = 0;
387
388 s = splnet();
389
390 error = ether_ioctl(ifp, command, data);
391 if (error == ENETRESET) {
392 if (ifp->if_flags & IFF_RUNNING)
393 hvn_iff(sc);
394 error = 0;
395 }
396
397 splx(s);
398
399 return error;
400 }
401
/*
 * ifmedia change callback: only one fixed media type exists, so
 * there is nothing to reconfigure.
 */
static int
hvn_media_change(struct ifnet *ifp)
{

	return 0;
}
408
409 static void
410 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
411 {
412 struct hvn_softc *sc = IFP2SC(ifp);
413 int link_state;
414
415 link_state = sc->sc_link_state;
416 hvn_get_link_status(sc);
417 if (link_state != sc->sc_link_state)
418 if_link_state_change(ifp, sc->sc_link_state);
419
420 ifmr->ifm_status = IFM_AVALID;
421 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
422 if (sc->sc_link_state == LINK_STATE_UP)
423 ifmr->ifm_status |= IFM_ACTIVE;
424 }
425
426 static int
427 hvn_iff(struct hvn_softc *sc)
428 {
429
430 /* XXX */
431 sc->sc_promisc = 0;
432
433 return 0;
434 }
435
436 static int
437 hvn_init(struct ifnet *ifp)
438 {
439 struct hvn_softc *sc = IFP2SC(ifp);
440 int error;
441
442 hvn_stop(ifp, 0);
443
444 error = hvn_iff(sc);
445 if (error)
446 return error;
447
448 error = hvn_rndis_open(sc);
449 if (error == 0) {
450 ifp->if_flags |= IFF_RUNNING;
451 ifp->if_flags &= ~IFF_OACTIVE;
452 }
453 return error;
454 }
455
456 static void
457 hvn_stop(struct ifnet *ifp, int disable)
458 {
459 struct hvn_softc *sc = IFP2SC(ifp);
460
461 hvn_rndis_close(sc);
462
463 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
464 }
465
/*
 * if_start handler: drain the send queue, encapsulating each mbuf
 * chain into an RNDIS packet message and handing it to the host.
 */
static void
hvn_start(struct ifnet *ifp)
{
	struct hvn_softc *sc = IFP2SC(ifp);
	struct hvn_tx_desc *txd;
	struct mbuf *m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		if (!sc->sc_tx_avail) {
			/* transient: Tx completions will clear OACTIVE */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (hvn_encap(sc, m, &txd)) {
			/* the chain is too large */
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		/* Tap the frame out before it goes to the host. */
		bpf_mtap(ifp, m, BPF_D_OUT);

		if (hvn_rndis_output(sc, txd)) {
			/* Send failed: unmap and recycle the descriptor. */
			hvn_decap(sc, txd);
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		sc->sc_tx_next++;
	}
}
506
507 static inline char *
508 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
509 size_t datalen, uint32_t type)
510 {
511 struct rndis_pktinfo *pi;
512 size_t pi_size = sizeof(*pi) + datalen;
513 char *cp;
514
515 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
516 pktsize);
517
518 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
519 pi = (struct rndis_pktinfo *)cp;
520 pi->rm_size = pi_size;
521 pi->rm_type = type;
522 pi->rm_pktinfooffset = sizeof(*pi);
523 pkt->rm_pktinfolen += pi_size;
524 pkt->rm_dataoffset += pi_size;
525 pkt->rm_len += pi_size;
526
527 return (char *)pi->rm_data;
528 }
529
530 static int
531 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
532 {
533 struct hvn_tx_desc *txd;
534 struct rndis_packet_msg *pkt;
535 bus_dma_segment_t *seg;
536 size_t pktlen;
537 int i, rv;
538
539 do {
540 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
541 sc->sc_tx_next++;
542 } while (!txd->txd_ready);
543 txd->txd_ready = 0;
544
545 pkt = txd->txd_req;
546 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
547 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
548 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
549 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
550 pkt->rm_datalen = m->m_pkthdr.len;
551 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
552 pkt->rm_pktinfolen = 0;
553
554 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
555 BUS_DMA_NOWAIT);
556 switch (rv) {
557 case 0:
558 break;
559 case EFBIG:
560 if (m_defrag(m, M_NOWAIT) != NULL &&
561 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
562 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
563 break;
564 /* FALLTHROUGH */
565 default:
566 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
567 return -1;
568 }
569 txd->txd_buf = m;
570
571 if (vlan_has_tag(m)) {
572 uint32_t vlan;
573 char *cp;
574 uint16_t tag;
575
576 tag = vlan_get_tag(m);
577 vlan = NDIS_VLAN_INFO_MAKE(EVL_VLANOFTAG(tag),
578 EVL_PRIOFTAG(tag), EVL_CFIOFTAG(tag));
579 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
580 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
581 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
582 }
583
584 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
585 M_CSUM_TCPv4)) {
586 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
587 char *cp;
588
589 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
590 csum |= NDIS_TXCSUM_INFO_IPCS;
591 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
592 csum |= NDIS_TXCSUM_INFO_TCPCS;
593 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
594 csum |= NDIS_TXCSUM_INFO_UDPCS;
595 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
596 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
597 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
598 }
599
600 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
601 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
602
603 /* Attach an RNDIS message to the first slot */
604 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
605 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
606 txd->txd_sgl[0].gpa_len = pktlen;
607 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
608
609 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
610 seg = &txd->txd_dmap->dm_segs[i];
611 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
612 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
613 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
614 }
615
616 *txd0 = txd;
617
618 atomic_dec_uint(&sc->sc_tx_avail);
619
620 return 0;
621 }
622
/*
 * Undo hvn_encap(): unload the payload DMA map and return the
 * descriptor to the free pool.  Used when handing the packet to
 * the host failed; the mbuf itself is freed by the caller.
 */
static void
hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
{
	struct ifnet *ifp = SC2IFP(sc);

	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
	    0, txd->txd_dmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
	txd->txd_buf = NULL;
	txd->txd_nsge = 0;
	txd->txd_ready = 1;
	atomic_inc_uint(&sc->sc_tx_avail);
	/* A descriptor is free again, so transmission can resume. */
	ifp->if_flags &= ~IFF_OACTIVE;
}
638
639 static void
640 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
641 {
642 struct ifnet *ifp = SC2IFP(sc);
643 struct hvn_tx_desc *txd;
644 struct mbuf *m;
645 uint32_t id = tid >> 32;
646
647 if ((tid & 0xffffffffU) != 0)
648 return;
649
650 id -= HVN_NVS_CHIM_SIG;
651 if (id >= HVN_TX_DESC) {
652 device_printf(sc->sc_dev, "tx packet index too large: %u", id);
653 return;
654 }
655
656 txd = &sc->sc_tx_desc[id];
657
658 if ((m = txd->txd_buf) == NULL) {
659 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
660 return;
661 }
662 txd->txd_buf = NULL;
663
664 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
665 0, txd->txd_dmap->dm_mapsize,
666 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
667 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
668 m_freem(m);
669 if_statinc(ifp, if_opackets);
670
671 txd->txd_ready = 1;
672
673 atomic_inc_uint(&sc->sc_tx_avail);
674 ifp->if_flags &= ~IFF_OACTIVE;
675 }
676
/*
 * Allocate the shared Rx buffer, publish it to the host via a GPADL
 * handle and connect it with an NVS RXBUF_CONN command.  On failure
 * all partially created resources are released before returning -1.
 */
static int
hvn_rx_ring_create(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_conn cmd;
	struct hvn_nvs_rxbuf_conn_resp *rsp;
	uint64_t tid;

	/* Older protocol versions are limited to a smaller buffer. */
	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
		sc->sc_rx_size = 15 * 1024 * 1024;	/* 15MB */
	else
		sc->sc_rx_size = 16 * 1024 * 1024; 	/* 16MB */
	sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
	    sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE,
	    HYPERV_DMA_SLEEPOK);
	if (sc->sc_rx_ring == NULL) {
		DPRINTF("%s: failed to allocate Rx ring buffer\n",
		    device_xname(sc->sc_dev));
		return -1;
	}
	if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
	    &sc->sc_rx_hndl)) {
		DPRINTF("%s: failed to obtain a PA handle\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
	cmd.nvs_gpadl = sc->sc_rx_hndl;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
		goto errout;

	/* The response was copied into sc_nvsrsp by hvn_nvs_intr(). */
	rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
	if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
		DPRINTF("%s: failed to set up the Rx ring\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (rsp->nvs_nsect > 1) {
		DPRINTF("%s: invalid number of Rx ring sections: %u\n",
		    device_xname(sc->sc_dev), rsp->nvs_nsect);
		/* Already connected: use the full destroy path. */
		hvn_rx_ring_destroy(sc);
		return -1;
	}
	return 0;

 errout:
	if (sc->sc_rx_hndl) {
		vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
		sc->sc_rx_hndl = 0;
	}
	if (sc->sc_rx_ring) {
		hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
		sc->sc_rx_ring = NULL;
	}
	return -1;
}
737
/*
 * Disconnect the Rx buffer from the host and release the GPADL
 * handle and DMA memory.  A no-op if no ring was ever created.
 */
static int
hvn_rx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_disconn cmd;
	uint64_t tid;

	if (sc->sc_rx_ring == NULL)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	/* timo == 0: fire and forget, no completion is awaited. */
	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
		return -1;

	/* Give the host a moment to process the disconnect. */
	delay(100);

	vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
	sc->sc_rx_hndl = 0;

	hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
	sc->sc_rx_ring = NULL;

	return 0;
}
765
766 static int
767 hvn_tx_ring_create(struct hvn_softc *sc)
768 {
769 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
770 struct hvn_tx_desc *txd;
771 bus_dma_segment_t *seg;
772 size_t msgsize;
773 int i, rsegs;
774 paddr_t pa;
775
776 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
777
778 /* Allocate memory to store RNDIS messages */
779 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
780 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
781 DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
782 device_xname(sc->sc_dev));
783 goto errout;
784 }
785 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
786 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
787 DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
788 device_xname(sc->sc_dev));
789 goto errout;
790 }
791 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
792 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
793 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
794 DPRINTF("%s: failed to create map for RDNIS messages\n",
795 device_xname(sc->sc_dev));
796 goto errout;
797 }
798 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
799 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
800 DPRINTF("%s: failed to create map for RDNIS messages\n",
801 device_xname(sc->sc_dev));
802 goto errout;
803 }
804
805 for (i = 0; i < HVN_TX_DESC; i++) {
806 txd = &sc->sc_tx_desc[i];
807 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
808 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
809 &txd->txd_dmap)) {
810 DPRINTF("%s: failed to create map for TX descriptors\n",
811 device_xname(sc->sc_dev));
812 goto errout;
813 }
814 seg = &sc->sc_tx_rmap->dm_segs[0];
815 pa = seg->ds_addr + (msgsize * i);
816 txd->txd_gpa.gpa_page = atop(pa);
817 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
818 txd->txd_gpa.gpa_len = msgsize;
819 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
820 txd->txd_id = i + HVN_NVS_CHIM_SIG;
821 txd->txd_ready = 1;
822 }
823 sc->sc_tx_avail = HVN_TX_DESC;
824
825 return 0;
826
827 errout:
828 hvn_tx_ring_destroy(sc);
829 return -1;
830 }
831
/*
 * Release all Tx resources: per-descriptor DMA maps and in-flight
 * mbufs, then the shared RNDIS message area.  Tolerates partially
 * constructed state, so it doubles as the create-path error unwind.
 */
static void
hvn_tx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_tx_desc *txd;
	int i;

	for (i = 0; i < HVN_TX_DESC; i++) {
		txd = &sc->sc_tx_desc[i];
		if (txd->txd_dmap == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
		    0, txd->txd_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
		bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
		txd->txd_dmap = NULL;
		if (txd->txd_buf == NULL)
			continue;
		/* Drop any mbuf that was still awaiting completion. */
		m_freem(txd->txd_buf);
		txd->txd_buf = NULL;
	}
	if (sc->sc_tx_rmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
		    0, sc->sc_tx_rmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
		sc->sc_tx_rmap = NULL;
	}
	if (sc->sc_tx_msgs != NULL) {
		size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);

		bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
		    msgsize * HVN_TX_DESC);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
		sc->sc_tx_msgs = NULL;
	}
}
870
871 static int
872 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
873 {
874 size_t addrlen = ETHER_ADDR_LEN;
875 int rv;
876
877 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
878 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
879 rv = -1;
880 return rv;
881 }
882
883 static void
884 hvn_get_link_status(struct hvn_softc *sc)
885 {
886 uint32_t state;
887 size_t len = sizeof(state);
888
889 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
890 &state, &len) == 0)
891 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
892 LINK_STATE_UP : LINK_STATE_DOWN;
893 }
894
/*
 * Open the vmbus channel, negotiate the NVS protocol version
 * (newest first) and initialize the NDIS layer on top of it.
 */
static int
hvn_nvs_attach(struct hvn_softc *sc)
{
	static const uint32_t protos[] = {
		HVN_NVS_PROTO_VERSION_5,
		HVN_NVS_PROTO_VERSION_4,
		HVN_NVS_PROTO_VERSION_2,
		HVN_NVS_PROTO_VERSION_1
	};
	struct hvn_nvs_init cmd;
	struct hvn_nvs_init_resp *rsp;
	struct hvn_nvs_ndis_init ncmd;
	struct hvn_nvs_ndis_conf ccmd;
	uint32_t ndisver, ringsize;
	uint64_t tid;
	int i;

	sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);

	/* We need to be able to fit all RNDIS control and data messages */
	ringsize = HVN_RNDIS_CTLREQS *
	    (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
	    HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
	    (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));

	sc->sc_chan->ch_flags &= ~CHF_BATCHED;

	/* Associate our interrupt handler with the channel */
	if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
	    hvn_nvs_intr, sc)) {
		DPRINTF("%s: failed to open channel\n",
		    device_xname(sc->sc_dev));
		kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
		return -1;
	}

	/*
	 * NOTE(review): the error returns below leave the channel open
	 * and sc_nvsbuf allocated, and hvn_attach() does not clean up
	 * after a failed hvn_nvs_attach() -- verify/fix upstream.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_INIT;
	for (i = 0; i < __arraycount(protos); i++) {
		cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
		tid = atomic_inc_uint_nv(&sc->sc_nvstid);
		if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
			return -1;

		/* The response was copied into sc_nvsrsp by hvn_nvs_intr(). */
		rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
		if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
			sc->sc_proto = protos[i];
			break;
		}
	}
	if (i == __arraycount(protos)) {
		DPRINTF("%s: failed to negotiate NVSP version\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	/* Protocol 2+ takes an NDIS configuration message (MTU, VLAN). */
	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
		memset(&ccmd, 0, sizeof(ccmd));
		ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
		ccmd.nvs_mtu = HVN_MAXMTU;
		ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;

		tid = atomic_inc_uint_nv(&sc->sc_nvstid);
		if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
			return -1;
	}

	/* Pick the NDIS version matching the negotiated NVS protocol. */
	memset(&ncmd, 0, sizeof(ncmd));
	ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
		ndisver = NDIS_VERSION_6_1;
	else
		ndisver = NDIS_VERSION_6_30;
	ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
	ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;

	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
		return -1;

	sc->sc_ndisver = ndisver;

	return 0;
}
979
/*
 * Channel interrupt handler: drain all pending packets from the
 * channel and dispatch them by packet type -- command completions,
 * inbound RNDIS data and in-band notifications.
 */
static void
hvn_nvs_intr(void *arg)
{
	struct hvn_softc *sc = arg;
	struct ifnet *ifp = SC2IFP(sc);
	struct vmbus_chanpkt_hdr *cph;
	const struct hvn_nvs_hdr *nvs;
	uint64_t rid;
	uint32_t rlen;
	int rv;
	bool dotx = false;		/* saw a Tx completion? */

	for (;;) {
		rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
		    HVN_NVS_BUFSIZE, &rlen, &rid, 1);
		if (rv != 0 || rlen == 0) {
			/* EAGAIN just means the ring is empty. */
			if (rv != EAGAIN)
				device_printf(sc->sc_dev,
				    "failed to receive an NVSP packet\n");
			break;
		}
		cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
		nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);

		if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_INIT_RESP:
			case HVN_NVS_TYPE_RXBUF_CONNRESP:
			case HVN_NVS_TYPE_CHIM_CONNRESP:
			case HVN_NVS_TYPE_SUBCH_RESP:
				/* copy the response back */
				memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
				sc->sc_nvsdone = 1;
				/* Wake up the waiter in hvn_nvs_cmd(). */
				wakeup(&sc->sc_nvsrsp);
				break;
			case HVN_NVS_TYPE_RNDIS_ACK:
				dotx = true;
				hvn_txeof(sc, cph->cph_tid);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on completion\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_RNDIS:
				hvn_rndis_input(sc, cph->cph_tid, cph);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on receive\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_TXTBL_NOTE:
				/* Useless; ignore */
				break;
			default:
				device_printf(sc->sc_dev,
				    "got notify, nvs type %u\n", nvs->nvs_type);
				break;
			}
		} else
			device_printf(sc->sc_dev,
			    "unknown NVSP packet type %u\n", cph->cph_type);
	}

	/* Completions freed descriptors: kick the transmit path. */
	if (dotx)
		if_schedule_deferred_start(ifp);
}
1054
/*
 * Send an NVS command over the channel, retrying while the ring is
 * full.  If "timo" is non-zero, wait roughly that many milliseconds
 * for the completion (copied into sc_nvsrsp by hvn_nvs_intr()); when
 * cold, poll the channel by calling the interrupt handler directly.
 */
static int
hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
    int timo)
{
	struct hvn_nvs_hdr *hdr = cmd;
	int tries = 10;
	int rv, s;

	sc->sc_nvsdone = 0;

	do {
		rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
		    tid, VMBUS_CHANPKT_TYPE_INBAND,
		    timo ? VMBUS_CHANPKT_FLAG_RC : 0);
		if (rv == EAGAIN) {
			/* Ring full: back off ~1ms and retry. */
			if (cold)
				delay(1000);
			else
				tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
		} else if (rv) {
			DPRINTF("%s: NVSP operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->nvs_type, rv);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
		return rv;
	}

	/* No response expected: done. */
	if (timo == 0)
		return 0;

	do {
		if (cold) {
			/* Interrupts are not running yet: poll manually. */
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
			    mstohz(1));
	} while (--timo > 0 && sc->sc_nvsdone != 1);

	if (timo == 0 && sc->sc_nvsdone != 1) {
		device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
		    hdr->nvs_type);
		return ETIMEDOUT;
	}
	return 0;
}
1108
1109 static int
1110 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1111 {
1112 struct hvn_nvs_rndis_ack cmd;
1113 int tries = 5;
1114 int rv;
1115
1116 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1117 cmd.nvs_status = HVN_NVS_STATUS_OK;
1118 do {
1119 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1120 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1121 if (rv == EAGAIN)
1122 delay(10);
1123 else if (rv) {
1124 DPRINTF("%s: NVSP acknowledgement error %d\n",
1125 device_xname(sc->sc_dev), rv);
1126 return rv;
1127 }
1128 } while (rv != 0 && --tries > 0);
1129 return rv;
1130 }
1131
1132 static void
1133 hvn_nvs_detach(struct hvn_softc *sc)
1134 {
1135
1136 if (vmbus_channel_close(sc->sc_chan) == 0) {
1137 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1138 sc->sc_nvsbuf = NULL;
1139 }
1140 }
1141
/*
 * Take an RNDIS control request off the free queue, sleeping in
 * one-tick intervals until one shows up (hvn_free_cmd() issues the
 * wakeup).  Never returns NULL.
 */
static inline struct rndis_cmd *
hvn_alloc_cmd(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;

	mutex_enter(&sc->sc_cntl_fqlck);
	while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
		/* XXX use condvar(9) instead of mtsleep */
		mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
		    &sc->sc_cntl_fqlck);
	TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	return rc;
}
1156
/*
 * Queue a control request on the submit queue.  It stays there until
 * the host's completion moves it (hvn_complete_cmd()) or the sender
 * takes it back (hvn_rollback_cmd()).
 */
static inline void
hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_sqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_sqlck);
}
1165
1166 static inline struct rndis_cmd *
1167 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1168 {
1169 struct rndis_cmd *rc;
1170
1171 mutex_enter(&sc->sc_cntl_sqlck);
1172 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1173 if (rc->rc_id == id) {
1174 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1175 break;
1176 }
1177 }
1178 mutex_exit(&sc->sc_cntl_sqlck);
1179 if (rc != NULL) {
1180 mutex_enter(&sc->sc_cntl_cqlck);
1181 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1182 mutex_exit(&sc->sc_cntl_cqlck);
1183 }
1184 return rc;
1185 }
1186
/*
 * Unlink a finished control request from the completion queue; the
 * caller still owns it and is expected to hvn_free_cmd() it.
 */
static inline void
hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_cqlck);
	TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_cqlck);
}
1195
1196 static inline int
1197 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1198 {
1199 struct rndis_cmd *rn;
1200
1201 mutex_enter(&sc->sc_cntl_sqlck);
1202 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1203 if (rn == rc) {
1204 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1205 mutex_exit(&sc->sc_cntl_sqlck);
1206 return 0;
1207 }
1208 }
1209 mutex_exit(&sc->sc_cntl_sqlck);
1210 return -1;
1211 }
1212
/*
 * Scrub a control request and put it back on the free queue, waking
 * anyone waiting in hvn_alloc_cmd().  Note that only the leading
 * sizeof(struct rndis_packet_msg) bytes of the request page are
 * cleared, not the whole PAGE_SIZE mapping -- presumably enough for
 * every request type issued here; verify if new request types are
 * added.
 */
static inline void
hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
	memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
	memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
	mutex_enter(&sc->sc_cntl_fqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	wakeup(&sc->sc_cntl_fq);
}
1225
1226 static int
1227 hvn_rndis_attach(struct hvn_softc *sc)
1228 {
1229 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1230 struct rndis_init_req *req;
1231 struct rndis_init_comp *cmp;
1232 struct rndis_cmd *rc;
1233 int i, rv;
1234
1235 /* RNDIS control message queues */
1236 TAILQ_INIT(&sc->sc_cntl_sq);
1237 TAILQ_INIT(&sc->sc_cntl_cq);
1238 TAILQ_INIT(&sc->sc_cntl_fq);
1239 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1240 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1241 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1242
1243 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1244 rc = &sc->sc_cntl_msgs[i];
1245 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1246 dmaflags, &rc->rc_dmap)) {
1247 DPRINTF("%s: failed to create RNDIS command map\n",
1248 device_xname(sc->sc_dev));
1249 goto errout;
1250 }
1251 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1252 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1253 DPRINTF("%s: failed to allocate RNDIS command\n",
1254 device_xname(sc->sc_dev));
1255 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1256 goto errout;
1257 }
1258 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1259 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1260 DPRINTF("%s: failed to allocate RNDIS command\n",
1261 device_xname(sc->sc_dev));
1262 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1263 rc->rc_nsegs);
1264 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1265 goto errout;
1266 }
1267 memset(rc->rc_req, 0, PAGE_SIZE);
1268 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1269 PAGE_SIZE, NULL, dmaflags)) {
1270 DPRINTF("%s: failed to load RNDIS command map\n",
1271 device_xname(sc->sc_dev));
1272 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1273 rc->rc_nsegs);
1274 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1275 goto errout;
1276 }
1277 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1278 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1279 }
1280
1281 rc = hvn_alloc_cmd(sc);
1282
1283 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1284 BUS_DMASYNC_PREREAD);
1285
1286 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1287
1288 req = rc->rc_req;
1289 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1290 req->rm_len = sizeof(*req);
1291 req->rm_rid = rc->rc_id;
1292 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1293 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1294 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1295
1296 rc->rc_cmplen = sizeof(*cmp);
1297
1298 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1299 BUS_DMASYNC_PREWRITE);
1300
1301 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1302 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1303 device_xname(sc->sc_dev), rv);
1304 hvn_free_cmd(sc, rc);
1305 goto errout;
1306 }
1307 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1308 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1309 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1310 device_xname(sc->sc_dev), cmp->rm_status);
1311 hvn_free_cmd(sc, rc);
1312 goto errout;
1313 }
1314
1315 hvn_free_cmd(sc, rc);
1316
1317 /* Initialize RNDIS Data command */
1318 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1319 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1320 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1321 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1322
1323 return 0;
1324
1325 errout:
1326 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1327 rc = &sc->sc_cntl_msgs[i];
1328 if (rc->rc_req == NULL)
1329 continue;
1330 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1331 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1332 rc->rc_req = NULL;
1333 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1334 }
1335 return -1;
1336 }
1337
1338 static int
1339 hvn_set_capabilities(struct hvn_softc *sc)
1340 {
1341 struct ndis_offload_params params;
1342 size_t len = sizeof(params);
1343
1344 memset(¶ms, 0, sizeof(params));
1345
1346 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1347 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1348 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1349 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1350 } else {
1351 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1352 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1353 }
1354
1355 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1356 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1357 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1358 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1359 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1360 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1361 }
1362
1363 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, ¶ms, len);
1364 }
1365
1366 static int
1367 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1368 {
1369 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1370 struct rndis_msghdr *hdr = rc->rc_req;
1371 struct vmbus_gpa sgl[1];
1372 int tries = 10;
1373 int rv, s;
1374
1375 KASSERT(timo > 0);
1376
1377 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1378 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1379 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1380
1381 sgl[0].gpa_page = rc->rc_gpa;
1382 sgl[0].gpa_len = hdr->rm_len;
1383 sgl[0].gpa_ofs = 0;
1384
1385 rc->rc_done = 0;
1386
1387 hvn_submit_cmd(sc, rc);
1388
1389 do {
1390 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1391 sizeof(*msg), rc->rc_id);
1392 if (rv == EAGAIN) {
1393 if (cold)
1394 delay(1000);
1395 else
1396 tsleep(rc, PRIBIO, "rndisout", mstohz(1));
1397 } else if (rv) {
1398 DPRINTF("%s: RNDIS operation %u send error %d\n",
1399 device_xname(sc->sc_dev), hdr->rm_type, rv);
1400 hvn_rollback_cmd(sc, rc);
1401 return rv;
1402 }
1403 } while (rv != 0 && --tries > 0);
1404
1405 if (tries == 0 && rv != 0) {
1406 device_printf(sc->sc_dev,
1407 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1408 return rv;
1409 }
1410 if (vmbus_channel_is_revoked(sc->sc_chan)) {
1411 /* No response */
1412 return 0;
1413 }
1414
1415 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1416 BUS_DMASYNC_POSTWRITE);
1417
1418 do {
1419 if (cold) {
1420 delay(1000);
1421 s = splnet();
1422 hvn_nvs_intr(sc);
1423 splx(s);
1424 } else
1425 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
1426 } while (--timo > 0 && rc->rc_done != 1);
1427
1428 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1429 BUS_DMASYNC_POSTREAD);
1430
1431 if (rc->rc_done != 1) {
1432 rv = timo == 0 ? ETIMEDOUT : EINTR;
1433 if (hvn_rollback_cmd(sc, rc)) {
1434 hvn_release_cmd(sc, rc);
1435 rv = 0;
1436 } else if (rv == ETIMEDOUT) {
1437 device_printf(sc->sc_dev,
1438 "RNDIS operation %u timed out\n", hdr->rm_type);
1439 }
1440 return rv;
1441 }
1442
1443 hvn_release_cmd(sc, rc);
1444 return 0;
1445 }
1446
1447 static void
1448 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1449 {
1450 struct vmbus_chanpkt_prplist *cp = arg;
1451 uint32_t off, len, type;
1452 int i;
1453
1454 if (sc->sc_rx_ring == NULL) {
1455 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1456 return;
1457 }
1458
1459 for (i = 0; i < cp->cp_range_cnt; i++) {
1460 off = cp->cp_range[i].gpa_ofs;
1461 len = cp->cp_range[i].gpa_len;
1462
1463 KASSERT(off + len <= sc->sc_rx_size);
1464 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1465
1466 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1467 switch (type) {
1468 /* data message */
1469 case REMOTE_NDIS_PACKET_MSG:
1470 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1471 break;
1472 /* completion messages */
1473 case REMOTE_NDIS_INITIALIZE_CMPLT:
1474 case REMOTE_NDIS_QUERY_CMPLT:
1475 case REMOTE_NDIS_SET_CMPLT:
1476 case REMOTE_NDIS_RESET_CMPLT:
1477 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1478 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1479 break;
1480 /* notification message */
1481 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1482 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1483 break;
1484 default:
1485 device_printf(sc->sc_dev,
1486 "unhandled RNDIS message type %u\n", type);
1487 break;
1488 }
1489 }
1490
1491 hvn_nvs_ack(sc, tid);
1492 }
1493
/*
 * Wrap "len" bytes of received packet data in a freshly allocated
 * mbuf, attaching a cluster or external storage when the data plus
 * alignment slack does not fit in the header mbuf.  Returns NULL if
 * any allocation fails.
 */
static inline struct mbuf *
hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
{
	struct ifnet *ifp = SC2IFP(sc);
	struct mbuf *m;
	size_t size = len + ETHER_ALIGN;	/* slack to realign payload */

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	if (size > MHLEN) {
		if (size <= MCLBYTES)
			MCLGET(m, M_NOWAIT);
		else
			MEXTMALLOC(m, size, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}

	/*
	 * Trim the slack off the front so the payload beyond the
	 * Ethernet header ends up 4-byte aligned, then copy the frame.
	 */
	m->m_len = m->m_pkthdr.len = size;
	m_adj(m, ETHER_ALIGN);
	m_copyback(m, 0, len, buf);
	m_set_rcvif(m, ifp);
	return m;
}
1522
/*
 * Parse one RNDIS data packet: validate the embedded offsets, copy the
 * payload into an mbuf, apply any per-packet info (checksum results,
 * VLAN tag) and hand the mbuf to the network stack.  "buf" points at
 * the RNDIS message, "len" is the number of valid bytes.
 */
static void
hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
{
	struct ifnet *ifp = SC2IFP(sc);
	struct rndis_packet_msg *pkt;
	struct rndis_pktinfo *pi;
	uint32_t csum, vlan;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (len < sizeof(*pkt)) {
		device_printf(sc->sc_dev, "data packet too short: %u\n",
		    len);
		return;
	}

	pkt = (struct rndis_packet_msg *)buf;
	/*
	 * NOTE(review): rm_dataoffset is relative to RNDIS_HEADER_OFFSET
	 * (see the hvn_devget() call below), yet this bounds check does
	 * not add that base -- confirm a maximal rm_dataoffset cannot
	 * read past "len".
	 */
	if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
		device_printf(sc->sc_dev,
		    "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
		    pkt->rm_datalen);
		return;
	}

	if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
	    pkt->rm_datalen)) == NULL) {
		if_statinc(ifp, if_ierrors);
		return;
	}

	/* Bad pktinfo does not discard the frame, only its metadata. */
	if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
		device_printf(sc->sc_dev,
		    "pktinfo is out of bounds: %u@%u vs %u\n",
		    pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
		goto done;
	}

	pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
	    pkt->rm_pktinfooffset);
	/*
	 * Walk the per-packet info elements.  NOTE(review): an element
	 * with rm_size == 0 would never advance this loop -- confirm
	 * the host cannot produce one.
	 */
	while (pkt->rm_pktinfolen > 0) {
		if (pi->rm_size > pkt->rm_pktinfolen) {
			device_printf(sc->sc_dev,
			    "invalid pktinfo size: %u/%u\n", pi->rm_size,
			    pkt->rm_pktinfolen);
			break;
		}

		switch (pi->rm_type) {
		case NDIS_PKTINFO_TYPE_CSUM:
			/* Host-verified checksums: mark them good. */
			memcpy(&csum, pi->rm_data, sizeof(csum));
			if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			break;
		case NDIS_PKTINFO_TYPE_VLAN:
			/* 0xffffffff means "no VLAN tag present". */
			memcpy(&vlan, pi->rm_data, sizeof(vlan));
			if (vlan != 0xffffffff) {
				uint16_t t = NDIS_VLAN_INFO_ID(vlan);
				t |= NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS;
				t |= NDIS_VLAN_INFO_CFI(vlan) << EVL_CFI_BITS;
				vlan_set_tag(m, t);
			}
			break;
		default:
			DPRINTF("%s: unhandled pktinfo type %u\n",
			    device_xname(sc->sc_dev), pi->rm_type);
			break;
		}

		pkt->rm_pktinfolen -= pi->rm_size;
		pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
	}

done:
	if_percpuq_enqueue(sc->sc_ipq, m);
}
1604
1605 static void
1606 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1607 {
1608 struct rndis_cmd *rc;
1609 uint32_t id;
1610
1611 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1612 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1613 if (len < rc->rc_cmplen)
1614 device_printf(sc->sc_dev,
1615 "RNDIS response %u too short: %u\n", id, len);
1616 else
1617 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1618 if (len > rc->rc_cmplen &&
1619 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1620 device_printf(sc->sc_dev,
1621 "RNDIS response %u too large: %u\n", id, len);
1622 else if (len > rc->rc_cmplen)
1623 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1624 len - rc->rc_cmplen);
1625 rc->rc_done = 1;
1626 wakeup(rc);
1627 } else {
1628 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1629 device_xname(sc->sc_dev), id);
1630 }
1631 }
1632
1633 static int
1634 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1635 {
1636 uint64_t rid = (uint64_t)txd->txd_id << 32;
1637 int rv;
1638
1639 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1640 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1641 if (rv) {
1642 DPRINTF("%s: RNDIS data send error %d\n",
1643 device_xname(sc->sc_dev), rv);
1644 return rv;
1645 }
1646 return 0;
1647 }
1648
1649 static void
1650 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1651 {
1652 struct ifnet *ifp = SC2IFP(sc);
1653 uint32_t status;
1654 int link_state = sc->sc_link_state;
1655
1656 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1657 switch (status) {
1658 case RNDIS_STATUS_MEDIA_CONNECT:
1659 sc->sc_link_state = LINK_STATE_UP;
1660 break;
1661 case RNDIS_STATUS_MEDIA_DISCONNECT:
1662 sc->sc_link_state = LINK_STATE_DOWN;
1663 break;
1664 /* Ignore these */
1665 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1666 return;
1667 default:
1668 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1669 status);
1670 return;
1671 }
1672 if (link_state != sc->sc_link_state)
1673 if_link_state_change(ifp, sc->sc_link_state);
1674 }
1675
/*
 * Synchronously query the RNDIS OID "oid".  On entry *length holds the
 * capacity of "res"; on success it is updated to the number of bytes
 * copied out of the completion buffer.  Returns 0 on success, EINVAL
 * when the response would not fit (*length left untouched), EIO on an
 * RNDIS-level failure (*length zeroed), or the hvn_rndis_cmd() error.
 */
static int
hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
{
	struct rndis_cmd *rc;
	struct rndis_query_req *req;
	struct rndis_query_comp *cmp;
	size_t olength = *length;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	/* Request ids come from a driver-wide counter. */
	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_QUERY_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;
	req->rm_oid = oid;
	/* Info buffer offsets are relative to the end of the header. */
	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;

	rc->rc_cmplen = sizeof(*cmp);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: QUERY_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		return rv;
	}

	cmp = (struct rndis_query_comp *)&rc->rc_cmp;
	switch (cmp->rm_status) {
	case RNDIS_STATUS_SUCCESS:
		if (cmp->rm_infobuflen > olength) {
			rv = EINVAL;
			break;
		}
		memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
		*length = cmp->rm_infobuflen;
		break;
	default:
		*length = 0;
		rv = EIO;
		break;
	}

	hvn_free_cmd(sc, rc);
	return rv;
}
1730
/*
 * Issue a synchronous RNDIS SET for "oid" with "length" bytes of
 * parameter data from "data".  The parameters are copied directly
 * behind the request header (matching rm_infobufoffset); the whole
 * message must fit in the single request page.  Returns 0 on success,
 * EIO if the host rejects the set, or the hvn_rndis_cmd() error.
 */
static int
hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
{
	struct rndis_cmd *rc;
	struct rndis_set_req *req;
	struct rndis_set_comp *cmp;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	/* Request ids come from a driver-wide counter. */
	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_SET_MSG;
	req->rm_len = sizeof(*req) + length;
	req->rm_rid = rc->rc_id;
	req->rm_oid = oid;
	/* Info buffer offsets are relative to the end of the header. */
	req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;

	rc->rc_cmplen = sizeof(*cmp);

	if (length > 0) {
		/* Parameter data is placed right after the request. */
		KASSERT(sizeof(*req) + length < PAGE_SIZE);
		req->rm_infobuflen = length;
		memcpy(req + 1, data, length);
	}

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: SET_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
		hvn_free_cmd(sc, rc);
		return rv;
	}

	cmp = (struct rndis_set_comp *)&rc->rc_cmp;
	if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
		rv = EIO;

	hvn_free_cmd(sc, rc);
	return rv;
}
1778
1779 static int
1780 hvn_rndis_open(struct hvn_softc *sc)
1781 {
1782 uint32_t filter;
1783 int rv;
1784
1785 if (sc->sc_promisc)
1786 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1787 else
1788 filter = RNDIS_PACKET_TYPE_BROADCAST |
1789 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1790 RNDIS_PACKET_TYPE_DIRECTED;
1791
1792 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1793 &filter, sizeof(filter));
1794 if (rv) {
1795 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1796 device_xname(sc->sc_dev), filter);
1797 }
1798 return rv;
1799 }
1800
1801 static int
1802 hvn_rndis_close(struct hvn_softc *sc)
1803 {
1804 uint32_t filter = 0;
1805 int rv;
1806
1807 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1808 &filter, sizeof(filter));
1809 if (rv) {
1810 DPRINTF("%s: failed to clear RNDIS filter\n",
1811 device_xname(sc->sc_dev));
1812 }
1813 return rv;
1814 }
1815
/*
 * Shut the RNDIS layer down: send a best-effort HALT to the host
 * (failure is only logged) and destroy the control queue locks.
 * NOTE(review): the per-request DMA pages set up by hvn_rndis_attach()
 * are not released here -- confirm they are reclaimed elsewhere on
 * detach.
 */
static void
hvn_rndis_detach(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;
	struct rndis_halt_req *req;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_HALT_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: HALT_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
	}
	hvn_free_cmd(sc, rc);

	mutex_destroy(&sc->sc_cntl_sqlck);
	mutex_destroy(&sc->sc_cntl_cqlck);
	mutex_destroy(&sc->sc_cntl_fqlck);
}
1848