1 /* $NetBSD: if_hvn.c,v 1.21 2021/06/16 00:21:18 riastradh Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.21 2021/06/16 00:21:18 riastradh Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70 #ifndef EVL_CFI_BITS
71 #define EVL_CFI_BITS 12
72 #endif
73
74 #define HVN_NVS_MSGSIZE 32
75 #define HVN_NVS_BUFSIZE PAGE_SIZE
76
77 /*
78 * RNDIS control interface
79 */
80 #define HVN_RNDIS_CTLREQS 4
81 #define HVN_RNDIS_BUFSIZE 512
82
83 struct rndis_cmd {
84 uint32_t rc_id;
85 struct hvn_nvs_rndis rc_msg;
86 void *rc_req;
87 bus_dmamap_t rc_dmap;
88 bus_dma_segment_t rc_segs;
89 int rc_nsegs;
90 uint64_t rc_gpa;
91 struct rndis_packet_msg rc_cmp;
92 uint32_t rc_cmplen;
93 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
94 int rc_done;
95 TAILQ_ENTRY(rndis_cmd) rc_entry;
96 };
97 TAILQ_HEAD(rndis_queue, rndis_cmd);
98
99 #define HVN_MAXMTU (9 * 1024)
100
101 #define HVN_RNDIS_XFER_SIZE 2048
102
103 /*
104 * Tx ring
105 */
106 #define HVN_TX_DESC 256
107 #define HVN_TX_FRAGS 15 /* 31 is the max */
108 #define HVN_TX_FRAG_SIZE PAGE_SIZE
109 #define HVN_TX_PKT_SIZE 16384
110
111 #define HVN_RNDIS_PKT_LEN \
112 (sizeof(struct rndis_packet_msg) + \
113 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
114 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
115
116 struct hvn_tx_desc {
117 uint32_t txd_id;
118 int txd_ready;
119 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
120 int txd_nsge;
121 struct mbuf *txd_buf;
122 bus_dmamap_t txd_dmap;
123 struct vmbus_gpa txd_gpa;
124 struct rndis_packet_msg *txd_req;
125 };
126
127 struct hvn_softc {
128 device_t sc_dev;
129
130 struct vmbus_softc *sc_vmbus;
131 struct vmbus_channel *sc_chan;
132 bus_dma_tag_t sc_dmat;
133
134 struct ethercom sc_ec;
135 struct ifmedia sc_media;
136 kmutex_t sc_media_lock; /* XXX */
137 struct if_percpuq *sc_ipq;
138 int sc_link_state;
139 int sc_promisc;
140
141 uint32_t sc_flags;
142 #define HVN_SCF_ATTACHED __BIT(0)
143
144 /* NVS protocol */
145 int sc_proto;
146 uint32_t sc_nvstid;
147 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
148 uint8_t *sc_nvsbuf;
149 int sc_nvsdone;
150
151 /* RNDIS protocol */
152 int sc_ndisver;
153 uint32_t sc_rndisrid;
154 struct rndis_queue sc_cntl_sq; /* submission queue */
155 kmutex_t sc_cntl_sqlck;
156 struct rndis_queue sc_cntl_cq; /* completion queue */
157 kmutex_t sc_cntl_cqlck;
158 struct rndis_queue sc_cntl_fq; /* free queue */
159 kmutex_t sc_cntl_fqlck;
160 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
161 struct hvn_nvs_rndis sc_data_msg;
162
163 /* Rx ring */
164 uint8_t *sc_rx_ring;
165 int sc_rx_size;
166 uint32_t sc_rx_hndl;
167 struct hyperv_dma sc_rx_dma;
168
169 /* Tx ring */
170 uint32_t sc_tx_next;
171 uint32_t sc_tx_avail;
172 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
173 bus_dmamap_t sc_tx_rmap;
174 uint8_t *sc_tx_msgs;
175 bus_dma_segment_t sc_tx_mseg;
176 };
177
178 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
179 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
180
181
182 static int hvn_match(device_t, cfdata_t, void *);
183 static void hvn_attach(device_t, device_t, void *);
184 static int hvn_detach(device_t, int);
185
186 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
187 hvn_match, hvn_attach, hvn_detach, NULL);
188
189 static int hvn_ioctl(struct ifnet *, u_long, void *);
190 static int hvn_media_change(struct ifnet *);
191 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
192 static int hvn_iff(struct hvn_softc *);
193 static int hvn_init(struct ifnet *);
194 static void hvn_stop(struct ifnet *, int);
195 static void hvn_start(struct ifnet *);
196 static int hvn_encap(struct hvn_softc *, struct mbuf *,
197 struct hvn_tx_desc **);
198 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
199 static void hvn_txeof(struct hvn_softc *, uint64_t);
200 static int hvn_rx_ring_create(struct hvn_softc *);
201 static int hvn_rx_ring_destroy(struct hvn_softc *);
202 static int hvn_tx_ring_create(struct hvn_softc *);
203 static void hvn_tx_ring_destroy(struct hvn_softc *);
204 static int hvn_set_capabilities(struct hvn_softc *);
205 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
206 static void hvn_get_link_status(struct hvn_softc *);
207
208 /* NVSP */
209 static int hvn_nvs_attach(struct hvn_softc *);
210 static void hvn_nvs_intr(void *);
211 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
212 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
213 static void hvn_nvs_detach(struct hvn_softc *);
214
215 /* RNDIS */
216 static int hvn_rndis_attach(struct hvn_softc *);
217 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
218 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
219 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
220 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
221 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
222 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
223 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
224 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
225 static int hvn_rndis_open(struct hvn_softc *);
226 static int hvn_rndis_close(struct hvn_softc *);
227 static void hvn_rndis_detach(struct hvn_softc *);
228
229 static int
230 hvn_match(device_t parent, cfdata_t match, void *aux)
231 {
232 struct vmbus_attach_args *aa = aux;
233
234 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
235 return 0;
236 return 1;
237 }
238
239 static void
240 hvn_attach(device_t parent, device_t self, void *aux)
241 {
242 struct hvn_softc *sc = device_private(self);
243 struct vmbus_attach_args *aa = aux;
244 struct ifnet *ifp = SC2IFP(sc);
245 uint8_t enaddr[ETHER_ADDR_LEN];
246
247 sc->sc_dev = self;
248 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
249 sc->sc_chan = aa->aa_chan;
250 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
251
252 aprint_naive("\n");
253 aprint_normal(": Hyper-V NetVSC\n");
254
255 if (hvn_nvs_attach(sc)) {
256 aprint_error_dev(self, "failed to init NVSP\n");
257 return;
258 }
259
260 if (hvn_rx_ring_create(sc)) {
261 aprint_error_dev(self, "failed to create Rx ring\n");
262 goto fail1;
263 }
264
265 if (hvn_tx_ring_create(sc)) {
266 aprint_error_dev(self, "failed to create Tx ring\n");
267 goto fail2;
268 }
269
270 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
271 ifp->if_softc = sc;
272 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
273 ifp->if_ioctl = hvn_ioctl;
274 ifp->if_start = hvn_start;
275 ifp->if_init = hvn_init;
276 ifp->if_stop = hvn_stop;
277 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
278 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
279 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
280 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
281 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
282 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
283 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
284 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
285 }
286 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
287 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
288 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
289 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
290 }
291
292 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
293 IFQ_SET_READY(&ifp->if_snd);
294
295 /* Initialize ifmedia structures. */
296 sc->sc_ec.ec_ifmedia = &sc->sc_media;
297 /* XXX media locking needs revisiting */
298 mutex_init(&sc->sc_media_lock, MUTEX_DEFAULT, IPL_SOFTNET);
299 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK,
300 hvn_media_change, hvn_media_status, &sc->sc_media_lock);
301 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
302 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
303
304 if_initialize(ifp);
305 sc->sc_ipq = if_percpuq_create(ifp);
306 if_deferred_start_init(ifp, NULL);
307
308 if (hvn_rndis_attach(sc)) {
309 aprint_error_dev(self, "failed to init RNDIS\n");
310 goto fail3;
311 }
312
313 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
314 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
315 sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
316
317 if (hvn_set_capabilities(sc)) {
318 aprint_error_dev(self, "failed to setup offloading\n");
319 goto fail4;
320 }
321
322 if (hvn_get_lladdr(sc, enaddr)) {
323 aprint_error_dev(self,
324 "failed to obtain an ethernet address\n");
325 goto fail4;
326 }
327 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
328
329 ether_ifattach(ifp, enaddr);
330 if_register(ifp);
331
332 if (pmf_device_register(self, NULL, NULL))
333 pmf_class_network_register(self, ifp);
334 else
335 aprint_error_dev(self, "couldn't establish power handler\n");
336
337 SET(sc->sc_flags, HVN_SCF_ATTACHED);
338 return;
339
340 fail4: hvn_rndis_detach(sc);
341 if_percpuq_destroy(sc->sc_ipq);
342 fail3: ifmedia_fini(&sc->sc_media);
343 mutex_destroy(&sc->sc_media_lock);
344 hvn_tx_ring_destroy(sc);
345 fail2: hvn_rx_ring_destroy(sc);
346 fail1: hvn_nvs_detach(sc);
347 }
348
349 static int
350 hvn_detach(device_t self, int flags)
351 {
352 struct hvn_softc *sc = device_private(self);
353 struct ifnet *ifp = SC2IFP(sc);
354
355 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
356 return 0;
357
358 if (ifp->if_flags & IFF_RUNNING)
359 hvn_stop(ifp, 1);
360
361 pmf_device_deregister(self);
362
363 ether_ifdetach(ifp);
364 if_detach(ifp);
365 ifmedia_fini(&sc->sc_media);
366 mutex_destroy(&sc->sc_media_lock);
367 if_percpuq_destroy(sc->sc_ipq);
368
369 hvn_rndis_detach(sc);
370 hvn_rx_ring_destroy(sc);
371 hvn_tx_ring_destroy(sc);
372 hvn_nvs_detach(sc);
373
374 return 0;
375 }
376
377 static int
378 hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
379 {
380 struct hvn_softc *sc = IFP2SC(ifp);
381 int s, error = 0;
382
383 s = splnet();
384
385 error = ether_ioctl(ifp, command, data);
386 if (error == ENETRESET) {
387 if (ifp->if_flags & IFF_RUNNING)
388 hvn_iff(sc);
389 error = 0;
390 }
391
392 splx(s);
393
394 return error;
395 }
396
397 static int
398 hvn_media_change(struct ifnet *ifp)
399 {
400
401 return 0;
402 }
403
404 static void
405 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
406 {
407 struct hvn_softc *sc = IFP2SC(ifp);
408 int link_state;
409
410 link_state = sc->sc_link_state;
411 hvn_get_link_status(sc);
412 if (link_state != sc->sc_link_state)
413 if_link_state_change(ifp, sc->sc_link_state);
414
415 ifmr->ifm_status = IFM_AVALID;
416 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
417 if (sc->sc_link_state == LINK_STATE_UP)
418 ifmr->ifm_status |= IFM_ACTIVE;
419 }
420
421 static int
422 hvn_iff(struct hvn_softc *sc)
423 {
424
425 /* XXX */
426 sc->sc_promisc = 0;
427
428 return 0;
429 }
430
431 static int
432 hvn_init(struct ifnet *ifp)
433 {
434 struct hvn_softc *sc = IFP2SC(ifp);
435 int error;
436
437 hvn_stop(ifp, 0);
438
439 error = hvn_iff(sc);
440 if (error)
441 return error;
442
443 error = hvn_rndis_open(sc);
444 if (error == 0) {
445 ifp->if_flags |= IFF_RUNNING;
446 ifp->if_flags &= ~IFF_OACTIVE;
447 }
448 return error;
449 }
450
451 static void
452 hvn_stop(struct ifnet *ifp, int disable)
453 {
454 struct hvn_softc *sc = IFP2SC(ifp);
455
456 hvn_rndis_close(sc);
457
458 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
459 }
460
461 static void
462 hvn_start(struct ifnet *ifp)
463 {
464 struct hvn_softc *sc = IFP2SC(ifp);
465 struct hvn_tx_desc *txd;
466 struct mbuf *m;
467
468 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
469 return;
470
471 for (;;) {
472 if (!sc->sc_tx_avail) {
473 /* transient */
474 ifp->if_flags |= IFF_OACTIVE;
475 break;
476 }
477
478 IFQ_DEQUEUE(&ifp->if_snd, m);
479 if (m == NULL)
480 break;
481
482 if (hvn_encap(sc, m, &txd)) {
483 /* the chain is too large */
484 if_statinc(ifp, if_oerrors);
485 m_freem(m);
486 continue;
487 }
488
489 bpf_mtap(ifp, m, BPF_D_OUT);
490
491 if (hvn_rndis_output(sc, txd)) {
492 hvn_decap(sc, txd);
493 if_statinc(ifp, if_oerrors);
494 m_freem(m);
495 continue;
496 }
497
498 sc->sc_tx_next++;
499 }
500 }
501
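/*
 * Append a per-packet info record (VLAN tag, checksum request, ...) to
 * an outgoing RNDIS packet message and return a pointer to its data
 * area; the caller must make sure pktsize leaves enough room.
 */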
502 static inline char *
503 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
504 size_t datalen, uint32_t type)
505 {
506 struct rndis_pktinfo *pi;
507 size_t pi_size = sizeof(*pi) + datalen;
508 char *cp;
509
510 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
511 pktsize);
512
513 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
514 pi = (struct rndis_pktinfo *)cp;
515 pi->rm_size = pi_size;
516 pi->rm_type = type;
517 pi->rm_pktinfooffset = sizeof(*pi);
518 pkt->rm_pktinfolen += pi_size;
519 pkt->rm_dataoffset += pi_size;
520 pkt->rm_len += pi_size;
521
522 return (char *)pi->rm_data;
523 }
524
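/*
 * Build the RNDIS packet message for an outgoing mbuf chain: claim a
 * free Tx descriptor, DMA-load the chain (defragmenting once on EFBIG),
 * append optional VLAN and Tx checksum pktinfo records and fill in the
 * scatter/gather list - slot 0 carries the RNDIS header, the remaining
 * slots the mbuf data segments.
 */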
525 static int
526 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
527 {
528 struct hvn_tx_desc *txd;
529 struct rndis_packet_msg *pkt;
530 bus_dma_segment_t *seg;
531 size_t pktlen;
532 int i, rv;
533
534 do {
535 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
536 sc->sc_tx_next++;
537 } while (!txd->txd_ready);
538 txd->txd_ready = 0;
539
540 pkt = txd->txd_req;
541 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
542 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
543 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
544 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
545 pkt->rm_datalen = m->m_pkthdr.len;
546 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
547 pkt->rm_pktinfolen = 0;
548
549 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
550 BUS_DMA_NOWAIT);
551 switch (rv) {
552 case 0:
553 break;
554 case EFBIG:
555 if (m_defrag(m, M_NOWAIT) != NULL &&
556 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
557 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
558 break;
559 /* FALLTHROUGH */
560 default:
561 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
562 return -1;
563 }
564 txd->txd_buf = m;
565
566 if (vlan_has_tag(m)) {
567 uint32_t vlan;
568 char *cp;
569 uint16_t tag;
570
571 tag = vlan_get_tag(m);
572 vlan = NDIS_VLAN_INFO_MAKE(EVL_VLANOFTAG(tag),
573 EVL_PRIOFTAG(tag), EVL_CFIOFTAG(tag));
574 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
575 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
576 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
577 }
578
579 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
580 M_CSUM_TCPv4)) {
581 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
582 char *cp;
583
584 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
585 csum |= NDIS_TXCSUM_INFO_IPCS;
586 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
587 csum |= NDIS_TXCSUM_INFO_TCPCS;
588 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
589 csum |= NDIS_TXCSUM_INFO_UDPCS;
590 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
591 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
592 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
593 }
594
595 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
596 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
597
598 /* Attach an RNDIS message to the first slot */
599 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
600 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
601 txd->txd_sgl[0].gpa_len = pktlen;
602 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
603
604 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
605 seg = &txd->txd_dmap->dm_segs[i];
606 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
607 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
608 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
609 }
610
611 *txd0 = txd;
612
613 atomic_dec_uint(&sc->sc_tx_avail);
614
615 return 0;
616 }
617
618 static void
619 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
620 {
621 struct ifnet *ifp = SC2IFP(sc);
622
623 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
624 0, txd->txd_dmap->dm_mapsize,
625 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
626 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
627 txd->txd_buf = NULL;
628 txd->txd_nsge = 0;
629 txd->txd_ready = 1;
630 atomic_inc_uint(&sc->sc_tx_avail);
631 ifp->if_flags &= ~IFF_OACTIVE;
632 }
633
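/*
 * Transmit completion: the descriptor index travels in the upper 32
 * bits of the transaction id, offset by HVN_NVS_CHIM_SIG.  Unload the
 * DMA map, free the mbuf and return the descriptor to the ring.
 */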
634 static void
635 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
636 {
637 struct ifnet *ifp = SC2IFP(sc);
638 struct hvn_tx_desc *txd;
639 struct mbuf *m;
640 uint32_t id = tid >> 32;
641
642 if ((tid & 0xffffffffU) != 0)
643 return;
644
645 id -= HVN_NVS_CHIM_SIG;
646 if (id >= HVN_TX_DESC) {
647 device_printf(sc->sc_dev, "tx packet index too large: %u", id);
648 return;
649 }
650
651 txd = &sc->sc_tx_desc[id];
652
653 if ((m = txd->txd_buf) == NULL) {
654 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
655 return;
656 }
657 txd->txd_buf = NULL;
658
659 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
660 0, txd->txd_dmap->dm_mapsize,
661 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
662 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
663 m_freem(m);
664 if_statinc(ifp, if_opackets);
665
666 txd->txd_ready = 1;
667
668 atomic_inc_uint(&sc->sc_tx_avail);
669 ifp->if_flags &= ~IFF_OACTIVE;
670 }
671
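/*
 * Allocate the shared Rx buffer (15MB for NVS 2 and older, 16MB
 * otherwise), register it with the channel and announce it to the
 * host with an RXBUF_CONN request.
 */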
672 static int
673 hvn_rx_ring_create(struct hvn_softc *sc)
674 {
675 struct hvn_nvs_rxbuf_conn cmd;
676 struct hvn_nvs_rxbuf_conn_resp *rsp;
677 uint64_t tid;
678
679 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
680 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
681 else
682 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
683 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
684 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE,
685 HYPERV_DMA_SLEEPOK);
686 if (sc->sc_rx_ring == NULL) {
687 DPRINTF("%s: failed to allocate Rx ring buffer\n",
688 device_xname(sc->sc_dev));
689 return -1;
690 }
691 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
692 &sc->sc_rx_hndl)) {
693 DPRINTF("%s: failed to obtain a PA handle\n",
694 device_xname(sc->sc_dev));
695 goto errout;
696 }
697
698 memset(&cmd, 0, sizeof(cmd));
699 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
700 cmd.nvs_gpadl = sc->sc_rx_hndl;
701 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
702
703 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
704 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
705 goto errout;
706
707 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
708 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
709 DPRINTF("%s: failed to set up the Rx ring\n",
710 device_xname(sc->sc_dev));
711 goto errout;
712 }
713 if (rsp->nvs_nsect > 1) {
714 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
715 device_xname(sc->sc_dev), rsp->nvs_nsect);
716 hvn_rx_ring_destroy(sc);
717 return -1;
718 }
719 return 0;
720
721 errout:
722 if (sc->sc_rx_hndl) {
723 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
724 sc->sc_rx_hndl = 0;
725 }
726 if (sc->sc_rx_ring) {
727 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
728 sc->sc_rx_ring = NULL;
729 }
730 return -1;
731 }
732
733 static int
734 hvn_rx_ring_destroy(struct hvn_softc *sc)
735 {
736 struct hvn_nvs_rxbuf_disconn cmd;
737 uint64_t tid;
738
739 if (sc->sc_rx_ring == NULL)
740 return 0;
741
742 memset(&cmd, 0, sizeof(cmd));
743 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
744 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
745
746 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
747 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
748 return -1;
749
750 delay(100);
751
752 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
753 sc->sc_rx_hndl = 0;
754
755 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
756 sc->sc_rx_ring = NULL;
757
758 return 0;
759 }
760
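/*
 * Carve one pre-allocated RNDIS packet message per Tx descriptor out
 * of a single DMA-mapped region and create a per-descriptor DMA map
 * for the mbuf data.
 */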
761 static int
762 hvn_tx_ring_create(struct hvn_softc *sc)
763 {
764 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
765 struct hvn_tx_desc *txd;
766 bus_dma_segment_t *seg;
767 size_t msgsize;
768 int i, rsegs;
769 paddr_t pa;
770
771 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
772
773 /* Allocate memory to store RNDIS messages */
774 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
775 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
776 DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
777 device_xname(sc->sc_dev));
778 goto errout;
779 }
780 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
781 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
782 DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
783 device_xname(sc->sc_dev));
784 goto errout;
785 }
786 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
787 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
788 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
789 DPRINTF("%s: failed to create map for RDNIS messages\n",
790 device_xname(sc->sc_dev));
791 goto errout;
792 }
793 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
794 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
795 DPRINTF("%s: failed to create map for RDNIS messages\n",
796 device_xname(sc->sc_dev));
797 goto errout;
798 }
799
800 for (i = 0; i < HVN_TX_DESC; i++) {
801 txd = &sc->sc_tx_desc[i];
802 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
803 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
804 &txd->txd_dmap)) {
805 DPRINTF("%s: failed to create map for TX descriptors\n",
806 device_xname(sc->sc_dev));
807 goto errout;
808 }
809 seg = &sc->sc_tx_rmap->dm_segs[0];
810 pa = seg->ds_addr + (msgsize * i);
811 txd->txd_gpa.gpa_page = atop(pa);
812 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
813 txd->txd_gpa.gpa_len = msgsize;
814 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
815 txd->txd_id = i + HVN_NVS_CHIM_SIG;
816 txd->txd_ready = 1;
817 }
818 sc->sc_tx_avail = HVN_TX_DESC;
819
820 return 0;
821
822 errout:
823 hvn_tx_ring_destroy(sc);
824 return -1;
825 }
826
827 static void
828 hvn_tx_ring_destroy(struct hvn_softc *sc)
829 {
830 struct hvn_tx_desc *txd;
831 int i;
832
833 for (i = 0; i < HVN_TX_DESC; i++) {
834 txd = &sc->sc_tx_desc[i];
835 if (txd->txd_dmap == NULL)
836 continue;
837 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
838 0, txd->txd_dmap->dm_mapsize,
839 BUS_DMASYNC_POSTWRITE);
840 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
841 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
842 txd->txd_dmap = NULL;
843 if (txd->txd_buf == NULL)
844 continue;
845 m_freem(txd->txd_buf);
846 txd->txd_buf = NULL;
847 }
848 if (sc->sc_tx_rmap != NULL) {
849 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
850 0, sc->sc_tx_rmap->dm_mapsize,
851 BUS_DMASYNC_POSTWRITE);
852 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
853 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
854 sc->sc_tx_rmap = NULL;
855 }
856 if (sc->sc_tx_msgs != NULL) {
857 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
858
859 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
860 msgsize * HVN_TX_DESC);
861 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
862 sc->sc_tx_msgs = NULL;
863 }
864 }
865
866 static int
867 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
868 {
869 size_t addrlen = ETHER_ADDR_LEN;
870 int rv;
871
872 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
873 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
874 rv = -1;
875 return rv;
876 }
877
878 static void
879 hvn_get_link_status(struct hvn_softc *sc)
880 {
881 uint32_t state;
882 size_t len = sizeof(state);
883
884 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
885 &state, &len) == 0)
886 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
887 LINK_STATE_UP : LINK_STATE_DOWN;
888 }
889
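/*
 * Bring up the NVS transport: open the VMBus channel with a ring big
 * enough for all RNDIS control and data messages, negotiate the
 * highest NVSP protocol version the host accepts and initialize NDIS.
 */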
890 static int
891 hvn_nvs_attach(struct hvn_softc *sc)
892 {
893 static const uint32_t protos[] = {
894 HVN_NVS_PROTO_VERSION_5,
895 HVN_NVS_PROTO_VERSION_4,
896 HVN_NVS_PROTO_VERSION_2,
897 HVN_NVS_PROTO_VERSION_1
898 };
899 struct hvn_nvs_init cmd;
900 struct hvn_nvs_init_resp *rsp;
901 struct hvn_nvs_ndis_init ncmd;
902 struct hvn_nvs_ndis_conf ccmd;
903 uint32_t ndisver, ringsize;
904 uint64_t tid;
905 int i;
906
907 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, KM_SLEEP);
908
909 /* We need to be able to fit all RNDIS control and data messages */
910 ringsize = HVN_RNDIS_CTLREQS *
911 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
912 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
913 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
914
915 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
916
917 /* Associate our interrupt handler with the channel */
918 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
919 hvn_nvs_intr, sc)) {
920 DPRINTF("%s: failed to open channel\n",
921 device_xname(sc->sc_dev));
922 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
923 return -1;
924 }
925
926 memset(&cmd, 0, sizeof(cmd));
927 cmd.nvs_type = HVN_NVS_TYPE_INIT;
928 for (i = 0; i < __arraycount(protos); i++) {
929 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
930 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
931 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
932 return -1;
933
934 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
935 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
936 sc->sc_proto = protos[i];
937 break;
938 }
939 }
940 if (i == __arraycount(protos)) {
941 DPRINTF("%s: failed to negotiate NVSP version\n",
942 device_xname(sc->sc_dev));
943 return -1;
944 }
945
946 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
947 memset(&ccmd, 0, sizeof(ccmd));
948 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
949 ccmd.nvs_mtu = HVN_MAXMTU;
950 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
951
952 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
953 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
954 return -1;
955 }
956
957 memset(&ncmd, 0, sizeof(ncmd));
958 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
959 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
960 ndisver = NDIS_VERSION_6_1;
961 else
962 ndisver = NDIS_VERSION_6_30;
963 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
964 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
965
966 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
967 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
968 return -1;
969
970 sc->sc_ndisver = ndisver;
971
972 return 0;
973 }
974
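/*
 * Channel callback: drain the VMBus ring and dispatch NVSP packets.
 * Command completions wake the waiter in hvn_nvs_cmd(), RNDIS_ACK
 * completes transmits and RXBUF packets carry inbound RNDIS data.
 */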
975 static void
976 hvn_nvs_intr(void *arg)
977 {
978 struct hvn_softc *sc = arg;
979 struct ifnet *ifp = SC2IFP(sc);
980 struct vmbus_chanpkt_hdr *cph;
981 const struct hvn_nvs_hdr *nvs;
982 uint64_t rid;
983 uint32_t rlen;
984 int rv;
985 bool dotx = false;
986
987 for (;;) {
988 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
989 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
990 if (rv != 0 || rlen == 0) {
991 if (rv != EAGAIN)
992 device_printf(sc->sc_dev,
993 "failed to receive an NVSP packet\n");
994 break;
995 }
996 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
997 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
998
999 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1000 switch (nvs->nvs_type) {
1001 case HVN_NVS_TYPE_INIT_RESP:
1002 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1003 case HVN_NVS_TYPE_CHIM_CONNRESP:
1004 case HVN_NVS_TYPE_SUBCH_RESP:
1005 /* copy the response back */
1006 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1007 sc->sc_nvsdone = 1;
1008 wakeup(&sc->sc_nvsrsp);
1009 break;
1010 case HVN_NVS_TYPE_RNDIS_ACK:
1011 dotx = true;
1012 hvn_txeof(sc, cph->cph_tid);
1013 break;
1014 default:
1015 device_printf(sc->sc_dev,
1016 "unhandled NVSP packet type %u "
1017 "on completion\n", nvs->nvs_type);
1018 break;
1019 }
1020 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1021 switch (nvs->nvs_type) {
1022 case HVN_NVS_TYPE_RNDIS:
1023 hvn_rndis_input(sc, cph->cph_tid, cph);
1024 break;
1025 default:
1026 device_printf(sc->sc_dev,
1027 "unhandled NVSP packet type %u "
1028 "on receive\n", nvs->nvs_type);
1029 break;
1030 }
1031 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
1032 switch (nvs->nvs_type) {
1033 case HVN_NVS_TYPE_TXTBL_NOTE:
1034 /* Useless; ignore */
1035 break;
1036 default:
1037 device_printf(sc->sc_dev,
1038 "got notify, nvs type %u\n", nvs->nvs_type);
1039 break;
1040 }
1041 } else
1042 device_printf(sc->sc_dev,
1043 "unknown NVSP packet type %u\n", cph->cph_type);
1044 }
1045
1046 if (dotx)
1047 if_schedule_deferred_start(ifp);
1048 }
1049
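/*
 * Send an NVSP command over the channel, retrying on EAGAIN, and, if
 * timo is non-zero, wait for the completion that hvn_nvs_intr() copies
 * into sc_nvsrsp; while cold the channel is polled directly instead of
 * sleeping.
 */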
1050 static int
1051 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1052 int timo)
1053 {
1054 struct hvn_nvs_hdr *hdr = cmd;
1055 int tries = 10;
1056 int rv, s;
1057
1058 sc->sc_nvsdone = 0;
1059
1060 do {
1061 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1062 tid, VMBUS_CHANPKT_TYPE_INBAND,
1063 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1064 if (rv == EAGAIN) {
1065 if (cold)
1066 delay(1000);
1067 else
1068 tsleep(cmd, PRIBIO, "nvsout",
1069 uimax(1, mstohz(1)));
1070 } else if (rv) {
1071 DPRINTF("%s: NVSP operation %u send error %d\n",
1072 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1073 return rv;
1074 }
1075 } while (rv != 0 && --tries > 0);
1076
1077 if (tries == 0 && rv != 0) {
1078 device_printf(sc->sc_dev,
1079 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1080 return rv;
1081 }
1082
1083 if (timo == 0)
1084 return 0;
1085
1086 do {
1087 if (cold) {
1088 delay(1000);
1089 s = splnet();
1090 hvn_nvs_intr(sc);
1091 splx(s);
1092 } else
1093 tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
1094 uimax(1, mstohz(1)));
1095 } while (--timo > 0 && sc->sc_nvsdone != 1);
1096
1097 if (timo == 0 && sc->sc_nvsdone != 1) {
1098 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1099 hdr->nvs_type);
1100 return ETIMEDOUT;
1101 }
1102 return 0;
1103 }
1104
1105 static int
1106 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1107 {
1108 struct hvn_nvs_rndis_ack cmd;
1109 int tries = 5;
1110 int rv;
1111
1112 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1113 cmd.nvs_status = HVN_NVS_STATUS_OK;
1114 do {
1115 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1116 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1117 if (rv == EAGAIN)
1118 delay(10);
1119 else if (rv) {
1120 DPRINTF("%s: NVSP acknowledgement error %d\n",
1121 device_xname(sc->sc_dev), rv);
1122 return rv;
1123 }
1124 } while (rv != 0 && --tries > 0);
1125 return rv;
1126 }
1127
1128 static void
1129 hvn_nvs_detach(struct hvn_softc *sc)
1130 {
1131
1132 if (vmbus_channel_close(sc->sc_chan) == 0) {
1133 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1134 sc->sc_nvsbuf = NULL;
1135 }
1136 }
1137
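/*
 * RNDIS control requests cycle through three queues: free -> submitted
 * (waiting for the host) -> completed.  hvn_rollback_cmd() pulls a
 * request back off the submission queue if it was never answered.
 */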
1138 static inline struct rndis_cmd *
1139 hvn_alloc_cmd(struct hvn_softc *sc)
1140 {
1141 struct rndis_cmd *rc;
1142
1143 mutex_enter(&sc->sc_cntl_fqlck);
1144 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1145 /* XXX use condvar(9) instead of mtsleep */
1146 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1147 &sc->sc_cntl_fqlck);
1148 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1149 mutex_exit(&sc->sc_cntl_fqlck);
1150 return rc;
1151 }
1152
1153 static inline void
1154 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1155 {
1156
1157 mutex_enter(&sc->sc_cntl_sqlck);
1158 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1159 mutex_exit(&sc->sc_cntl_sqlck);
1160 }
1161
1162 static inline struct rndis_cmd *
1163 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1164 {
1165 struct rndis_cmd *rc;
1166
1167 mutex_enter(&sc->sc_cntl_sqlck);
1168 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1169 if (rc->rc_id == id) {
1170 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1171 break;
1172 }
1173 }
1174 mutex_exit(&sc->sc_cntl_sqlck);
1175 if (rc != NULL) {
1176 mutex_enter(&sc->sc_cntl_cqlck);
1177 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1178 mutex_exit(&sc->sc_cntl_cqlck);
1179 }
1180 return rc;
1181 }
1182
1183 static inline void
1184 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1185 {
1186
1187 mutex_enter(&sc->sc_cntl_cqlck);
1188 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1189 mutex_exit(&sc->sc_cntl_cqlck);
1190 }
1191
1192 static inline int
1193 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1194 {
1195 struct rndis_cmd *rn;
1196
1197 mutex_enter(&sc->sc_cntl_sqlck);
1198 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1199 if (rn == rc) {
1200 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1201 mutex_exit(&sc->sc_cntl_sqlck);
1202 return 0;
1203 }
1204 }
1205 mutex_exit(&sc->sc_cntl_sqlck);
1206 return -1;
1207 }
1208
1209 static inline void
1210 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1211 {
1212
1213 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1214 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1215 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1216 mutex_enter(&sc->sc_cntl_fqlck);
1217 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1218 mutex_exit(&sc->sc_cntl_fqlck);
1219 wakeup(&sc->sc_cntl_fq);
1220 }
1221
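/*
 * Allocate the RNDIS control request pool (one DMA-mapped page per
 * request) and bring the RNDIS layer up with REMOTE_NDIS_INITIALIZE_MSG.
 */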
1222 static int
1223 hvn_rndis_attach(struct hvn_softc *sc)
1224 {
1225 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1226 struct rndis_init_req *req;
1227 struct rndis_init_comp *cmp;
1228 struct rndis_cmd *rc;
1229 int i, rv;
1230
1231 /* RNDIS control message queues */
1232 TAILQ_INIT(&sc->sc_cntl_sq);
1233 TAILQ_INIT(&sc->sc_cntl_cq);
1234 TAILQ_INIT(&sc->sc_cntl_fq);
1235 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1236 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1237 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1238
1239 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1240 rc = &sc->sc_cntl_msgs[i];
1241 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1242 dmaflags, &rc->rc_dmap)) {
1243 DPRINTF("%s: failed to create RNDIS command map\n",
1244 device_xname(sc->sc_dev));
1245 goto errout;
1246 }
1247 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1248 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1249 DPRINTF("%s: failed to allocate RNDIS command\n",
1250 device_xname(sc->sc_dev));
1251 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1252 goto errout;
1253 }
1254 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1255 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1256 DPRINTF("%s: failed to allocate RNDIS command\n",
1257 device_xname(sc->sc_dev));
1258 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1259 rc->rc_nsegs);
1260 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1261 goto errout;
1262 }
1263 memset(rc->rc_req, 0, PAGE_SIZE);
1264 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1265 PAGE_SIZE, NULL, dmaflags)) {
1266 DPRINTF("%s: failed to load RNDIS command map\n",
1267 device_xname(sc->sc_dev));
1268 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1269 rc->rc_nsegs);
1270 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1271 goto errout;
1272 }
1273 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1274 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1275 }
1276
1277 rc = hvn_alloc_cmd(sc);
1278
1279 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1280 BUS_DMASYNC_PREREAD);
1281
1282 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1283
1284 req = rc->rc_req;
1285 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1286 req->rm_len = sizeof(*req);
1287 req->rm_rid = rc->rc_id;
1288 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1289 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1290 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1291
1292 rc->rc_cmplen = sizeof(*cmp);
1293
1294 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1295 BUS_DMASYNC_PREWRITE);
1296
1297 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1298 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1299 device_xname(sc->sc_dev), rv);
1300 hvn_free_cmd(sc, rc);
1301 goto errout;
1302 }
1303 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1304 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1305 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1306 device_xname(sc->sc_dev), cmp->rm_status);
1307 hvn_free_cmd(sc, rc);
1308 goto errout;
1309 }
1310
1311 hvn_free_cmd(sc, rc);
1312
1313 /* Initialize RNDIS Data command */
1314 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1315 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1316 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1317 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1318
1319 return 0;
1320
1321 errout:
1322 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1323 rc = &sc->sc_cntl_msgs[i];
1324 if (rc->rc_req == NULL)
1325 continue;
1326 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1327 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1328 rc->rc_req = NULL;
1329 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1330 }
1331 return -1;
1332 }
1333
1334 static int
1335 hvn_set_capabilities(struct hvn_softc *sc)
1336 {
1337 struct ndis_offload_params params;
1338 size_t len = sizeof(params);
1339
1340 memset(&params, 0, sizeof(params));
1341
1342 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1343 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1344 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1345 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1346 } else {
1347 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1348 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1349 }
1350
1351 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1352 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1353 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1354 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1355 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1356 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1357 }
1358
1359 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1360 }
1361
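/*
 * Issue an RNDIS control request: put it on the submission queue, pass
 * the request page to the host as a single GPA and wait for
 * hvn_rndis_complete() to mark it done, polling the channel while cold.
 */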
1362 static int
1363 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1364 {
1365 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1366 struct rndis_msghdr *hdr = rc->rc_req;
1367 struct vmbus_gpa sgl[1];
1368 int tries = 10;
1369 int rv, s;
1370
1371 KASSERT(timo > 0);
1372
1373 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1374 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1375 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1376
1377 sgl[0].gpa_page = rc->rc_gpa;
1378 sgl[0].gpa_len = hdr->rm_len;
1379 sgl[0].gpa_ofs = 0;
1380
1381 rc->rc_done = 0;
1382
1383 hvn_submit_cmd(sc, rc);
1384
1385 do {
1386 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1387 sizeof(*msg), rc->rc_id);
1388 if (rv == EAGAIN) {
1389 if (cold)
1390 delay(1000);
1391 else
1392 tsleep(rc, PRIBIO, "rndisout",
1393 uimax(1, mstohz(1)));
1394 } else if (rv) {
1395 DPRINTF("%s: RNDIS operation %u send error %d\n",
1396 device_xname(sc->sc_dev), hdr->rm_type, rv);
1397 hvn_rollback_cmd(sc, rc);
1398 return rv;
1399 }
1400 } while (rv != 0 && --tries > 0);
1401
1402 if (tries == 0 && rv != 0) {
1403 device_printf(sc->sc_dev,
1404 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1405 return rv;
1406 }
1407 if (vmbus_channel_is_revoked(sc->sc_chan)) {
1408 /* No response */
1409 return 0;
1410 }
1411
1412 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1413 BUS_DMASYNC_POSTWRITE);
1414
1415 do {
1416 if (cold) {
1417 delay(1000);
1418 s = splnet();
1419 hvn_nvs_intr(sc);
1420 splx(s);
1421 } else
1422 tsleep(rc, PRIBIO | PCATCH, "rndiscmd",
1423 uimax(1, mstohz(1)));
1424 } while (--timo > 0 && rc->rc_done != 1);
1425
1426 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1427 BUS_DMASYNC_POSTREAD);
1428
1429 if (rc->rc_done != 1) {
1430 rv = timo == 0 ? ETIMEDOUT : EINTR;
1431 if (hvn_rollback_cmd(sc, rc)) {
1432 hvn_release_cmd(sc, rc);
1433 rv = 0;
1434 } else if (rv == ETIMEDOUT) {
1435 device_printf(sc->sc_dev,
1436 "RNDIS operation %u timed out\n", hdr->rm_type);
1437 }
1438 return rv;
1439 }
1440
1441 hvn_release_cmd(sc, rc);
1442 return 0;
1443 }
1444
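/*
 * An inbound NVS RNDIS packet references one or more ranges of the
 * shared Rx buffer; dispatch each range by RNDIS message type and
 * acknowledge the whole batch back to the host.
 */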
1445 static void
1446 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1447 {
1448 struct vmbus_chanpkt_prplist *cp = arg;
1449 uint32_t off, len, type;
1450 int i;
1451
1452 if (sc->sc_rx_ring == NULL) {
1453 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1454 return;
1455 }
1456
1457 for (i = 0; i < cp->cp_range_cnt; i++) {
1458 off = cp->cp_range[i].gpa_ofs;
1459 len = cp->cp_range[i].gpa_len;
1460
1461 KASSERT(off + len <= sc->sc_rx_size);
1462 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1463
1464 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1465 switch (type) {
1466 /* data message */
1467 case REMOTE_NDIS_PACKET_MSG:
1468 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1469 break;
1470 /* completion messages */
1471 case REMOTE_NDIS_INITIALIZE_CMPLT:
1472 case REMOTE_NDIS_QUERY_CMPLT:
1473 case REMOTE_NDIS_SET_CMPLT:
1474 case REMOTE_NDIS_RESET_CMPLT:
1475 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1476 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1477 break;
1478 /* notification message */
1479 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1480 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1481 break;
1482 default:
1483 device_printf(sc->sc_dev,
1484 "unhandled RNDIS message type %u\n", type);
1485 break;
1486 }
1487 }
1488
1489 hvn_nvs_ack(sc, tid);
1490 }
1491
1492 static inline struct mbuf *
1493 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1494 {
1495 struct ifnet *ifp = SC2IFP(sc);
1496 struct mbuf *m;
1497 size_t size = len + ETHER_ALIGN;
1498
1499 MGETHDR(m, M_NOWAIT, MT_DATA);
1500 if (m == NULL)
1501 return NULL;
1502
1503 if (size > MHLEN) {
1504 if (size <= MCLBYTES)
1505 MCLGET(m, M_NOWAIT);
1506 else
1507 MEXTMALLOC(m, size, M_NOWAIT);
1508 if ((m->m_flags & M_EXT) == 0) {
1509 m_freem(m);
1510 return NULL;
1511 }
1512 }
1513
1514 m->m_len = m->m_pkthdr.len = size;
1515 m_adj(m, ETHER_ALIGN);
1516 m_copyback(m, 0, len, buf);
1517 m_set_rcvif(m, ifp);
1518 return m;
1519 }
1520
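/*
 * Parse an RNDIS data packet: bounds-check the data and pktinfo
 * regions, copy the payload into an mbuf and translate checksum and
 * VLAN per-packet info into mbuf metadata before passing the packet
 * up the stack.
 */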
1521 static void
1522 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1523 {
1524 struct ifnet *ifp = SC2IFP(sc);
1525 struct rndis_packet_msg *pkt;
1526 struct rndis_pktinfo *pi;
1527 uint32_t csum, vlan;
1528 struct mbuf *m;
1529
1530 if (!(ifp->if_flags & IFF_RUNNING))
1531 return;
1532
1533 if (len < sizeof(*pkt)) {
1534 device_printf(sc->sc_dev, "data packet too short: %u\n",
1535 len);
1536 return;
1537 }
1538
1539 pkt = (struct rndis_packet_msg *)buf;
1540 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1541 device_printf(sc->sc_dev,
1542 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1543 pkt->rm_datalen);
1544 return;
1545 }
1546
1547 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1548 pkt->rm_datalen)) == NULL) {
1549 if_statinc(ifp, if_ierrors);
1550 return;
1551 }
1552
1553 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1554 device_printf(sc->sc_dev,
1555 "pktinfo is out of bounds: %u@%u vs %u\n",
1556 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1557 goto done;
1558 }
1559
1560 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1561 pkt->rm_pktinfooffset);
1562 while (pkt->rm_pktinfolen > 0) {
1563 if (pi->rm_size > pkt->rm_pktinfolen) {
1564 device_printf(sc->sc_dev,
1565 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1566 pkt->rm_pktinfolen);
1567 break;
1568 }
1569
1570 switch (pi->rm_type) {
1571 case NDIS_PKTINFO_TYPE_CSUM:
1572 memcpy(&csum, pi->rm_data, sizeof(csum));
1573 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1574 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1575 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1576 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1577 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1578 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1579 break;
1580 case NDIS_PKTINFO_TYPE_VLAN:
1581 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1582 if (vlan != 0xffffffff) {
1583 uint16_t t = NDIS_VLAN_INFO_ID(vlan);
1584 t |= NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS;
1585 t |= NDIS_VLAN_INFO_CFI(vlan) << EVL_CFI_BITS;
1586 vlan_set_tag(m, t);
1587 }
1588 break;
1589 default:
1590 DPRINTF("%s: unhandled pktinfo type %u\n",
1591 device_xname(sc->sc_dev), pi->rm_type);
1592 break;
1593 }
1594
1595 pkt->rm_pktinfolen -= pi->rm_size;
1596 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1597 }
1598
1599 done:
1600 if_percpuq_enqueue(sc->sc_ipq, m);
1601 }
1602
1603 static void
1604 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1605 {
1606 struct rndis_cmd *rc;
1607 uint32_t id;
1608
1609 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1610 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1611 if (len < rc->rc_cmplen)
1612 device_printf(sc->sc_dev,
1613 "RNDIS response %u too short: %u\n", id, len);
1614 else
1615 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1616 if (len > rc->rc_cmplen &&
1617 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1618 device_printf(sc->sc_dev,
1619 "RNDIS response %u too large: %u\n", id, len);
1620 else if (len > rc->rc_cmplen)
1621 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1622 len - rc->rc_cmplen);
1623 rc->rc_done = 1;
1624 wakeup(rc);
1625 } else {
1626 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1627 device_xname(sc->sc_dev), id);
1628 }
1629 }
1630
1631 static int
1632 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1633 {
1634 uint64_t rid = (uint64_t)txd->txd_id << 32;
1635 int rv;
1636
1637 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1638 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1639 if (rv) {
1640 DPRINTF("%s: RNDIS data send error %d\n",
1641 device_xname(sc->sc_dev), rv);
1642 return rv;
1643 }
1644 return 0;
1645 }
1646
1647 static void
1648 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1649 {
1650 struct ifnet *ifp = SC2IFP(sc);
1651 uint32_t status;
1652 int link_state = sc->sc_link_state;
1653
1654 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1655 switch (status) {
1656 case RNDIS_STATUS_MEDIA_CONNECT:
1657 sc->sc_link_state = LINK_STATE_UP;
1658 break;
1659 case RNDIS_STATUS_MEDIA_DISCONNECT:
1660 sc->sc_link_state = LINK_STATE_DOWN;
1661 break;
1662 /* Ignore these */
1663 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1664 return;
1665 default:
1666 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1667 status);
1668 return;
1669 }
1670 if (link_state != sc->sc_link_state)
1671 if_link_state_change(ifp, sc->sc_link_state);
1672 }
1673
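/*
 * Read an NDIS OID from the host; on success *length is updated to the
 * size of the returned information buffer.
 */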
1674 static int
1675 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1676 {
1677 struct rndis_cmd *rc;
1678 struct rndis_query_req *req;
1679 struct rndis_query_comp *cmp;
1680 size_t olength = *length;
1681 int rv;
1682
1683 rc = hvn_alloc_cmd(sc);
1684
1685 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1686 BUS_DMASYNC_PREREAD);
1687
1688 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1689
1690 req = rc->rc_req;
1691 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1692 req->rm_len = sizeof(*req);
1693 req->rm_rid = rc->rc_id;
1694 req->rm_oid = oid;
1695 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1696
1697 rc->rc_cmplen = sizeof(*cmp);
1698
1699 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1700 BUS_DMASYNC_PREWRITE);
1701
1702 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1703 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1704 device_xname(sc->sc_dev), rv);
1705 hvn_free_cmd(sc, rc);
1706 return rv;
1707 }
1708
1709 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1710 switch (cmp->rm_status) {
1711 case RNDIS_STATUS_SUCCESS:
1712 if (cmp->rm_infobuflen > olength) {
1713 rv = EINVAL;
1714 break;
1715 }
1716 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1717 *length = cmp->rm_infobuflen;
1718 break;
1719 default:
1720 *length = 0;
1721 rv = EIO;
1722 break;
1723 }
1724
1725 hvn_free_cmd(sc, rc);
1726 return rv;
1727 }
1728
1729 static int
1730 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1731 {
1732 struct rndis_cmd *rc;
1733 struct rndis_set_req *req;
1734 struct rndis_set_comp *cmp;
1735 int rv;
1736
1737 rc = hvn_alloc_cmd(sc);
1738
1739 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1740 BUS_DMASYNC_PREREAD);
1741
1742 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1743
1744 req = rc->rc_req;
1745 req->rm_type = REMOTE_NDIS_SET_MSG;
1746 req->rm_len = sizeof(*req) + length;
1747 req->rm_rid = rc->rc_id;
1748 req->rm_oid = oid;
1749 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1750
1751 rc->rc_cmplen = sizeof(*cmp);
1752
1753 if (length > 0) {
1754 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1755 req->rm_infobuflen = length;
1756 memcpy(req + 1, data, length);
1757 }
1758
1759 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1760 BUS_DMASYNC_PREWRITE);
1761
1762 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1763 DPRINTF("%s: SET_MSG failed, error %d\n",
1764 device_xname(sc->sc_dev), rv);
1765 hvn_free_cmd(sc, rc);
1766 return rv;
1767 }
1768
1769 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1770 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1771 rv = EIO;
1772
1773 hvn_free_cmd(sc, rc);
1774 return rv;
1775 }
1776
1777 static int
1778 hvn_rndis_open(struct hvn_softc *sc)
1779 {
1780 uint32_t filter;
1781 int rv;
1782
1783 if (sc->sc_promisc)
1784 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1785 else
1786 filter = RNDIS_PACKET_TYPE_BROADCAST |
1787 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1788 RNDIS_PACKET_TYPE_DIRECTED;
1789
1790 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1791 &filter, sizeof(filter));
1792 if (rv) {
1793 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1794 device_xname(sc->sc_dev), filter);
1795 }
1796 return rv;
1797 }
1798
1799 static int
1800 hvn_rndis_close(struct hvn_softc *sc)
1801 {
1802 uint32_t filter = 0;
1803 int rv;
1804
1805 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1806 &filter, sizeof(filter));
1807 if (rv) {
1808 DPRINTF("%s: failed to clear RNDIS filter\n",
1809 device_xname(sc->sc_dev));
1810 }
1811 return rv;
1812 }
1813
1814 static void
1815 hvn_rndis_detach(struct hvn_softc *sc)
1816 {
1817 struct rndis_cmd *rc;
1818 struct rndis_halt_req *req;
1819 int rv;
1820
1821 rc = hvn_alloc_cmd(sc);
1822
1823 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1824 BUS_DMASYNC_PREREAD);
1825
1826 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1827
1828 req = rc->rc_req;
1829 req->rm_type = REMOTE_NDIS_HALT_MSG;
1830 req->rm_len = sizeof(*req);
1831 req->rm_rid = rc->rc_id;
1832
1833 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1834 BUS_DMASYNC_PREWRITE);
1835
1836 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1837 DPRINTF("%s: HALT_MSG failed, error %d\n",
1838 device_xname(sc->sc_dev), rv);
1839 }
1840 hvn_free_cmd(sc, rc);
1841
1842 mutex_destroy(&sc->sc_cntl_sqlck);
1843 mutex_destroy(&sc->sc_cntl_cqlck);
1844 mutex_destroy(&sc->sc_cntl_fqlck);
1845 }
1846