/* $NetBSD: if_hvn.c,v 1.2 2019/03/05 08:25:02 msaitoh Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.2 2019/03/05 08:25:02 msaitoh Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70
71 #define HVN_NVS_MSGSIZE 32
72 #define HVN_NVS_BUFSIZE PAGE_SIZE
73
74 /*
75 * RNDIS control interface
76 */
77 #define HVN_RNDIS_CTLREQS 4
78 #define HVN_RNDIS_BUFSIZE 512
79
80 struct rndis_cmd {
81 uint32_t rc_id;
82 struct hvn_nvs_rndis rc_msg;
83 void *rc_req;
84 bus_dmamap_t rc_dmap;
85 bus_dma_segment_t rc_segs;
86 int rc_nsegs;
87 uint64_t rc_gpa;
88 struct rndis_packet_msg rc_cmp;
89 uint32_t rc_cmplen;
90 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
91 int rc_done;
92 TAILQ_ENTRY(rndis_cmd) rc_entry;
93 };
94 TAILQ_HEAD(rndis_queue, rndis_cmd);
95
96 #define HVN_MAXMTU (9 * 1024)
97
98 #define HVN_RNDIS_XFER_SIZE 2048
99
100 /*
101 * Tx ring
102 */
103 #define HVN_TX_DESC 256
104 #define HVN_TX_FRAGS 15 /* 31 is the max */
105 #define HVN_TX_FRAG_SIZE PAGE_SIZE
106 #define HVN_TX_PKT_SIZE 16384
107
108 #define HVN_RNDIS_PKT_LEN \
109 (sizeof(struct rndis_packet_msg) + \
110 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
111 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
112
113 struct hvn_tx_desc {
114 uint32_t txd_id;
115 int txd_ready;
116 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
117 int txd_nsge;
118 struct mbuf *txd_buf;
119 bus_dmamap_t txd_dmap;
120 struct vmbus_gpa txd_gpa;
121 struct rndis_packet_msg *txd_req;
122 };
123
124 struct hvn_softc {
125 device_t sc_dev;
126
127 struct vmbus_softc *sc_vmbus;
128 struct vmbus_channel *sc_chan;
129 bus_dma_tag_t sc_dmat;
130
131 struct ethercom sc_ec;
132 struct ifmedia sc_media;
133 struct if_percpuq *sc_ipq;
134 int sc_link_state;
135 int sc_promisc;
136
137 uint32_t sc_flags;
138 #define HVN_SCF_ATTACHED __BIT(0)
139
140 /* NVS protocol */
141 int sc_proto;
142 uint32_t sc_nvstid;
143 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
144 uint8_t *sc_nvsbuf;
145 int sc_nvsdone;
146
147 /* RNDIS protocol */
148 int sc_ndisver;
149 uint32_t sc_rndisrid;
150 struct rndis_queue sc_cntl_sq; /* submission queue */
151 kmutex_t sc_cntl_sqlck;
152 struct rndis_queue sc_cntl_cq; /* completion queue */
153 kmutex_t sc_cntl_cqlck;
154 struct rndis_queue sc_cntl_fq; /* free queue */
155 kmutex_t sc_cntl_fqlck;
156 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
157 struct hvn_nvs_rndis sc_data_msg;
158
159 /* Rx ring */
160 uint8_t *sc_rx_ring;
161 int sc_rx_size;
162 uint32_t sc_rx_hndl;
163 struct hyperv_dma sc_rx_dma;
164
165 /* Tx ring */
166 uint32_t sc_tx_next;
167 uint32_t sc_tx_avail;
168 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
169 bus_dmamap_t sc_tx_rmap;
170 uint8_t *sc_tx_msgs;
171 bus_dma_segment_t sc_tx_mseg;
172 };
173
174 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
175 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
176
177
178 static int hvn_match(device_t, cfdata_t, void *);
179 static void hvn_attach(device_t, device_t, void *);
180 static int hvn_detach(device_t, int);
181
182 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
183 hvn_match, hvn_attach, hvn_detach, NULL);
184
185 static int hvn_ioctl(struct ifnet *, u_long, void *);
186 static int hvn_media_change(struct ifnet *);
187 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
188 static int hvn_iff(struct hvn_softc *);
189 static int hvn_init(struct ifnet *);
190 static void hvn_stop(struct ifnet *, int);
191 static void hvn_start(struct ifnet *);
192 static int hvn_encap(struct hvn_softc *, struct mbuf *,
193 struct hvn_tx_desc **);
194 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
195 static void hvn_txeof(struct hvn_softc *, uint64_t);
196 static int hvn_rx_ring_create(struct hvn_softc *);
197 static int hvn_rx_ring_destroy(struct hvn_softc *);
198 static int hvn_tx_ring_create(struct hvn_softc *);
199 static void hvn_tx_ring_destroy(struct hvn_softc *);
200 static int hvn_set_capabilities(struct hvn_softc *);
201 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
202 static void hvn_get_link_status(struct hvn_softc *);
203
/* NVSP */
205 static int hvn_nvs_attach(struct hvn_softc *);
206 static void hvn_nvs_intr(void *);
207 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
208 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
209 static void hvn_nvs_detach(struct hvn_softc *);
210
211 /* RNDIS */
212 static int hvn_rndis_attach(struct hvn_softc *);
213 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
214 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
215 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
216 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
217 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
218 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
219 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
220 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
221 static int hvn_rndis_open(struct hvn_softc *);
222 static int hvn_rndis_close(struct hvn_softc *);
223 static void hvn_rndis_detach(struct hvn_softc *);
224
225 static int
226 hvn_match(device_t parent, cfdata_t match, void *aux)
227 {
228 struct vmbus_attach_args *aa = aux;
229
230 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
231 return 0;
232 return 1;
233 }
234
235 static void
236 hvn_attach(device_t parent, device_t self, void *aux)
237 {
238 struct hvn_softc *sc = device_private(self);
239 struct vmbus_attach_args *aa = aux;
240 struct ifnet *ifp = SC2IFP(sc);
241 uint8_t enaddr[ETHER_ADDR_LEN];
242 int error;
243
244 sc->sc_dev = self;
245 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
246 sc->sc_chan = aa->aa_chan;
247 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
248
249 aprint_naive("\n");
250 aprint_normal(": Hyper-V NetVSC\n");
251
252 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
253
254 if (hvn_nvs_attach(sc)) {
255 aprint_error_dev(self, "failed to init NVSP\n");
256 return;
257 }
258
259 if (hvn_rx_ring_create(sc)) {
260 aprint_error_dev(self, "failed to create Rx ring\n");
261 goto fail1;
262 }
263
264 if (hvn_tx_ring_create(sc)) {
265 aprint_error_dev(self, "failed to create Tx ring\n");
266 goto fail1;
267 }
268
269 ifp->if_softc = sc;
270 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
271 ifp->if_ioctl = hvn_ioctl;
272 ifp->if_start = hvn_start;
273 ifp->if_init = hvn_init;
274 ifp->if_stop = hvn_stop;
275 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
276 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
277 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
278 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
279 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
280 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
281 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
282 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
283 }
284 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
285 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
286 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
287 }
288
289 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
290 IFQ_SET_READY(&ifp->if_snd);
291
292 ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
293 hvn_media_status);
294 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
295 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
296
297 error = if_initialize(ifp);
298 if (error) {
299 aprint_error_dev(self, "if_initialize failed(%d)\n", error);
300 goto fail2;
301 }
302 sc->sc_ipq = if_percpuq_create(ifp);
303 if_deferred_start_init(ifp, NULL);
304
305 if (hvn_rndis_attach(sc)) {
306 aprint_error_dev(self, "failed to init RNDIS\n");
307 goto fail1;
308 }
309
310 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
311 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
	    sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
313
314 if (hvn_set_capabilities(sc)) {
315 aprint_error_dev(self, "failed to setup offloading\n");
316 goto fail2;
317 }
318
319 if (hvn_get_lladdr(sc, enaddr)) {
320 aprint_error_dev(self,
321 "failed to obtain an ethernet address\n");
322 goto fail2;
323 }
324 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
325
326 ether_ifattach(ifp, enaddr);
327 if_register(ifp);
328
329 if (pmf_device_register(self, NULL, NULL))
330 pmf_class_network_register(self, ifp);
331 else
332 aprint_error_dev(self, "couldn't establish power handler\n");
333
334 SET(sc->sc_flags, HVN_SCF_ATTACHED);
335 return;
336
337 fail2: hvn_rndis_detach(sc);
338 fail1: hvn_rx_ring_destroy(sc);
339 hvn_tx_ring_destroy(sc);
340 hvn_nvs_detach(sc);
341 }
342
343 static int
344 hvn_detach(device_t self, int flags)
345 {
346 struct hvn_softc *sc = device_private(self);
347 struct ifnet *ifp = SC2IFP(sc);
348
349 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
350 return 0;
351
352 hvn_stop(ifp, 1);
353
354 pmf_device_deregister(self);
355
356 ether_ifdetach(ifp);
357 if_detach(ifp);
358 if_percpuq_destroy(sc->sc_ipq);
359
360 hvn_rndis_detach(sc);
361 hvn_rx_ring_destroy(sc);
362 hvn_tx_ring_destroy(sc);
363 hvn_nvs_detach(sc);
364
365 return 0;
366 }
367
368 static int
hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
370 {
371 struct hvn_softc *sc = IFP2SC(ifp);
372 struct ifreq *ifr = (struct ifreq *)data;
373 int s, error = 0;
374
375 s = splnet();
376
377 switch (command) {
378 case SIOCSIFFLAGS:
379 if (ifp->if_flags & IFF_UP) {
380 if (ifp->if_flags & IFF_RUNNING)
381 error = ENETRESET;
382 else {
383 error = hvn_init(ifp);
384 if (error)
385 ifp->if_flags &= ~IFF_UP;
386 }
387 } else {
388 if (ifp->if_flags & IFF_RUNNING)
389 hvn_stop(ifp, 1);
390 }
391 break;
392 case SIOCGIFMEDIA:
393 case SIOCSIFMEDIA:
394 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
395 break;
396 default:
397 error = ether_ioctl(ifp, command, data);
398 break;
399 }
400
401 if (error == ENETRESET) {
402 if (ifp->if_flags & IFF_RUNNING)
403 hvn_iff(sc);
404 error = 0;
405 }
406
407 splx(s);
408
409 return error;
410 }
411
412 static int
413 hvn_media_change(struct ifnet *ifp)
414 {
415
416 return 0;
417 }
418
419 static void
420 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
421 {
422 struct hvn_softc *sc = IFP2SC(ifp);
423 int link_state;
424
425 link_state = sc->sc_link_state;
426 hvn_get_link_status(sc);
427 if (link_state != sc->sc_link_state)
428 if_link_state_change(ifp, sc->sc_link_state);
429
430 ifmr->ifm_status = IFM_AVALID;
431 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
432 if (sc->sc_link_state == LINK_STATE_UP)
433 ifmr->ifm_status |= IFM_ACTIVE;
434 }
435
436 static int
437 hvn_iff(struct hvn_softc *sc)
438 {
439
440 /* XXX */
441 sc->sc_promisc = 0;
442
443 return 0;
444 }
445
446 static int
447 hvn_init(struct ifnet *ifp)
448 {
449 struct hvn_softc *sc = IFP2SC(ifp);
450 int error;
451
452 hvn_stop(ifp, 0);
453
454 error = hvn_iff(sc);
455 if (error)
456 return error;
457
458 error = hvn_rndis_open(sc);
459 if (error == 0) {
460 ifp->if_flags |= IFF_RUNNING;
461 ifp->if_flags &= ~IFF_OACTIVE;
462 }
463 return error;
464 }
465
466 static void
467 hvn_stop(struct ifnet *ifp, int disable)
468 {
469 struct hvn_softc *sc = IFP2SC(ifp);
470
471 hvn_rndis_close(sc);
472
473 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
474 }
475
476 static void
477 hvn_start(struct ifnet *ifp)
478 {
479 struct hvn_softc *sc = IFP2SC(ifp);
480 struct hvn_tx_desc *txd;
481 struct mbuf *m;
482
483 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
484 return;
485
486 for (;;) {
487 if (!sc->sc_tx_avail) {
488 /* transient */
489 ifp->if_flags |= IFF_OACTIVE;
490 break;
491 }
492
493 IFQ_DEQUEUE(&ifp->if_snd, m);
494 if (m == NULL)
495 break;
496
497 if (hvn_encap(sc, m, &txd)) {
498 /* the chain is too large */
499 ifp->if_oerrors++;
500 m_freem(m);
501 continue;
502 }
503
504 bpf_mtap(ifp, m, BPF_D_OUT);
505
506 if (hvn_rndis_output(sc, txd)) {
507 hvn_decap(sc, txd);
508 ifp->if_oerrors++;
509 m_freem(m);
510 continue;
511 }
512
513 sc->sc_tx_next++;
514 }
515 }
516
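/*
 * Append a per-packet-info record of the given type to an RNDIS packet
 * message and return a pointer to its data area.  The caller must make
 * sure the record fits within pktsize (asserted below).
 */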
517 static inline char *
518 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
519 size_t datalen, uint32_t type)
520 {
521 struct rndis_pktinfo *pi;
522 size_t pi_size = sizeof(*pi) + datalen;
523 char *cp;
524
525 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
526 pktsize);
527
528 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
529 pi = (struct rndis_pktinfo *)cp;
530 pi->rm_size = pi_size;
531 pi->rm_type = type;
532 pi->rm_pktinfooffset = sizeof(*pi);
533 pkt->rm_pktinfolen += pi_size;
534 pkt->rm_dataoffset += pi_size;
535 pkt->rm_len += pi_size;
536
537 return (char *)pi->rm_data;
538 }
539
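/*
 * Claim a free transmit descriptor, build the RNDIS packet message
 * (including optional VLAN and checksum-offload records), load the mbuf
 * chain into the descriptor's DMA map and fill in the VMBus
 * scatter/gather list; the first element points at the RNDIS header.
 */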
540 static int
541 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
542 {
543 struct hvn_tx_desc *txd;
544 struct rndis_packet_msg *pkt;
545 bus_dma_segment_t *seg;
546 size_t pktlen;
547 int i, rv;
548
549 do {
550 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
551 sc->sc_tx_next++;
552 } while (!txd->txd_ready);
553 txd->txd_ready = 0;
554
555 pkt = txd->txd_req;
556 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
557 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
558 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
559 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
560 pkt->rm_datalen = m->m_pkthdr.len;
561 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
562 pkt->rm_pktinfolen = 0;
563
564 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
565 BUS_DMA_NOWAIT);
566 switch (rv) {
567 case 0:
568 break;
569 case EFBIG:
570 if (m_defrag(m, M_NOWAIT) == 0 &&
571 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
572 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
573 break;
574 /* FALLTHROUGH */
575 default:
576 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
577 return -1;
578 }
579 txd->txd_buf = m;
580
581 if (m->m_flags & M_VLANTAG) {
582 uint32_t vlan;
583 char *cp;
584
585 vlan = NDIS_VLAN_INFO_MAKE(
586 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
587 EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
588 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
589 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
590 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
591 }
592
593 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
594 M_CSUM_TCPv4)) {
595 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
596 char *cp;
597
598 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
599 csum |= NDIS_TXCSUM_INFO_IPCS;
600 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
601 csum |= NDIS_TXCSUM_INFO_TCPCS;
602 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
603 csum |= NDIS_TXCSUM_INFO_UDPCS;
604 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
605 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
606 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
607 }
608
609 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
610 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
611
612 /* Attach an RNDIS message to the first slot */
613 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
614 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
615 txd->txd_sgl[0].gpa_len = pktlen;
616 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
617
618 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
619 seg = &txd->txd_dmap->dm_segs[i];
620 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
621 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
622 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
623 }
624
625 *txd0 = txd;
626
627 atomic_dec_uint(&sc->sc_tx_avail);
628
629 return 0;
630 }
631
632 static void
633 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
634 {
635 struct ifnet *ifp = SC2IFP(sc);
636
637 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
638 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
639 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
640 txd->txd_buf = NULL;
641 txd->txd_nsge = 0;
642 txd->txd_ready = 1;
643 atomic_inc_uint(&sc->sc_tx_avail);
644 ifp->if_flags &= ~IFF_OACTIVE;
645 }
646
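/*
 * Transmit completion: the descriptor index is carried in the upper
 * 32 bits of the transaction id.  Unload the DMA map, free the mbuf and
 * return the descriptor to the free pool.
 */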
647 static void
648 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
649 {
650 struct ifnet *ifp = SC2IFP(sc);
651 struct hvn_tx_desc *txd;
652 struct mbuf *m;
653 uint32_t id = tid >> 32;
654
655 if ((tid & 0xffffffffU) != 0)
656 return;
657
658 id -= HVN_NVS_CHIM_SIG;
659 if (id >= HVN_TX_DESC) {
		device_printf(sc->sc_dev, "tx packet index too large: %u\n", id);
661 return;
662 }
663
664 txd = &sc->sc_tx_desc[id];
665
666 if ((m = txd->txd_buf) == NULL) {
667 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
668 return;
669 }
670 txd->txd_buf = NULL;
671
672 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
673 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
674 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
675 m_freem(m);
676 ifp->if_opackets++;
677
678 txd->txd_ready = 1;
679
680 atomic_inc_uint(&sc->sc_tx_avail);
681 ifp->if_flags &= ~IFF_OACTIVE;
682 }
683
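/*
 * Allocate the receive buffer, hand it to the host through a GPADL
 * handle and connect it with an NVS RXBUF_CONN request.
 */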
684 static int
685 hvn_rx_ring_create(struct hvn_softc *sc)
686 {
687 struct hvn_nvs_rxbuf_conn cmd;
688 struct hvn_nvs_rxbuf_conn_resp *rsp;
689 uint64_t tid;
690
691 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
692 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
693 else
694 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
695 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
696 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE);
697 if (sc->sc_rx_ring == NULL) {
698 DPRINTF("%s: failed to allocate Rx ring buffer\n",
699 device_xname(sc->sc_dev));
700 return -1;
701 }
702 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
703 &sc->sc_rx_hndl)) {
704 DPRINTF("%s: failed to obtain a PA handle\n",
705 device_xname(sc->sc_dev));
706 goto errout;
707 }
708
709 memset(&cmd, 0, sizeof(cmd));
710 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
711 cmd.nvs_gpadl = sc->sc_rx_hndl;
712 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
713
714 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
715 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
716 goto errout;
717
718 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
719 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
720 DPRINTF("%s: failed to set up the Rx ring\n",
721 device_xname(sc->sc_dev));
722 goto errout;
723 }
724 if (rsp->nvs_nsect > 1) {
725 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
726 device_xname(sc->sc_dev), rsp->nvs_nsect);
727 hvn_rx_ring_destroy(sc);
728 return -1;
729 }
730 return 0;
731
732 errout:
733 if (sc->sc_rx_hndl) {
734 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
735 sc->sc_rx_hndl = 0;
736 }
737 if (sc->sc_rx_ring) {
738 kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
739 sc->sc_rx_ring = NULL;
740 }
741 return -1;
742 }
743
744 static int
745 hvn_rx_ring_destroy(struct hvn_softc *sc)
746 {
747 struct hvn_nvs_rxbuf_disconn cmd;
748 uint64_t tid;
749
750 if (sc->sc_rx_ring == NULL)
751 return 0;
752
753 memset(&cmd, 0, sizeof(cmd));
754 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
755 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
756
757 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
758 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
759 return -1;
760
761 delay(100);
762
763 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
764
765 sc->sc_rx_hndl = 0;
766
767 kmem_free(sc->sc_rx_ring, sc->sc_rx_size);
768 sc->sc_rx_ring = NULL;
769
770 return 0;
771 }
772
773 static int
774 hvn_tx_ring_create(struct hvn_softc *sc)
775 {
776 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
777 struct hvn_tx_desc *txd;
778 bus_dma_segment_t *seg;
779 size_t msgsize;
780 int i, rsegs;
781 paddr_t pa;
782
783 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
784
785 /* Allocate memory to store RNDIS messages */
786 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
787 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
		DPRINTF("%s: failed to allocate memory for RNDIS messages\n",
789 device_xname(sc->sc_dev));
790 goto errout;
791 }
792 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
793 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
		DPRINTF("%s: failed to establish mapping for RNDIS messages\n",
795 device_xname(sc->sc_dev));
796 goto errout;
797 }
798 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
799 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
800 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
		DPRINTF("%s: failed to create map for RNDIS messages\n",
802 device_xname(sc->sc_dev));
803 goto errout;
804 }
805 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
806 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
		DPRINTF("%s: failed to load map for RNDIS messages\n",
808 device_xname(sc->sc_dev));
809 goto errout;
810 }
811
812 for (i = 0; i < HVN_TX_DESC; i++) {
813 txd = &sc->sc_tx_desc[i];
814 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
815 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
816 &txd->txd_dmap)) {
817 DPRINTF("%s: failed to create map for TX descriptors\n",
818 device_xname(sc->sc_dev));
819 goto errout;
820 }
821 seg = &sc->sc_tx_rmap->dm_segs[0];
822 pa = seg->ds_addr + (msgsize * i);
823 txd->txd_gpa.gpa_page = atop(pa);
824 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
825 txd->txd_gpa.gpa_len = msgsize;
826 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
827 txd->txd_id = i + HVN_NVS_CHIM_SIG;
828 txd->txd_ready = 1;
829 }
830 sc->sc_tx_avail = HVN_TX_DESC;
831
832 return 0;
833
834 errout:
835 hvn_tx_ring_destroy(sc);
836 return -1;
837 }
838
839 static void
840 hvn_tx_ring_destroy(struct hvn_softc *sc)
841 {
842 struct hvn_tx_desc *txd;
843 int i;
844
845 for (i = 0; i < HVN_TX_DESC; i++) {
846 txd = &sc->sc_tx_desc[i];
847 if (txd->txd_dmap == NULL)
848 continue;
849 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
850 BUS_DMASYNC_POSTWRITE);
851 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
852 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
853 txd->txd_dmap = NULL;
854 if (txd->txd_buf == NULL)
855 continue;
856 m_free(txd->txd_buf);
857 txd->txd_buf = NULL;
858 }
859 if (sc->sc_tx_rmap) {
860 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
861 BUS_DMASYNC_POSTWRITE);
862 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
863 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
864 }
865 if (sc->sc_tx_msgs) {
866 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
867
868 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
869 msgsize * HVN_TX_DESC);
870 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
871 }
872 sc->sc_tx_rmap = NULL;
873 sc->sc_tx_msgs = NULL;
874 }
875
876 static int
877 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
878 {
879 size_t addrlen = ETHER_ADDR_LEN;
880 int rv;
881
882 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
883 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
884 rv = -1;
885 return rv;
886 }
887
888 static void
889 hvn_get_link_status(struct hvn_softc *sc)
890 {
891 uint32_t state;
892 size_t len = sizeof(state);
893
894 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
895 &state, &len) == 0)
896 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
897 LINK_STATE_UP : LINK_STATE_DOWN;
898 }
899
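/*
 * Open the VMBus channel and negotiate the NVS protocol version, then
 * initialize NDIS with a version matching the negotiated protocol.
 */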
900 static int
901 hvn_nvs_attach(struct hvn_softc *sc)
902 {
903 static const uint32_t protos[] = {
904 HVN_NVS_PROTO_VERSION_5,
905 HVN_NVS_PROTO_VERSION_4,
906 HVN_NVS_PROTO_VERSION_2,
907 HVN_NVS_PROTO_VERSION_1
908 };
909 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP;
910 struct hvn_nvs_init cmd;
911 struct hvn_nvs_init_resp *rsp;
912 struct hvn_nvs_ndis_init ncmd;
913 struct hvn_nvs_ndis_conf ccmd;
914 uint32_t ndisver, ringsize;
915 uint64_t tid;
916 int i;
917
918 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, kmemflags);
919 if (sc->sc_nvsbuf == NULL) {
920 DPRINTF("%s: failed to allocate channel data buffer\n",
921 device_xname(sc->sc_dev));
922 return -1;
923 }
924
925 /* We need to be able to fit all RNDIS control and data messages */
926 ringsize = HVN_RNDIS_CTLREQS *
927 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
928 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
929 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
930
931 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
932
933 if (vmbus_channel_setdeferred(sc->sc_chan, device_xname(sc->sc_dev))) {
934 aprint_error_dev(sc->sc_dev,
935 "failed to create the interrupt thread\n");
936 return -1;
937 }
938
939 /* Associate our interrupt handler with the channel */
940 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
941 hvn_nvs_intr, sc)) {
942 DPRINTF("%s: failed to open channel\n",
943 device_xname(sc->sc_dev));
944 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
945 return -1;
946 }
947
948 memset(&cmd, 0, sizeof(cmd));
949 cmd.nvs_type = HVN_NVS_TYPE_INIT;
950 for (i = 0; i < __arraycount(protos); i++) {
951 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
952 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
953 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
954 return -1;
955
956 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
957 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
958 sc->sc_proto = protos[i];
959 break;
960 }
961 }
962 if (i == __arraycount(protos)) {
963 DPRINTF("%s: failed to negotiate NVSP version\n",
964 device_xname(sc->sc_dev));
965 return -1;
966 }
967
968 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
969 memset(&ccmd, 0, sizeof(ccmd));
970 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
971 ccmd.nvs_mtu = HVN_MAXMTU;
972 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
973
974 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
975 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
976 return -1;
977 }
978
979 memset(&ncmd, 0, sizeof(ncmd));
980 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
981 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
982 ndisver = NDIS_VERSION_6_1;
983 else
984 ndisver = NDIS_VERSION_6_30;
985 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
986 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
987
988 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
989 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
990 return -1;
991
992 sc->sc_ndisver = ndisver;
993
994 return 0;
995 }
996
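/*
 * Channel callback: drain the VMBus ring buffer, copying NVS command
 * responses back for hvn_nvs_cmd(), completing transmitted packets on
 * RNDIS_ACK and passing inbound RNDIS packets to hvn_rndis_input().
 */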
997 static void
998 hvn_nvs_intr(void *arg)
999 {
1000 struct hvn_softc *sc = arg;
1001 struct ifnet *ifp = SC2IFP(sc);
1002 struct vmbus_chanpkt_hdr *cph;
1003 const struct hvn_nvs_hdr *nvs;
1004 uint64_t rid;
1005 uint32_t rlen;
1006 int rv;
1007 bool dotx = false;
1008
1009 for (;;) {
1010 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
1011 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
1012 if (rv != 0 || rlen == 0) {
1013 if (rv != EAGAIN)
1014 device_printf(sc->sc_dev,
1015 "failed to receive an NVSP packet\n");
1016 break;
1017 }
1018 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
1019 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
1020
1021 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1022 switch (nvs->nvs_type) {
1023 case HVN_NVS_TYPE_INIT_RESP:
1024 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1025 case HVN_NVS_TYPE_CHIM_CONNRESP:
1026 case HVN_NVS_TYPE_SUBCH_RESP:
1027 /* copy the response back */
1028 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1029 sc->sc_nvsdone = 1;
1030 wakeup(&sc->sc_nvsrsp);
1031 break;
1032 case HVN_NVS_TYPE_RNDIS_ACK:
1033 dotx = true;
1034 hvn_txeof(sc, cph->cph_tid);
1035 break;
1036 default:
1037 device_printf(sc->sc_dev,
1038 "unhandled NVSP packet type %u "
1039 "on completion\n", nvs->nvs_type);
1040 break;
1041 }
1042 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1043 switch (nvs->nvs_type) {
1044 case HVN_NVS_TYPE_RNDIS:
1045 hvn_rndis_input(sc, cph->cph_tid, cph);
1046 break;
1047 default:
1048 device_printf(sc->sc_dev,
1049 "unhandled NVSP packet type %u "
1050 "on receive\n", nvs->nvs_type);
1051 break;
1052 }
1053 } else
1054 device_printf(sc->sc_dev,
1055 "unknown NVSP packet type %u\n", cph->cph_type);
1056 }
1057
1058 if (dotx)
1059 if_schedule_deferred_start(ifp);
1060 }
1061
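/*
 * Send an NVS command over the channel, retrying while the ring is
 * full.  If timo is non-zero, wait for the response by polling the
 * channel until the completion arrives or the timeout expires.
 */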
1062 static int
1063 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1064 int timo)
1065 {
1066 struct hvn_nvs_hdr *hdr = cmd;
1067 int tries = 10;
1068 int rv, s;
1069
1070 sc->sc_nvsdone = 0;
1071
1072 do {
1073 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1074 tid, VMBUS_CHANPKT_TYPE_INBAND,
1075 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1076 if (rv == EAGAIN) {
1077 if (cold)
1078 delay(1000);
1079 else
1080 tsleep(cmd, PRIBIO, "nvsout", 1);
1081 } else if (rv) {
1082 DPRINTF("%s: NVSP operation %u send error %d\n",
1083 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1084 return rv;
1085 }
1086 } while (rv != 0 && --tries > 0);
1087
1088 if (tries == 0 && rv != 0) {
1089 device_printf(sc->sc_dev,
1090 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1091 return rv;
1092 }
1093
1094 if (timo == 0)
1095 return 0;
1096
1097 do {
1098 if (cold)
1099 delay(1000);
1100 else
1101 tsleep(sc, PRIBIO | PCATCH, "nvscmd", 1);
1102 s = splnet();
1103 hvn_nvs_intr(sc);
1104 splx(s);
1105 } while (--timo > 0 && sc->sc_nvsdone != 1);
1106
1107 if (timo == 0 && sc->sc_nvsdone != 1) {
1108 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1109 hdr->nvs_type);
1110 return ETIMEDOUT;
1111 }
1112 return 0;
1113 }
1114
1115 static int
1116 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1117 {
1118 struct hvn_nvs_rndis_ack cmd;
1119 int tries = 5;
1120 int rv;
1121
1122 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1123 cmd.nvs_status = HVN_NVS_STATUS_OK;
1124 do {
1125 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1126 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1127 if (rv == EAGAIN)
1128 delay(10);
1129 else if (rv) {
1130 DPRINTF("%s: NVSP acknowledgement error %d\n",
1131 device_xname(sc->sc_dev), rv);
1132 return rv;
1133 }
1134 } while (rv != 0 && --tries > 0);
1135 return rv;
1136 }
1137
1138 static void
1139 hvn_nvs_detach(struct hvn_softc *sc)
1140 {
1141
1142 if (vmbus_channel_close(sc->sc_chan) == 0) {
1143 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1144 sc->sc_nvsbuf = NULL;
1145 }
1146 }
1147
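/*
 * RNDIS control requests cycle through three queues: they are taken
 * from the free queue, placed on the submission queue while the host
 * processes them, moved to the completion queue once a response
 * arrives and finally returned to the free queue by hvn_free_cmd().
 */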
1148 static inline struct rndis_cmd *
1149 hvn_alloc_cmd(struct hvn_softc *sc)
1150 {
1151 struct rndis_cmd *rc;
1152
1153 mutex_enter(&sc->sc_cntl_fqlck);
1154 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1155 /* XXX use condvar(9) instead of mtsleep */
1156 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1157 &sc->sc_cntl_fqlck);
1158 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1159 mutex_exit(&sc->sc_cntl_fqlck);
1160 return rc;
1161 }
1162
1163 static inline void
1164 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1165 {
1166
1167 mutex_enter(&sc->sc_cntl_sqlck);
1168 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1169 mutex_exit(&sc->sc_cntl_sqlck);
1170 }
1171
1172 static inline struct rndis_cmd *
1173 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1174 {
1175 struct rndis_cmd *rc;
1176
1177 mutex_enter(&sc->sc_cntl_sqlck);
1178 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1179 if (rc->rc_id == id) {
1180 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1181 break;
1182 }
1183 }
1184 mutex_exit(&sc->sc_cntl_sqlck);
1185 if (rc != NULL) {
1186 mutex_enter(&sc->sc_cntl_cqlck);
1187 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1188 mutex_exit(&sc->sc_cntl_cqlck);
1189 }
1190 return rc;
1191 }
1192
1193 static inline void
1194 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1195 {
1196
1197 mutex_enter(&sc->sc_cntl_cqlck);
1198 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1199 mutex_exit(&sc->sc_cntl_cqlck);
1200 }
1201
1202 static inline int
1203 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1204 {
1205 struct rndis_cmd *rn;
1206
1207 mutex_enter(&sc->sc_cntl_sqlck);
1208 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1209 if (rn == rc) {
1210 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1211 mutex_exit(&sc->sc_cntl_sqlck);
1212 return 0;
1213 }
1214 }
1215 mutex_exit(&sc->sc_cntl_sqlck);
1216 return -1;
1217 }
1218
1219 static inline void
1220 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1221 {
1222
1223 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1224 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1225 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1226 mutex_enter(&sc->sc_cntl_fqlck);
1227 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1228 mutex_exit(&sc->sc_cntl_fqlck);
1229 wakeup(&sc->sc_cntl_fq);
1230 }
1231
1232 static int
1233 hvn_rndis_attach(struct hvn_softc *sc)
1234 {
1235 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1236 struct rndis_init_req *req;
1237 struct rndis_init_comp *cmp;
1238 struct rndis_cmd *rc;
1239 int i, rv;
1240
1241 /* RNDIS control message queues */
1242 TAILQ_INIT(&sc->sc_cntl_sq);
1243 TAILQ_INIT(&sc->sc_cntl_cq);
1244 TAILQ_INIT(&sc->sc_cntl_fq);
1245 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1246 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1247 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1248
1249 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1250 rc = &sc->sc_cntl_msgs[i];
1251 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1252 dmaflags, &rc->rc_dmap)) {
1253 DPRINTF("%s: failed to create RNDIS command map\n",
1254 device_xname(sc->sc_dev));
1255 goto errout;
1256 }
1257 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1258 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1259 DPRINTF("%s: failed to allocate RNDIS command\n",
1260 device_xname(sc->sc_dev));
1261 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1262 goto errout;
1263 }
1264 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1265 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1266 DPRINTF("%s: failed to allocate RNDIS command\n",
1267 device_xname(sc->sc_dev));
1268 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1269 rc->rc_nsegs);
1270 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1271 goto errout;
1272 }
1273 memset(rc->rc_req, 0, PAGE_SIZE);
1274 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1275 PAGE_SIZE, NULL, dmaflags)) {
1276 DPRINTF("%s: failed to load RNDIS command map\n",
1277 device_xname(sc->sc_dev));
1278 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1279 rc->rc_nsegs);
1280 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1281 goto errout;
1282 }
1283 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1284 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1285 }
1286
1287 rc = hvn_alloc_cmd(sc);
1288
1289 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1290 BUS_DMASYNC_PREREAD);
1291
1292 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1293
1294 req = rc->rc_req;
1295 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1296 req->rm_len = sizeof(*req);
1297 req->rm_rid = rc->rc_id;
1298 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1299 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1300 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1301
1302 rc->rc_cmplen = sizeof(*cmp);
1303
1304 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1305 BUS_DMASYNC_PREWRITE);
1306
1307 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1308 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1309 device_xname(sc->sc_dev), rv);
1310 hvn_free_cmd(sc, rc);
1311 goto errout;
1312 }
1313 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1314 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1315 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1316 device_xname(sc->sc_dev), cmp->rm_status);
1317 hvn_free_cmd(sc, rc);
1318 goto errout;
1319 }
1320
1321 hvn_free_cmd(sc, rc);
1322
1323 /* Initialize RNDIS Data command */
1324 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1325 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1326 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1327 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1328
1329 return 0;
1330
1331 errout:
1332 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1333 rc = &sc->sc_cntl_msgs[i];
1334 if (rc->rc_req == NULL)
1335 continue;
1336 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1337 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1338 rc->rc_req = NULL;
1339 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1340 }
1341 return -1;
1342 }
1343
1344 static int
1345 hvn_set_capabilities(struct hvn_softc *sc)
1346 {
1347 struct ndis_offload_params params;
1348 size_t len = sizeof(params);
1349
	memset(&params, 0, sizeof(params));
1351
1352 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1353 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1354 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1355 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1356 } else {
1357 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1358 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1359 }
1360
1361 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1362 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1363 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1364 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1365 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1366 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1367 }
1368
	return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1370 }
1371
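/*
 * Issue an RNDIS control request: send the request page as a single
 * scatter/gather element together with the NVS RNDIS header, then poll
 * the channel until the matching completion is copied back or the
 * timeout expires.
 */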
1372 static int
1373 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1374 {
1375 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1376 struct rndis_msghdr *hdr = rc->rc_req;
1377 struct vmbus_gpa sgl[1];
1378 int tries = 10;
1379 int rv, s;
1380
1381 KASSERT(timo > 0);
1382
1383 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1384 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1385 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1386
1387 sgl[0].gpa_page = rc->rc_gpa;
1388 sgl[0].gpa_len = hdr->rm_len;
1389 sgl[0].gpa_ofs = 0;
1390
1391 rc->rc_done = 0;
1392
1393 hvn_submit_cmd(sc, rc);
1394
1395 do {
1396 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1397 sizeof(*msg), rc->rc_id);
1398 if (rv == EAGAIN) {
1399 if (cold)
1400 delay(1000);
1401 else
1402 tsleep(rc, PRIBIO, "rndisout", 1);
1403 } else if (rv) {
1404 DPRINTF("%s: RNDIS operation %u send error %d\n",
1405 device_xname(sc->sc_dev), hdr->rm_type, rv);
1406 hvn_rollback_cmd(sc, rc);
1407 return rv;
1408 }
1409 } while (rv != 0 && --tries > 0);
1410
1411 if (tries == 0 && rv != 0) {
1412 device_printf(sc->sc_dev,
1413 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1414 return rv;
1415 }
1416
1417 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1418 BUS_DMASYNC_POSTWRITE);
1419
1420 do {
1421 if (cold)
1422 delay(1000);
1423 else
1424 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", 1);
1425 s = splnet();
1426 hvn_nvs_intr(sc);
1427 splx(s);
1428 } while (--timo > 0 && rc->rc_done != 1);
1429
1430 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1431 BUS_DMASYNC_POSTREAD);
1432
1433 if (rc->rc_done != 1) {
1434 rv = timo == 0 ? ETIMEDOUT : EINTR;
1435 if (hvn_rollback_cmd(sc, rc)) {
1436 hvn_release_cmd(sc, rc);
1437 rv = 0;
1438 } else if (rv == ETIMEDOUT) {
1439 device_printf(sc->sc_dev,
1440 "RNDIS operation %u timed out\n", hdr->rm_type);
1441 }
1442 return rv;
1443 }
1444
1445 hvn_release_cmd(sc, rc);
1446 return 0;
1447 }
1448
1449 static void
1450 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1451 {
1452 struct vmbus_chanpkt_prplist *cp = arg;
1453 uint32_t off, len, type;
1454 int i;
1455
1456 if (sc->sc_rx_ring == NULL) {
1457 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1458 return;
1459 }
1460
1461 for (i = 0; i < cp->cp_range_cnt; i++) {
1462 off = cp->cp_range[i].gpa_ofs;
1463 len = cp->cp_range[i].gpa_len;
1464
1465 KASSERT(off + len <= sc->sc_rx_size);
1466 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1467
1468 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1469 switch (type) {
1470 /* data message */
1471 case REMOTE_NDIS_PACKET_MSG:
1472 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1473 break;
1474 /* completion messages */
1475 case REMOTE_NDIS_INITIALIZE_CMPLT:
1476 case REMOTE_NDIS_QUERY_CMPLT:
1477 case REMOTE_NDIS_SET_CMPLT:
1478 case REMOTE_NDIS_RESET_CMPLT:
1479 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1480 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1481 break;
1482 /* notification message */
1483 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1484 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1485 break;
1486 default:
1487 device_printf(sc->sc_dev,
1488 "unhandled RNDIS message type %u\n", type);
1489 break;
1490 }
1491 }
1492
1493 hvn_nvs_ack(sc, tid);
1494 }
1495
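/*
 * Allocate an mbuf (a cluster or external storage for large frames),
 * align the payload and copy the received frame into it.
 */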
1496 static inline struct mbuf *
1497 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1498 {
1499 struct ifnet *ifp = SC2IFP(sc);
1500 struct mbuf *m;
1501 size_t size = len + ETHER_ALIGN;
1502
1503 MGETHDR(m, M_NOWAIT, MT_DATA);
1504 if (m == NULL)
1505 return NULL;
1506
1507 if (size > MHLEN) {
1508 if (size <= MCLBYTES)
1509 MCLGET(m, M_NOWAIT);
1510 else
1511 MEXTMALLOC(m, size, M_NOWAIT);
1512 if ((m->m_flags & M_EXT) == 0) {
1513 m_freem(m);
1514 return NULL;
1515 }
1516 }
1517
1518 m->m_len = m->m_pkthdr.len = size;
1519 m_adj(m, ETHER_ALIGN);
1520 m_copyback(m, 0, len, buf);
1521 m_set_rcvif(m, ifp);
1522 return m;
1523 }
1524
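/*
 * Parse an RNDIS data packet: copy the frame into an mbuf, walk the
 * attached per-packet-info records to recover checksum-offload and
 * VLAN information, and hand the result to the network stack.
 */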
1525 static void
1526 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1527 {
1528 struct ifnet *ifp = SC2IFP(sc);
1529 struct rndis_packet_msg *pkt;
1530 struct rndis_pktinfo *pi;
1531 uint32_t csum, vlan;
1532 struct mbuf *m;
1533
1534 if (!(ifp->if_flags & IFF_RUNNING))
1535 return;
1536
1537 if (len < sizeof(*pkt)) {
1538 device_printf(sc->sc_dev, "data packet too short: %u\n",
1539 len);
1540 return;
1541 }
1542
1543 pkt = (struct rndis_packet_msg *)buf;
1544 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1545 device_printf(sc->sc_dev,
1546 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1547 pkt->rm_datalen);
1548 return;
1549 }
1550
1551 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1552 pkt->rm_datalen)) == NULL) {
1553 ifp->if_ierrors++;
1554 return;
1555 }
1556
1557 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1558 device_printf(sc->sc_dev,
1559 "pktinfo is out of bounds: %u@%u vs %u\n",
1560 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1561 goto done;
1562 }
1563
1564 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1565 pkt->rm_pktinfooffset);
1566 while (pkt->rm_pktinfolen > 0) {
1567 if (pi->rm_size > pkt->rm_pktinfolen) {
1568 device_printf(sc->sc_dev,
1569 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1570 pkt->rm_pktinfolen);
1571 break;
1572 }
1573
1574 switch (pi->rm_type) {
1575 case NDIS_PKTINFO_TYPE_CSUM:
1576 memcpy(&csum, pi->rm_data, sizeof(csum));
1577 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1578 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1579 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1580 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1581 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1582 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1583 break;
1584 case NDIS_PKTINFO_TYPE_VLAN:
1585 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1586 if (vlan != 0xffffffff) {
1587 m->m_pkthdr.ether_vtag =
1588 NDIS_VLAN_INFO_ID(vlan) |
1589 (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1590 m->m_flags |= M_VLANTAG;
1591 }
1592 break;
1593 default:
1594 DPRINTF("%s: unhandled pktinfo type %u\n",
1595 device_xname(sc->sc_dev), pi->rm_type);
1596 break;
1597 }
1598
1599 pkt->rm_pktinfolen -= pi->rm_size;
1600 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1601 }
1602
1603 done:
1604 if_percpuq_enqueue(sc->sc_ipq, m);
1605 }
1606
1607 static void
1608 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1609 {
1610 struct rndis_cmd *rc;
1611 uint32_t id;
1612
1613 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1614 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1615 if (len < rc->rc_cmplen)
1616 device_printf(sc->sc_dev,
1617 "RNDIS response %u too short: %u\n", id, len);
1618 else
1619 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1620 if (len > rc->rc_cmplen &&
1621 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1622 device_printf(sc->sc_dev,
1623 "RNDIS response %u too large: %u\n", id, len);
1624 else if (len > rc->rc_cmplen)
1625 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1626 len - rc->rc_cmplen);
1627 rc->rc_done = 1;
1628 wakeup(rc);
1629 } else {
1630 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1631 device_xname(sc->sc_dev), id);
1632 }
1633 }
1634
1635 static int
1636 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1637 {
1638 uint64_t rid = (uint64_t)txd->txd_id << 32;
1639 int rv;
1640
1641 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1642 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1643 if (rv) {
1644 DPRINTF("%s: RNDIS data send error %d\n",
1645 device_xname(sc->sc_dev), rv);
1646 return rv;
1647 }
1648 return 0;
1649 }
1650
1651 static void
1652 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1653 {
1654 struct ifnet *ifp = SC2IFP(sc);
1655 uint32_t status;
1656 int link_state = sc->sc_link_state;
1657
1658 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1659 switch (status) {
1660 case RNDIS_STATUS_MEDIA_CONNECT:
1661 sc->sc_link_state = LINK_STATE_UP;
1662 break;
1663 case RNDIS_STATUS_MEDIA_DISCONNECT:
1664 sc->sc_link_state = LINK_STATE_DOWN;
1665 break;
1666 /* Ignore these */
1667 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1668 return;
1669 default:
1670 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1671 status);
1672 return;
1673 }
1674 if (link_state != sc->sc_link_state)
1675 if_link_state_change(ifp, sc->sc_link_state);
1676 }
1677
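/*
 * Read an NDIS object identifier (OID) from the host with a
 * REMOTE_NDIS_QUERY_MSG; on success the result is copied into res and
 * *length is updated with the returned buffer length.
 */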
1678 static int
1679 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1680 {
1681 struct rndis_cmd *rc;
1682 struct rndis_query_req *req;
1683 struct rndis_query_comp *cmp;
1684 size_t olength = *length;
1685 int rv;
1686
1687 rc = hvn_alloc_cmd(sc);
1688
1689 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1690 BUS_DMASYNC_PREREAD);
1691
1692 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1693
1694 req = rc->rc_req;
1695 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1696 req->rm_len = sizeof(*req);
1697 req->rm_rid = rc->rc_id;
1698 req->rm_oid = oid;
1699 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1700
1701 rc->rc_cmplen = sizeof(*cmp);
1702
1703 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1704 BUS_DMASYNC_PREWRITE);
1705
1706 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1707 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1708 device_xname(sc->sc_dev), rv);
1709 hvn_free_cmd(sc, rc);
1710 return rv;
1711 }
1712
1713 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1714 switch (cmp->rm_status) {
1715 case RNDIS_STATUS_SUCCESS:
1716 if (cmp->rm_infobuflen > olength) {
1717 rv = EINVAL;
1718 break;
1719 }
1720 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1721 *length = cmp->rm_infobuflen;
1722 break;
1723 default:
1724 *length = 0;
1725 rv = EIO;
1726 break;
1727 }
1728
1729 hvn_free_cmd(sc, rc);
1730 return rv;
1731 }
1732
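/*
 * Write an NDIS OID with a REMOTE_NDIS_SET_MSG; the data is copied
 * right behind the request header in the command page.
 */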
1733 static int
1734 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1735 {
1736 struct rndis_cmd *rc;
1737 struct rndis_set_req *req;
1738 struct rndis_set_comp *cmp;
1739 int rv;
1740
1741 rc = hvn_alloc_cmd(sc);
1742
1743 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1744 BUS_DMASYNC_PREREAD);
1745
1746 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1747
1748 req = rc->rc_req;
1749 req->rm_type = REMOTE_NDIS_SET_MSG;
1750 req->rm_len = sizeof(*req) + length;
1751 req->rm_rid = rc->rc_id;
1752 req->rm_oid = oid;
1753 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1754
1755 rc->rc_cmplen = sizeof(*cmp);
1756
1757 if (length > 0) {
1758 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1759 req->rm_infobuflen = length;
1760 memcpy(req + 1, data, length);
1761 }
1762
1763 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1764 BUS_DMASYNC_PREWRITE);
1765
1766 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1767 DPRINTF("%s: SET_MSG failed, error %d\n",
1768 device_xname(sc->sc_dev), rv);
1769 hvn_free_cmd(sc, rc);
1770 return rv;
1771 }
1772
1773 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1774 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1775 rv = EIO;
1776
1777 hvn_free_cmd(sc, rc);
1778 return rv;
1779 }
1780
1781 static int
1782 hvn_rndis_open(struct hvn_softc *sc)
1783 {
1784 uint32_t filter;
1785 int rv;
1786
1787 if (sc->sc_promisc)
1788 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1789 else
1790 filter = RNDIS_PACKET_TYPE_BROADCAST |
1791 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1792 RNDIS_PACKET_TYPE_DIRECTED;
1793
1794 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1795 &filter, sizeof(filter));
1796 if (rv) {
1797 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1798 device_xname(sc->sc_dev), filter);
1799 }
1800 return rv;
1801 }
1802
1803 static int
1804 hvn_rndis_close(struct hvn_softc *sc)
1805 {
1806 uint32_t filter = 0;
1807 int rv;
1808
1809 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1810 &filter, sizeof(filter));
1811 if (rv) {
1812 DPRINTF("%s: failed to clear RNDIS filter\n",
1813 device_xname(sc->sc_dev));
1814 }
1815 return rv;
1816 }
1817
1818 static void
1819 hvn_rndis_detach(struct hvn_softc *sc)
1820 {
1821 struct rndis_cmd *rc;
1822 struct rndis_halt_req *req;
1823 int rv;
1824
1825 rc = hvn_alloc_cmd(sc);
1826
1827 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1828 BUS_DMASYNC_PREREAD);
1829
1830 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1831
1832 req = rc->rc_req;
1833 req->rm_type = REMOTE_NDIS_HALT_MSG;
1834 req->rm_len = sizeof(*req);
1835 req->rm_rid = rc->rc_id;
1836
1837 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1838 BUS_DMASYNC_PREWRITE);
1839
1840 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1841 DPRINTF("%s: HALT_MSG failed, error %d\n",
1842 device_xname(sc->sc_dev), rv);
1843 }
1844 hvn_free_cmd(sc, rc);
1845 }
1846