1 /* $NetBSD: if_hvn.c,v 1.1 2019/02/15 08:54:01 nonaka Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.1 2019/02/15 08:54:01 nonaka Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70
71 #ifndef ETHER_ALIGN
72 #define ETHER_ALIGN 2
73 #endif
74
75 #define HVN_NVS_MSGSIZE 32
76 #define HVN_NVS_BUFSIZE PAGE_SIZE
77
78 /*
79 * RNDIS control interface
80 */
81 #define HVN_RNDIS_CTLREQS 4
82 #define HVN_RNDIS_BUFSIZE 512
83
84 struct rndis_cmd {
85 uint32_t rc_id;
86 struct hvn_nvs_rndis rc_msg;
87 void *rc_req;
88 bus_dmamap_t rc_dmap;
89 bus_dma_segment_t rc_segs;
90 int rc_nsegs;
91 uint64_t rc_gpa;
92 struct rndis_packet_msg rc_cmp;
93 uint32_t rc_cmplen;
94 uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE];
95 int rc_done;
96 TAILQ_ENTRY(rndis_cmd) rc_entry;
97 };
98 TAILQ_HEAD(rndis_queue, rndis_cmd);
99
100 #define HVN_MAXMTU (9 * 1024)
101
102 #define HVN_RNDIS_XFER_SIZE 2048
103
104 /*
105 * Tx ring
106 */
107 #define HVN_TX_DESC 256
108 #define HVN_TX_FRAGS 15 /* 31 is the max */
109 #define HVN_TX_FRAG_SIZE PAGE_SIZE
110 #define HVN_TX_PKT_SIZE 16384
111
112 #define HVN_RNDIS_PKT_LEN \
113 (sizeof(struct rndis_packet_msg) + \
114 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
115 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
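
/*
 * Note: HVN_RNDIS_PKT_LEN is the worst case for what hvn_encap() builds:
 * one struct rndis_packet_msg header followed by at most two
 * rndis_pktinfo records (IEEE 802.1Q VLAN info and Tx checksum info).
 * The per-descriptor message slot carved out in hvn_tx_ring_create()
 * (this length rounded up to a multiple of 128 bytes) must therefore
 * be at least this large.
 */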
116
117 struct hvn_tx_desc {
118 uint32_t txd_id;
119 int txd_ready;
120 struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1];
121 int txd_nsge;
122 struct mbuf *txd_buf;
123 bus_dmamap_t txd_dmap;
124 struct vmbus_gpa txd_gpa;
125 struct rndis_packet_msg *txd_req;
126 };
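
/*
 * A Tx descriptor describes one packet as a guest physical address list:
 * txd_sgl[0] always points at the descriptor's private RNDIS message
 * (txd_gpa/txd_req), and txd_sgl[1..txd_nsge-1] cover the mbuf data
 * pages loaded through txd_dmap.  txd_ready is cleared while the
 * descriptor is owned by the host and set again on completion or when
 * the packet is unwound with hvn_decap().
 */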
127
128 struct hvn_softc {
129 device_t sc_dev;
130
131 struct vmbus_softc *sc_vmbus;
132 struct vmbus_channel *sc_chan;
133 bus_dma_tag_t sc_dmat;
134
135 struct ethercom sc_ec;
136 struct ifmedia sc_media;
137 struct if_percpuq *sc_ipq;
138 int sc_link_state;
139 int sc_promisc;
140
141 uint32_t sc_flags;
142 #define HVN_SCF_ATTACHED __BIT(0)
143
144 /* NVS protocol */
145 int sc_proto;
146 uint32_t sc_nvstid;
147 uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE];
148 uint8_t *sc_nvsbuf;
149 int sc_nvsdone;
150
151 /* RNDIS protocol */
152 int sc_ndisver;
153 uint32_t sc_rndisrid;
154 struct rndis_queue sc_cntl_sq; /* submission queue */
155 kmutex_t sc_cntl_sqlck;
156 struct rndis_queue sc_cntl_cq; /* completion queue */
157 kmutex_t sc_cntl_cqlck;
158 struct rndis_queue sc_cntl_fq; /* free queue */
159 kmutex_t sc_cntl_fqlck;
160 struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS];
161 struct hvn_nvs_rndis sc_data_msg;
162
163 /* Rx ring */
164 uint8_t *sc_rx_ring;
165 int sc_rx_size;
166 uint32_t sc_rx_hndl;
167 struct hyperv_dma sc_rx_dma;
168
169 /* Tx ring */
170 uint32_t sc_tx_next;
171 uint32_t sc_tx_avail;
172 struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
173 bus_dmamap_t sc_tx_rmap;
174 uint8_t *sc_tx_msgs;
175 bus_dma_segment_t sc_tx_mseg;
176 };
177
178 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
179 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
180
181
182 static int hvn_match(device_t, cfdata_t, void *);
183 static void hvn_attach(device_t, device_t, void *);
184 static int hvn_detach(device_t, int);
185
186 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
187 hvn_match, hvn_attach, hvn_detach, NULL);
188
189 static int hvn_ioctl(struct ifnet *, u_long, void *);
190 static int hvn_media_change(struct ifnet *);
191 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
192 static int hvn_iff(struct hvn_softc *);
193 static int hvn_init(struct ifnet *);
194 static void hvn_stop(struct ifnet *, int);
195 static void hvn_start(struct ifnet *);
196 static int hvn_encap(struct hvn_softc *, struct mbuf *,
197 struct hvn_tx_desc **);
198 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
199 static void hvn_txeof(struct hvn_softc *, uint64_t);
200 static int hvn_rx_ring_create(struct hvn_softc *);
201 static int hvn_rx_ring_destroy(struct hvn_softc *);
202 static int hvn_tx_ring_create(struct hvn_softc *);
203 static void hvn_tx_ring_destroy(struct hvn_softc *);
204 static int hvn_set_capabilities(struct hvn_softc *);
205 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
206 static void hvn_get_link_status(struct hvn_softc *);
207
208 /* NVSP */
209 static int hvn_nvs_attach(struct hvn_softc *);
210 static void hvn_nvs_intr(void *);
211 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
212 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
213 static void hvn_nvs_detach(struct hvn_softc *);
214
215 /* RNDIS */
216 static int hvn_rndis_attach(struct hvn_softc *);
217 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
218 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
219 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
220 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
221 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
222 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
223 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
224 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
225 static int hvn_rndis_open(struct hvn_softc *);
226 static int hvn_rndis_close(struct hvn_softc *);
227 static void hvn_rndis_detach(struct hvn_softc *);
228
229 static int
230 hvn_match(device_t parent, cfdata_t match, void *aux)
231 {
232 struct vmbus_attach_args *aa = aux;
233
234 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
235 return 0;
236 return 1;
237 }
238
239 static void
240 hvn_attach(device_t parent, device_t self, void *aux)
241 {
242 struct hvn_softc *sc = device_private(self);
243 struct vmbus_attach_args *aa = aux;
244 struct ifnet *ifp = SC2IFP(sc);
245 uint8_t enaddr[ETHER_ADDR_LEN];
246 int error;
247
248 sc->sc_dev = self;
249 sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
250 sc->sc_chan = aa->aa_chan;
251 sc->sc_dmat = sc->sc_vmbus->sc_dmat;
252
253 aprint_naive("\n");
254 aprint_normal(": Hyper-V NetVSC\n");
255
256 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
257
258 if (hvn_nvs_attach(sc)) {
259 aprint_error_dev(self, "failed to init NVSP\n");
260 return;
261 }
262
263 if (hvn_rx_ring_create(sc)) {
264 aprint_error_dev(self, "failed to create Rx ring\n");
265 goto fail1;
266 }
267
268 if (hvn_tx_ring_create(sc)) {
269 aprint_error_dev(self, "failed to create Tx ring\n");
270 goto fail1;
271 }
272
273 ifp->if_softc = sc;
274 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
275 ifp->if_ioctl = hvn_ioctl;
276 ifp->if_start = hvn_start;
277 ifp->if_init = hvn_init;
278 ifp->if_stop = hvn_stop;
279 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
280 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
281 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
282 if (sc->sc_ndisver > NDIS_VERSION_6_30) {
283 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
284 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
285 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
286 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
287 }
288 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
289 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
290 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
291 }
292
293 IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
294 IFQ_SET_READY(&ifp->if_snd);
295
296 ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
297 hvn_media_status);
298 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
299 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
300
301 error = if_initialize(ifp);
302 if (error) {
303 aprint_error_dev(self, "if_initialize failed(%d)\n", error);
304 goto fail1;
305 }
306 sc->sc_ipq = if_percpuq_create(ifp);
307 if_deferred_start_init(ifp, NULL);
308
309 if (hvn_rndis_attach(sc)) {
310 aprint_error_dev(self, "failed to init RNDIS\n");
311 goto fail1;
312 }
313
314 aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
315 sc->sc_proto >> 16, sc->sc_proto & 0xffff,
316 sc->sc_ndisver >> 16, sc->sc_ndisver & 0xffff);
317
318 if (hvn_set_capabilities(sc)) {
319 aprint_error_dev(self, "failed to setup offloading\n");
320 goto fail2;
321 }
322
323 if (hvn_get_lladdr(sc, enaddr)) {
324 aprint_error_dev(self,
325 "failed to obtain an ethernet address\n");
326 goto fail2;
327 }
328 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));
329
330 ether_ifattach(ifp, enaddr);
331 if_register(ifp);
332
333 if (pmf_device_register(self, NULL, NULL))
334 pmf_class_network_register(self, ifp);
335 else
336 aprint_error_dev(self, "couldn't establish power handler\n");
337
338 SET(sc->sc_flags, HVN_SCF_ATTACHED);
339 return;
340
341 fail2: hvn_rndis_detach(sc);
342 fail1: hvn_rx_ring_destroy(sc);
343 hvn_tx_ring_destroy(sc);
344 hvn_nvs_detach(sc);
345 }
346
347 static int
348 hvn_detach(device_t self, int flags)
349 {
350 struct hvn_softc *sc = device_private(self);
351 struct ifnet *ifp = SC2IFP(sc);
352
353 if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
354 return 0;
355
356 hvn_stop(ifp, 1);
357
358 pmf_device_deregister(self);
359
360 ether_ifdetach(ifp);
361 if_detach(ifp);
362 if_percpuq_destroy(sc->sc_ipq);
363
364 hvn_rndis_detach(sc);
365 hvn_rx_ring_destroy(sc);
366 hvn_tx_ring_destroy(sc);
367 hvn_nvs_detach(sc);
368
369 return 0;
370 }
371
372 static int
373 hvn_ioctl(struct ifnet *ifp, u_long command, void *data)
374 {
375 struct hvn_softc *sc = IFP2SC(ifp);
376 struct ifreq *ifr = (struct ifreq *)data;
377 int s, error = 0;
378
379 s = splnet();
380
381 switch (command) {
382 case SIOCSIFFLAGS:
383 if (ifp->if_flags & IFF_UP) {
384 if (ifp->if_flags & IFF_RUNNING)
385 error = ENETRESET;
386 else {
387 error = hvn_init(ifp);
388 if (error)
389 ifp->if_flags &= ~IFF_UP;
390 }
391 } else {
392 if (ifp->if_flags & IFF_RUNNING)
393 hvn_stop(ifp, 1);
394 }
395 break;
396 case SIOCGIFMEDIA:
397 case SIOCSIFMEDIA:
398 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
399 break;
400 default:
401 error = ether_ioctl(ifp, command, data);
402 break;
403 }
404
405 if (error == ENETRESET) {
406 if (ifp->if_flags & IFF_RUNNING)
407 hvn_iff(sc);
408 error = 0;
409 }
410
411 splx(s);
412
413 return error;
414 }
415
416 static int
417 hvn_media_change(struct ifnet *ifp)
418 {
419
420 return 0;
421 }
422
423 static void
424 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
425 {
426 struct hvn_softc *sc = IFP2SC(ifp);
427 int link_state;
428
429 link_state = sc->sc_link_state;
430 hvn_get_link_status(sc);
431 if (link_state != sc->sc_link_state)
432 if_link_state_change(ifp, sc->sc_link_state);
433
434 ifmr->ifm_status = IFM_AVALID;
435 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
436 if (sc->sc_link_state == LINK_STATE_UP)
437 ifmr->ifm_status |= IFM_ACTIVE;
438 }
439
440 static int
441 hvn_iff(struct hvn_softc *sc)
442 {
443
444 /* XXX */
445 sc->sc_promisc = 0;
446
447 return 0;
448 }
449
450 static int
451 hvn_init(struct ifnet *ifp)
452 {
453 struct hvn_softc *sc = IFP2SC(ifp);
454 int error;
455
456 hvn_stop(ifp, 0);
457
458 error = hvn_iff(sc);
459 if (error)
460 return error;
461
462 error = hvn_rndis_open(sc);
463 if (error == 0) {
464 ifp->if_flags |= IFF_RUNNING;
465 ifp->if_flags &= ~IFF_OACTIVE;
466 }
467 return error;
468 }
469
470 static void
471 hvn_stop(struct ifnet *ifp, int disable)
472 {
473 struct hvn_softc *sc = IFP2SC(ifp);
474
475 hvn_rndis_close(sc);
476
477 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
478 }
479
480 static void
481 hvn_start(struct ifnet *ifp)
482 {
483 struct hvn_softc *sc = IFP2SC(ifp);
484 struct hvn_tx_desc *txd;
485 struct mbuf *m;
486
487 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
488 return;
489
490 for (;;) {
491 if (!sc->sc_tx_avail) {
492 /* transient */
493 ifp->if_flags |= IFF_OACTIVE;
494 break;
495 }
496
497 IFQ_DEQUEUE(&ifp->if_snd, m);
498 if (m == NULL)
499 break;
500
501 if (hvn_encap(sc, m, &txd)) {
502 /* the chain is too large */
503 ifp->if_oerrors++;
504 m_freem(m);
505 continue;
506 }
507
508 bpf_mtap(ifp, m, BPF_D_OUT);
509
510 if (hvn_rndis_output(sc, txd)) {
511 hvn_decap(sc, txd);
512 ifp->if_oerrors++;
513 m_freem(m);
514 continue;
515 }
516
517 sc->sc_tx_next++;
518 }
519 }
520
521 static inline char *
522 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
523 size_t datalen, uint32_t type)
524 {
525 struct rndis_pktinfo *pi;
526 size_t pi_size = sizeof(*pi) + datalen;
527 char *cp;
528
529 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
530 pktsize);
531
532 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
533 pi = (struct rndis_pktinfo *)cp;
534 pi->rm_size = pi_size;
535 pi->rm_type = type;
536 pi->rm_pktinfooffset = sizeof(*pi);
537 pkt->rm_pktinfolen += pi_size;
538 pkt->rm_dataoffset += pi_size;
539 pkt->rm_len += pi_size;
540
541 return (char *)pi->rm_data;
542 }
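
/*
 * Bookkeeping example, assuming a freshly initialized message as built
 * by hvn_encap(): rm_pktinfooffset == sizeof(struct rndis_packet_msg)
 * and rm_pktinfolen == 0.  Appending a record consumes
 * sizeof(struct rndis_pktinfo) + datalen bytes; rm_pktinfolen,
 * rm_dataoffset and rm_len all grow by that amount, and the caller
 * copies its data (e.g. the NDIS_VLAN_INFO_MAKE() value) to the
 * returned pointer.
 */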
543
544 static int
545 hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
546 {
547 struct hvn_tx_desc *txd;
548 struct rndis_packet_msg *pkt;
549 bus_dma_segment_t *seg;
550 size_t pktlen;
551 int i, rv;
552
553 do {
554 txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
555 sc->sc_tx_next++;
556 } while (!txd->txd_ready);
557 txd->txd_ready = 0;
558
559 pkt = txd->txd_req;
560 memset(pkt, 0, HVN_RNDIS_PKT_LEN);
561 pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
562 pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
563 pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
564 pkt->rm_datalen = m->m_pkthdr.len;
565 pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
566 pkt->rm_pktinfolen = 0;
567
568 rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
569 BUS_DMA_NOWAIT);
570 switch (rv) {
571 case 0:
572 break;
573 case EFBIG:
574 if (m_defrag(m, M_NOWAIT) == 0 &&
575 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
576 BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
577 break;
578 /* FALLTHROUGH */
579 default:
580 DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
581 return -1;
582 }
583 txd->txd_buf = m;
584
585 if (m->m_flags & M_VLANTAG) {
586 uint32_t vlan;
587 char *cp;
588
589 vlan = NDIS_VLAN_INFO_MAKE(
590 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag),
591 EVL_PRIOFTAG(m->m_pkthdr.ether_vtag), 0);
592 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
593 NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
594 memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
595 }
596
597 if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
598 M_CSUM_TCPv4)) {
599 uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
600 char *cp;
601
602 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
603 csum |= NDIS_TXCSUM_INFO_IPCS;
604 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
605 csum |= NDIS_TXCSUM_INFO_TCPCS;
606 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
607 csum |= NDIS_TXCSUM_INFO_UDPCS;
608 cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
609 NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
610 memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
611 }
612
613 pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
614 pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;
615
616 /* Attach an RNDIS message to the first slot */
617 txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
618 txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
619 txd->txd_sgl[0].gpa_len = pktlen;
620 txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;
621
622 for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
623 seg = &txd->txd_dmap->dm_segs[i];
624 txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
625 txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
626 txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
627 }
628
629 *txd0 = txd;
630
631 atomic_dec_uint(&sc->sc_tx_avail);
632
633 return 0;
634 }
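
/*
 * The per-segment conversion above relies on txd_dmap being created in
 * hvn_tx_ring_create() with a maximum segment size and boundary of
 * PAGE_SIZE, so every DMA segment fits within a single page and can be
 * described by one struct vmbus_gpa (page frame, offset, length).
 */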
635
636 static void
637 hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
638 {
639 struct ifnet *ifp = SC2IFP(sc);
640
641 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
642 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
643 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
644 txd->txd_buf = NULL;
645 txd->txd_nsge = 0;
646 txd->txd_ready = 1;
647 atomic_inc_uint(&sc->sc_tx_avail);
648 ifp->if_flags &= ~IFF_OACTIVE;
649 }
650
651 static void
652 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
653 {
654 struct ifnet *ifp = SC2IFP(sc);
655 struct hvn_tx_desc *txd;
656 struct mbuf *m;
657 uint32_t id = tid >> 32;
658
659 if ((tid & 0xffffffffU) != 0)
660 return;
661
662 id -= HVN_NVS_CHIM_SIG;
663 if (id >= HVN_TX_DESC) {
664 device_printf(sc->sc_dev, "tx packet index too large: %u\n", id);
665 return;
666 }
667
668 txd = &sc->sc_tx_desc[id];
669
670 if ((m = txd->txd_buf) == NULL) {
671 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
672 return;
673 }
674 txd->txd_buf = NULL;
675
676 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
677 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
678 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
679 m_freem(m);
680 ifp->if_opackets++;
681
682 txd->txd_ready = 1;
683
684 atomic_inc_uint(&sc->sc_tx_avail);
685 ifp->if_flags &= ~IFF_OACTIVE;
686 }
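
/*
 * Transmit completions are matched by the VMBus transaction id:
 * hvn_rndis_output() sends with rid = (uint64_t)txd_id << 32, where
 * txd_id is the ring index biased by HVN_NVS_CHIM_SIG, so the low
 * 32 bits are zero for data packets and the descriptor index is
 * recovered above as (tid >> 32) - HVN_NVS_CHIM_SIG.
 */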
687
688 static int
689 hvn_rx_ring_create(struct hvn_softc *sc)
690 {
691 struct hvn_nvs_rxbuf_conn cmd;
692 struct hvn_nvs_rxbuf_conn_resp *rsp;
693 uint64_t tid;
694
695 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
696 sc->sc_rx_size = 15 * 1024 * 1024; /* 15MB */
697 else
698 sc->sc_rx_size = 16 * 1024 * 1024; /* 16MB */
699 sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
700 sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE);
701 if (sc->sc_rx_ring == NULL) {
702 DPRINTF("%s: failed to allocate Rx ring buffer\n",
703 device_xname(sc->sc_dev));
704 return -1;
705 }
706 if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
707 &sc->sc_rx_hndl)) {
708 DPRINTF("%s: failed to obtain a PA handle\n",
709 device_xname(sc->sc_dev));
710 goto errout;
711 }
712
713 memset(&cmd, 0, sizeof(cmd));
714 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
715 cmd.nvs_gpadl = sc->sc_rx_hndl;
716 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
717
718 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
719 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
720 goto errout;
721
722 rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
723 if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
724 DPRINTF("%s: failed to set up the Rx ring\n",
725 device_xname(sc->sc_dev));
726 goto errout;
727 }
728 if (rsp->nvs_nsect > 1) {
729 DPRINTF("%s: invalid number of Rx ring sections: %u\n",
730 device_xname(sc->sc_dev), rsp->nvs_nsect);
731 hvn_rx_ring_destroy(sc);
732 return -1;
733 }
734 return 0;
735
736 errout:
737 if (sc->sc_rx_hndl) {
738 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
739 sc->sc_rx_hndl = 0;
740 }
741 if (sc->sc_rx_ring) {
742 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
743 sc->sc_rx_ring = NULL;
744 }
745 return -1;
746 }
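
/*
 * The Rx "ring" is a large guest buffer (15MB or 16MB depending on the
 * NVS protocol) handed to the host as a GPADL via vmbus_handle_alloc()
 * and announced with HVN_NVS_TYPE_RXBUF_CONN; incoming RNDIS messages
 * are then delivered as offset/length ranges into this buffer (see
 * hvn_rndis_input()).
 */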
747
748 static int
749 hvn_rx_ring_destroy(struct hvn_softc *sc)
750 {
751 struct hvn_nvs_rxbuf_disconn cmd;
752 uint64_t tid;
753
754 if (sc->sc_rx_ring == NULL)
755 return 0;
756
757 memset(&cmd, 0, sizeof(cmd));
758 cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
759 cmd.nvs_sig = HVN_NVS_RXBUF_SIG;
760
761 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
762 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
763 return -1;
764
765 delay(100);
766
767 vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
768
769 sc->sc_rx_hndl = 0;
770
771 hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
772 sc->sc_rx_ring = NULL;
773
774 return 0;
775 }
776
777 static int
778 hvn_tx_ring_create(struct hvn_softc *sc)
779 {
780 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
781 struct hvn_tx_desc *txd;
782 bus_dma_segment_t *seg;
783 size_t msgsize;
784 int i, rsegs;
785 paddr_t pa;
786
787 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
788
789 /* Allocate memory to store RNDIS messages */
790 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
791 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
792 DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
793 device_xname(sc->sc_dev));
794 goto errout;
795 }
796 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
797 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
798 DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
799 device_xname(sc->sc_dev));
800 goto errout;
801 }
802 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
803 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
804 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
805 DPRINTF("%s: failed to create map for RDNIS messages\n",
806 device_xname(sc->sc_dev));
807 goto errout;
808 }
809 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
810 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
811 DPRINTF("%s: failed to create map for RDNIS messages\n",
812 device_xname(sc->sc_dev));
813 goto errout;
814 }
815
816 for (i = 0; i < HVN_TX_DESC; i++) {
817 txd = &sc->sc_tx_desc[i];
818 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
819 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
820 &txd->txd_dmap)) {
821 DPRINTF("%s: failed to create map for TX descriptors\n",
822 device_xname(sc->sc_dev));
823 goto errout;
824 }
825 seg = &sc->sc_tx_rmap->dm_segs[0];
826 pa = seg->ds_addr + (msgsize * i);
827 txd->txd_gpa.gpa_page = atop(pa);
828 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
829 txd->txd_gpa.gpa_len = msgsize;
830 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
831 txd->txd_id = i + HVN_NVS_CHIM_SIG;
832 txd->txd_ready = 1;
833 }
834 sc->sc_tx_avail = HVN_TX_DESC;
835
836 return 0;
837
838 errout:
839 hvn_tx_ring_destroy(sc);
840 return -1;
841 }
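
/*
 * The RNDIS message slab allocated above is a single DMA-loaded region
 * of HVN_TX_DESC slots of msgsize (HVN_RNDIS_PKT_LEN rounded up to a
 * multiple of 128) bytes each; descriptor i owns the slot at offset
 * msgsize * i, exposed both as a kernel pointer (txd_req) and as a
 * guest physical range (txd_gpa) for the host.
 */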
842
843 static void
844 hvn_tx_ring_destroy(struct hvn_softc *sc)
845 {
846 struct hvn_tx_desc *txd;
847 int i;
848
849 for (i = 0; i < HVN_TX_DESC; i++) {
850 txd = &sc->sc_tx_desc[i];
851 if (txd->txd_dmap == NULL)
852 continue;
853 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap, 0, 0,
854 BUS_DMASYNC_POSTWRITE);
855 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
856 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
857 txd->txd_dmap = NULL;
858 if (txd->txd_buf == NULL)
859 continue;
860 m_freem(txd->txd_buf);
861 txd->txd_buf = NULL;
862 }
863 if (sc->sc_tx_rmap) {
864 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
865 BUS_DMASYNC_POSTWRITE);
866 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
867 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
868 }
869 if (sc->sc_tx_msgs) {
870 size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
871
872 bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
873 msgsize * HVN_TX_DESC);
874 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
875 }
876 sc->sc_tx_rmap = NULL;
877 sc->sc_tx_msgs = NULL;
878 }
879
880 static int
881 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
882 {
883 size_t addrlen = ETHER_ADDR_LEN;
884 int rv;
885
886 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
887 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
888 rv = -1;
889 return rv;
890 }
891
892 static void
893 hvn_get_link_status(struct hvn_softc *sc)
894 {
895 uint32_t state;
896 size_t len = sizeof(state);
897
898 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
899 &state, &len) == 0)
900 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
901 LINK_STATE_UP : LINK_STATE_DOWN;
902 }
903
904 static int
905 hvn_nvs_attach(struct hvn_softc *sc)
906 {
907 static const uint32_t protos[] = {
908 HVN_NVS_PROTO_VERSION_5,
909 HVN_NVS_PROTO_VERSION_4,
910 HVN_NVS_PROTO_VERSION_2,
911 HVN_NVS_PROTO_VERSION_1
912 };
913 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP;
914 struct hvn_nvs_init cmd;
915 struct hvn_nvs_init_resp *rsp;
916 struct hvn_nvs_ndis_init ncmd;
917 struct hvn_nvs_ndis_conf ccmd;
918 uint32_t ndisver, ringsize;
919 uint64_t tid;
920 int i;
921
922 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, kmemflags);
923 if (sc->sc_nvsbuf == NULL) {
924 DPRINTF("%s: failed to allocate channel data buffer\n",
925 device_xname(sc->sc_dev));
926 return -1;
927 }
928
929 /* We need to be able to fit all RNDIS control and data messages */
930 ringsize = HVN_RNDIS_CTLREQS *
931 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
932 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
933 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
934
935 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
936
937 if (vmbus_channel_setdeferred(sc->sc_chan, device_xname(sc->sc_dev))) {
938 aprint_error_dev(sc->sc_dev,
939 "failed to create the interrupt thread\n");
940 return -1;
941 }
942
943 /* Associate our interrupt handler with the channel */
944 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
945 hvn_nvs_intr, sc)) {
946 DPRINTF("%s: failed to open channel\n",
947 device_xname(sc->sc_dev));
948 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
949 return -1;
950 }
951
952 memset(&cmd, 0, sizeof(cmd));
953 cmd.nvs_type = HVN_NVS_TYPE_INIT;
954 for (i = 0; i < __arraycount(protos); i++) {
955 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
956 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
957 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
958 return -1;
959
960 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
961 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
962 sc->sc_proto = protos[i];
963 break;
964 }
965 }
966 if (i == __arraycount(protos)) {
967 DPRINTF("%s: failed to negotiate NVSP version\n",
968 device_xname(sc->sc_dev));
969 return -1;
970 }
971
972 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
973 memset(&ccmd, 0, sizeof(ccmd));
974 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
975 ccmd.nvs_mtu = HVN_MAXMTU;
976 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
977
978 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
979 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
980 return -1;
981 }
982
983 memset(&ncmd, 0, sizeof(ncmd));
984 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
985 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
986 ndisver = NDIS_VERSION_6_1;
987 else
988 ndisver = NDIS_VERSION_6_30;
989 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
990 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
991
992 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
993 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
994 return -1;
995
996 sc->sc_ndisver = ndisver;
997
998 return 0;
999 }
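
/*
 * hvn_nvs_attach() negotiates the NVS protocol by proposing each entry
 * of protos[] (newest first) as both nvs_ver_min and nvs_ver_max until
 * the host acknowledges one, then picks the NDIS version to initialize:
 * 6.1 for NVS protocols up to version 4 and 6.30 otherwise.  The MTU
 * and VLAN configuration message (HVN_NVS_TYPE_NDIS_CONF) is only sent
 * for NVS protocol 2 and newer.
 */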
1000
1001 static void
1002 hvn_nvs_intr(void *arg)
1003 {
1004 struct hvn_softc *sc = arg;
1005 struct ifnet *ifp = SC2IFP(sc);
1006 struct vmbus_chanpkt_hdr *cph;
1007 const struct hvn_nvs_hdr *nvs;
1008 uint64_t rid;
1009 uint32_t rlen;
1010 int rv;
1011 bool dotx = false;
1012
1013 for (;;) {
1014 rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
1015 HVN_NVS_BUFSIZE, &rlen, &rid, 1);
1016 if (rv != 0 || rlen == 0) {
1017 if (rv != EAGAIN)
1018 device_printf(sc->sc_dev,
1019 "failed to receive an NVSP packet\n");
1020 break;
1021 }
1022 cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
1023 nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);
1024
1025 if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
1026 switch (nvs->nvs_type) {
1027 case HVN_NVS_TYPE_INIT_RESP:
1028 case HVN_NVS_TYPE_RXBUF_CONNRESP:
1029 case HVN_NVS_TYPE_CHIM_CONNRESP:
1030 case HVN_NVS_TYPE_SUBCH_RESP:
1031 /* copy the response back */
1032 memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
1033 sc->sc_nvsdone = 1;
1034 wakeup(&sc->sc_nvsrsp);
1035 break;
1036 case HVN_NVS_TYPE_RNDIS_ACK:
1037 dotx = true;
1038 hvn_txeof(sc, cph->cph_tid);
1039 break;
1040 default:
1041 device_printf(sc->sc_dev,
1042 "unhandled NVSP packet type %u "
1043 "on completion\n", nvs->nvs_type);
1044 break;
1045 }
1046 } else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
1047 switch (nvs->nvs_type) {
1048 case HVN_NVS_TYPE_RNDIS:
1049 hvn_rndis_input(sc, cph->cph_tid, cph);
1050 break;
1051 default:
1052 device_printf(sc->sc_dev,
1053 "unhandled NVSP packet type %u "
1054 "on receive\n", nvs->nvs_type);
1055 break;
1056 }
1057 } else
1058 device_printf(sc->sc_dev,
1059 "unknown NVSP packet type %u\n", cph->cph_type);
1060 }
1061
1062 if (dotx)
1063 if_schedule_deferred_start(ifp);
1064 }
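
/*
 * The channel callback above dispatches on the VMBus packet type:
 * completions carry NVS responses (copied into sc_nvsrsp for the
 * synchronous commands) or RNDIS transmit acknowledgements, while
 * receive-buffer packets carry inbound RNDIS messages that are handed
 * to hvn_rndis_input() and acknowledged with hvn_nvs_ack().
 */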
1065
1066 static int
1067 hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
1068 int timo)
1069 {
1070 struct hvn_nvs_hdr *hdr = cmd;
1071 int tries = 10;
1072 int rv, s;
1073
1074 sc->sc_nvsdone = 0;
1075
1076 do {
1077 rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
1078 tid, VMBUS_CHANPKT_TYPE_INBAND,
1079 timo ? VMBUS_CHANPKT_FLAG_RC : 0);
1080 if (rv == EAGAIN) {
1081 if (cold)
1082 delay(1000);
1083 else
1084 tsleep(cmd, PRIBIO, "nvsout", 1);
1085 } else if (rv) {
1086 DPRINTF("%s: NVSP operation %u send error %d\n",
1087 device_xname(sc->sc_dev), hdr->nvs_type, rv);
1088 return rv;
1089 }
1090 } while (rv != 0 && --tries > 0);
1091
1092 if (tries == 0 && rv != 0) {
1093 device_printf(sc->sc_dev,
1094 "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
1095 return rv;
1096 }
1097
1098 if (timo == 0)
1099 return 0;
1100
1101 do {
1102 if (cold)
1103 delay(1000);
1104 else
1105 tsleep(sc, PRIBIO | PCATCH, "nvscmd", 1);
1106 s = splnet();
1107 hvn_nvs_intr(sc);
1108 splx(s);
1109 } while (--timo > 0 && sc->sc_nvsdone != 1);
1110
1111 if (timo == 0 && sc->sc_nvsdone != 1) {
1112 device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
1113 hdr->nvs_type);
1114 return ETIMEDOUT;
1115 }
1116 return 0;
1117 }
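
/*
 * hvn_nvs_cmd() and hvn_rndis_cmd() below do not rely on the channel
 * interrupt to deliver their completion: they poll by calling
 * hvn_nvs_intr() directly (with a delay() busy-wait while cold, or a
 * 1-tick tsleep() otherwise) until the response flag is set or the
 * caller-supplied tick budget runs out.
 */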
1118
1119 static int
1120 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1121 {
1122 struct hvn_nvs_rndis_ack cmd;
1123 int tries = 5;
1124 int rv;
1125
1126 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1127 cmd.nvs_status = HVN_NVS_STATUS_OK;
1128 do {
1129 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1130 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1131 if (rv == EAGAIN)
1132 delay(10);
1133 else if (rv) {
1134 DPRINTF("%s: NVSP acknowledgement error %d\n",
1135 device_xname(sc->sc_dev), rv);
1136 return rv;
1137 }
1138 } while (rv != 0 && --tries > 0);
1139 return rv;
1140 }
1141
1142 static void
1143 hvn_nvs_detach(struct hvn_softc *sc)
1144 {
1145
1146 if (vmbus_channel_close(sc->sc_chan) == 0) {
1147 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1148 sc->sc_nvsbuf = NULL;
1149 }
1150 }
1151
1152 static inline struct rndis_cmd *
1153 hvn_alloc_cmd(struct hvn_softc *sc)
1154 {
1155 struct rndis_cmd *rc;
1156
1157 mutex_enter(&sc->sc_cntl_fqlck);
1158 while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
1159 /* XXX use condvar(9) instead of mtsleep */
1160 mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
1161 &sc->sc_cntl_fqlck);
1162 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1163 mutex_exit(&sc->sc_cntl_fqlck);
1164 return rc;
1165 }
1166
1167 static inline void
1168 hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1169 {
1170
1171 mutex_enter(&sc->sc_cntl_sqlck);
1172 TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
1173 mutex_exit(&sc->sc_cntl_sqlck);
1174 }
1175
1176 static inline struct rndis_cmd *
1177 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1178 {
1179 struct rndis_cmd *rc;
1180
1181 mutex_enter(&sc->sc_cntl_sqlck);
1182 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1183 if (rc->rc_id == id) {
1184 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1185 break;
1186 }
1187 }
1188 mutex_exit(&sc->sc_cntl_sqlck);
1189 if (rc != NULL) {
1190 mutex_enter(&sc->sc_cntl_cqlck);
1191 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1192 mutex_exit(&sc->sc_cntl_cqlck);
1193 }
1194 return rc;
1195 }
1196
1197 static inline void
1198 hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1199 {
1200
1201 mutex_enter(&sc->sc_cntl_cqlck);
1202 TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
1203 mutex_exit(&sc->sc_cntl_cqlck);
1204 }
1205
1206 static inline int
1207 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1208 {
1209 struct rndis_cmd *rn;
1210
1211 mutex_enter(&sc->sc_cntl_sqlck);
1212 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1213 if (rn == rc) {
1214 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1215 mutex_exit(&sc->sc_cntl_sqlck);
1216 return 0;
1217 }
1218 }
1219 mutex_exit(&sc->sc_cntl_sqlck);
1220 return -1;
1221 }
1222
1223 static inline void
1224 hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1225 {
1226
1227 memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
1228 memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
1229 memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
1230 mutex_enter(&sc->sc_cntl_fqlck);
1231 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1232 mutex_exit(&sc->sc_cntl_fqlck);
1233 wakeup(&sc->sc_cntl_fq);
1234 }
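
/*
 * Life cycle of an RNDIS control request: hvn_alloc_cmd() takes a
 * pre-allocated rndis_cmd off the free queue, hvn_submit_cmd() puts it
 * on the submission queue before it is sent, hvn_complete_cmd() moves
 * it to the completion queue when the matching completion arrives,
 * hvn_release_cmd() drops it from the completion queue, and
 * hvn_free_cmd() returns it to the free queue.  hvn_rollback_cmd()
 * removes a request that was never completed (send failure or timeout)
 * from the submission queue.
 */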
1235
1236 static int
1237 hvn_rndis_attach(struct hvn_softc *sc)
1238 {
1239 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1240 struct rndis_init_req *req;
1241 struct rndis_init_comp *cmp;
1242 struct rndis_cmd *rc;
1243 int i, rv;
1244
1245 /* RNDIS control message queues */
1246 TAILQ_INIT(&sc->sc_cntl_sq);
1247 TAILQ_INIT(&sc->sc_cntl_cq);
1248 TAILQ_INIT(&sc->sc_cntl_fq);
1249 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1250 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1251 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1252
1253 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1254 rc = &sc->sc_cntl_msgs[i];
1255 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1256 dmaflags, &rc->rc_dmap)) {
1257 DPRINTF("%s: failed to create RNDIS command map\n",
1258 device_xname(sc->sc_dev));
1259 goto errout;
1260 }
1261 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1262 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1263 DPRINTF("%s: failed to allocate RNDIS command\n",
1264 device_xname(sc->sc_dev));
1265 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1266 goto errout;
1267 }
1268 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1269 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1270 DPRINTF("%s: failed to allocate RNDIS command\n",
1271 device_xname(sc->sc_dev));
1272 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1273 rc->rc_nsegs);
1274 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1275 goto errout;
1276 }
1277 memset(rc->rc_req, 0, PAGE_SIZE);
1278 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1279 PAGE_SIZE, NULL, dmaflags)) {
1280 DPRINTF("%s: failed to load RNDIS command map\n",
1281 device_xname(sc->sc_dev));
1282 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1283 rc->rc_nsegs);
1284 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1285 goto errout;
1286 }
1287 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1288 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1289 }
1290
1291 rc = hvn_alloc_cmd(sc);
1292
1293 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1294 BUS_DMASYNC_PREREAD);
1295
1296 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1297
1298 req = rc->rc_req;
1299 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1300 req->rm_len = sizeof(*req);
1301 req->rm_rid = rc->rc_id;
1302 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1303 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1304 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1305
1306 rc->rc_cmplen = sizeof(*cmp);
1307
1308 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1309 BUS_DMASYNC_PREWRITE);
1310
1311 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1312 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1313 device_xname(sc->sc_dev), rv);
1314 hvn_free_cmd(sc, rc);
1315 goto errout;
1316 }
1317 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1318 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1319 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1320 device_xname(sc->sc_dev), cmp->rm_status);
1321 hvn_free_cmd(sc, rc);
1322 goto errout;
1323 }
1324
1325 hvn_free_cmd(sc, rc);
1326
1327 /* Initialize RNDIS Data command */
1328 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1329 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1330 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1331 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1332
1333 return 0;
1334
1335 errout:
1336 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1337 rc = &sc->sc_cntl_msgs[i];
1338 if (rc->rc_req == NULL)
1339 continue;
1340 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1341 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1342 rc->rc_req = NULL;
1343 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1344 }
1345 return -1;
1346 }
1347
1348 static int
1349 hvn_set_capabilities(struct hvn_softc *sc)
1350 {
1351 struct ndis_offload_params params;
1352 size_t len = sizeof(params);
1353
1354 memset(&params, 0, sizeof(params));
1355
1356 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1357 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1358 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1359 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1360 } else {
1361 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1362 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1363 }
1364
1365 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1366 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1367 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1368 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1369 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1370 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1371 }
1372
1373 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, &params, len);
1374 }
1375
1376 static int
1377 hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
1378 {
1379 struct hvn_nvs_rndis *msg = &rc->rc_msg;
1380 struct rndis_msghdr *hdr = rc->rc_req;
1381 struct vmbus_gpa sgl[1];
1382 int tries = 10;
1383 int rv, s;
1384
1385 KASSERT(timo > 0);
1386
1387 msg->nvs_type = HVN_NVS_TYPE_RNDIS;
1388 msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
1389 msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1390
1391 sgl[0].gpa_page = rc->rc_gpa;
1392 sgl[0].gpa_len = hdr->rm_len;
1393 sgl[0].gpa_ofs = 0;
1394
1395 rc->rc_done = 0;
1396
1397 hvn_submit_cmd(sc, rc);
1398
1399 do {
1400 rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
1401 sizeof(*msg), rc->rc_id);
1402 if (rv == EAGAIN) {
1403 if (cold)
1404 delay(1000);
1405 else
1406 tsleep(rc, PRIBIO, "rndisout", 1);
1407 } else if (rv) {
1408 DPRINTF("%s: RNDIS operation %u send error %d\n",
1409 device_xname(sc->sc_dev), hdr->rm_type, rv);
1410 hvn_rollback_cmd(sc, rc);
1411 return rv;
1412 }
1413 } while (rv != 0 && --tries > 0);
1414
1415 if (tries == 0 && rv != 0) {
1416 device_printf(sc->sc_dev,
1417 "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
1418 return rv;
1419 }
1420
1421 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1422 BUS_DMASYNC_POSTWRITE);
1423
1424 do {
1425 if (cold)
1426 delay(1000);
1427 else
1428 tsleep(rc, PRIBIO | PCATCH, "rndiscmd", 1);
1429 s = splnet();
1430 hvn_nvs_intr(sc);
1431 splx(s);
1432 } while (--timo > 0 && rc->rc_done != 1);
1433
1434 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1435 BUS_DMASYNC_POSTREAD);
1436
1437 if (rc->rc_done != 1) {
1438 rv = timo == 0 ? ETIMEDOUT : EINTR;
1439 if (hvn_rollback_cmd(sc, rc)) {
1440 hvn_release_cmd(sc, rc);
1441 rv = 0;
1442 } else if (rv == ETIMEDOUT) {
1443 device_printf(sc->sc_dev,
1444 "RNDIS operation %u timed out\n", hdr->rm_type);
1445 }
1446 return rv;
1447 }
1448
1449 hvn_release_cmd(sc, rc);
1450 return 0;
1451 }
1452
1453 static void
1454 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1455 {
1456 struct vmbus_chanpkt_prplist *cp = arg;
1457 uint32_t off, len, type;
1458 int i;
1459
1460 if (sc->sc_rx_ring == NULL) {
1461 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1462 return;
1463 }
1464
1465 for (i = 0; i < cp->cp_range_cnt; i++) {
1466 off = cp->cp_range[i].gpa_ofs;
1467 len = cp->cp_range[i].gpa_len;
1468
1469 KASSERT(off + len <= sc->sc_rx_size);
1470 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1471
1472 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1473 switch (type) {
1474 /* data message */
1475 case REMOTE_NDIS_PACKET_MSG:
1476 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1477 break;
1478 /* completion messages */
1479 case REMOTE_NDIS_INITIALIZE_CMPLT:
1480 case REMOTE_NDIS_QUERY_CMPLT:
1481 case REMOTE_NDIS_SET_CMPLT:
1482 case REMOTE_NDIS_RESET_CMPLT:
1483 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1484 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1485 break;
1486 /* notification message */
1487 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1488 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1489 break;
1490 default:
1491 device_printf(sc->sc_dev,
1492 "unhandled RNDIS message type %u\n", type);
1493 break;
1494 }
1495 }
1496
1497 hvn_nvs_ack(sc, tid);
1498 }
1499
1500 static inline struct mbuf *
1501 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1502 {
1503 struct ifnet *ifp = SC2IFP(sc);
1504 struct mbuf *m;
1505 size_t size = len + ETHER_ALIGN;
1506
1507 MGETHDR(m, M_NOWAIT, MT_DATA);
1508 if (m == NULL)
1509 return NULL;
1510
1511 if (size > MHLEN) {
1512 if (size <= MCLBYTES)
1513 MCLGET(m, M_NOWAIT);
1514 else
1515 MEXTMALLOC(m, size, M_NOWAIT);
1516 if ((m->m_flags & M_EXT) == 0) {
1517 m_freem(m);
1518 return NULL;
1519 }
1520 }
1521
1522 m->m_len = m->m_pkthdr.len = size;
1523 m_adj(m, ETHER_ALIGN);
1524 m_copyback(m, 0, len, buf);
1525 m_set_rcvif(m, ifp);
1526 return m;
1527 }
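
/*
 * hvn_devget() over-allocates by ETHER_ALIGN (2 bytes) and then
 * m_adj()s the mbuf so that the IP header following the 14-byte
 * Ethernet header ends up 4-byte aligned before the copy; received
 * frames are always copied out of the shared Rx buffer rather than
 * loaned to the stack.
 */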
1528
1529 static void
1530 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1531 {
1532 struct ifnet *ifp = SC2IFP(sc);
1533 struct rndis_packet_msg *pkt;
1534 struct rndis_pktinfo *pi;
1535 uint32_t csum, vlan;
1536 struct mbuf *m;
1537
1538 if (!(ifp->if_flags & IFF_RUNNING))
1539 return;
1540
1541 if (len < sizeof(*pkt)) {
1542 device_printf(sc->sc_dev, "data packet too short: %u\n",
1543 len);
1544 return;
1545 }
1546
1547 pkt = (struct rndis_packet_msg *)buf;
1548 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1549 device_printf(sc->sc_dev,
1550 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1551 pkt->rm_datalen);
1552 return;
1553 }
1554
1555 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1556 pkt->rm_datalen)) == NULL) {
1557 ifp->if_ierrors++;
1558 return;
1559 }
1560
1561 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1562 device_printf(sc->sc_dev,
1563 "pktinfo is out of bounds: %u@%u vs %u\n",
1564 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1565 goto done;
1566 }
1567
1568 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1569 pkt->rm_pktinfooffset);
1570 while (pkt->rm_pktinfolen > 0) {
1571 if (pi->rm_size > pkt->rm_pktinfolen) {
1572 device_printf(sc->sc_dev,
1573 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1574 pkt->rm_pktinfolen);
1575 break;
1576 }
1577
1578 switch (pi->rm_type) {
1579 case NDIS_PKTINFO_TYPE_CSUM:
1580 memcpy(&csum, pi->rm_data, sizeof(csum));
1581 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1582 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1583 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1584 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1585 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1586 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1587 break;
1588 case NDIS_PKTINFO_TYPE_VLAN:
1589 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1590 if (vlan != 0xffffffff) {
1591 m->m_pkthdr.ether_vtag =
1592 NDIS_VLAN_INFO_ID(vlan) |
1593 (NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS);
1594 m->m_flags |= M_VLANTAG;
1595 }
1596 break;
1597 default:
1598 DPRINTF("%s: unhandled pktinfo type %u\n",
1599 device_xname(sc->sc_dev), pi->rm_type);
1600 break;
1601 }
1602
1603 pkt->rm_pktinfolen -= pi->rm_size;
1604 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1605 }
1606
1607 done:
1608 if_percpuq_enqueue(sc->sc_ipq, m);
1609 }
1610
1611 static void
1612 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1613 {
1614 struct rndis_cmd *rc;
1615 uint32_t id;
1616
1617 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1618 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1619 if (len < rc->rc_cmplen)
1620 device_printf(sc->sc_dev,
1621 "RNDIS response %u too short: %u\n", id, len);
1622 else
1623 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1624 if (len > rc->rc_cmplen &&
1625 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1626 device_printf(sc->sc_dev,
1627 "RNDIS response %u too large: %u\n", id, len);
1628 else if (len > rc->rc_cmplen)
1629 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1630 len - rc->rc_cmplen);
1631 rc->rc_done = 1;
1632 wakeup(rc);
1633 } else {
1634 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1635 device_xname(sc->sc_dev), id);
1636 }
1637 }
1638
1639 static int
1640 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1641 {
1642 uint64_t rid = (uint64_t)txd->txd_id << 32;
1643 int rv;
1644
1645 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1646 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1647 if (rv) {
1648 DPRINTF("%s: RNDIS data send error %d\n",
1649 device_xname(sc->sc_dev), rv);
1650 return rv;
1651 }
1652 return 0;
1653 }
1654
1655 static void
1656 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1657 {
1658 struct ifnet *ifp = SC2IFP(sc);
1659 uint32_t status;
1660 int link_state = sc->sc_link_state;
1661
1662 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1663 switch (status) {
1664 case RNDIS_STATUS_MEDIA_CONNECT:
1665 sc->sc_link_state = LINK_STATE_UP;
1666 break;
1667 case RNDIS_STATUS_MEDIA_DISCONNECT:
1668 sc->sc_link_state = LINK_STATE_DOWN;
1669 break;
1670 /* Ignore these */
1671 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1672 return;
1673 default:
1674 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1675 status);
1676 return;
1677 }
1678 if (link_state != sc->sc_link_state)
1679 if_link_state_change(ifp, sc->sc_link_state);
1680 }
1681
1682 static int
1683 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1684 {
1685 struct rndis_cmd *rc;
1686 struct rndis_query_req *req;
1687 struct rndis_query_comp *cmp;
1688 size_t olength = *length;
1689 int rv;
1690
1691 rc = hvn_alloc_cmd(sc);
1692
1693 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1694 BUS_DMASYNC_PREREAD);
1695
1696 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1697
1698 req = rc->rc_req;
1699 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1700 req->rm_len = sizeof(*req);
1701 req->rm_rid = rc->rc_id;
1702 req->rm_oid = oid;
1703 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1704
1705 rc->rc_cmplen = sizeof(*cmp);
1706
1707 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1708 BUS_DMASYNC_PREWRITE);
1709
1710 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1711 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1712 device_xname(sc->sc_dev), rv);
1713 hvn_free_cmd(sc, rc);
1714 return rv;
1715 }
1716
1717 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1718 switch (cmp->rm_status) {
1719 case RNDIS_STATUS_SUCCESS:
1720 if (cmp->rm_infobuflen > olength) {
1721 rv = EINVAL;
1722 break;
1723 }
1724 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1725 *length = cmp->rm_infobuflen;
1726 break;
1727 default:
1728 *length = 0;
1729 rv = EIO;
1730 break;
1731 }
1732
1733 hvn_free_cmd(sc, rc);
1734 return rv;
1735 }
1736
1737 static int
1738 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1739 {
1740 struct rndis_cmd *rc;
1741 struct rndis_set_req *req;
1742 struct rndis_set_comp *cmp;
1743 int rv;
1744
1745 rc = hvn_alloc_cmd(sc);
1746
1747 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1748 BUS_DMASYNC_PREREAD);
1749
1750 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1751
1752 req = rc->rc_req;
1753 req->rm_type = REMOTE_NDIS_SET_MSG;
1754 req->rm_len = sizeof(*req) + length;
1755 req->rm_rid = rc->rc_id;
1756 req->rm_oid = oid;
1757 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1758
1759 rc->rc_cmplen = sizeof(*cmp);
1760
1761 if (length > 0) {
1762 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1763 req->rm_infobuflen = length;
1764 memcpy(req + 1, data, length);
1765 }
1766
1767 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1768 BUS_DMASYNC_PREWRITE);
1769
1770 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1771 DPRINTF("%s: SET_MSG failed, error %d\n",
1772 device_xname(sc->sc_dev), rv);
1773 hvn_free_cmd(sc, rc);
1774 return rv;
1775 }
1776
1777 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1778 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1779 rv = EIO;
1780
1781 hvn_free_cmd(sc, rc);
1782 return rv;
1783 }
1784
1785 static int
1786 hvn_rndis_open(struct hvn_softc *sc)
1787 {
1788 uint32_t filter;
1789 int rv;
1790
1791 if (sc->sc_promisc)
1792 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1793 else
1794 filter = RNDIS_PACKET_TYPE_BROADCAST |
1795 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1796 RNDIS_PACKET_TYPE_DIRECTED;
1797
1798 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1799 &filter, sizeof(filter));
1800 if (rv) {
1801 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1802 device_xname(sc->sc_dev), filter);
1803 }
1804 return rv;
1805 }
1806
1807 static int
1808 hvn_rndis_close(struct hvn_softc *sc)
1809 {
1810 uint32_t filter = 0;
1811 int rv;
1812
1813 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1814 &filter, sizeof(filter));
1815 if (rv) {
1816 DPRINTF("%s: failed to clear RNDIS filter\n",
1817 device_xname(sc->sc_dev));
1818 }
1819 return rv;
1820 }
1821
1822 static void
1823 hvn_rndis_detach(struct hvn_softc *sc)
1824 {
1825 struct rndis_cmd *rc;
1826 struct rndis_halt_req *req;
1827 int rv;
1828
1829 rc = hvn_alloc_cmd(sc);
1830
1831 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1832 BUS_DMASYNC_PREREAD);
1833
1834 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1835
1836 req = rc->rc_req;
1837 req->rm_type = REMOTE_NDIS_HALT_MSG;
1838 req->rm_len = sizeof(*req);
1839 req->rm_rid = rc->rc_id;
1840
1841 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1842 BUS_DMASYNC_PREWRITE);
1843
1844 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1845 DPRINTF("%s: HALT_MSG failed, error %d\n",
1846 device_xname(sc->sc_dev), rv);
1847 }
1848 hvn_free_cmd(sc, rc);
1849 }
1850