if_hvn.c revision 1.3.2.3 1 /* $NetBSD: if_hvn.c,v 1.3.2.3 2020/04/08 14:08:05 martin Exp $ */
2 /* $OpenBSD: if_hvn.c,v 1.39 2018/03/11 14:31:34 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012,2016 Microsoft Corp.
6 * Copyright (c) 2010-2012 Citrix Inc.
7 * Copyright (c) 2012 NetApp Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_hvn.c,v 1.3.2.3 2020/04/08 14:08:05 martin Exp $");
39
40 #ifdef _KERNEL_OPT
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_net_mpsafe.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/atomic.h>
51 #include <sys/bus.h>
52 #include <sys/intr.h>
53 #include <sys/kmem.h>
54
55 #include <net/if.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <net/bpf.h>
60
61 #include <dev/ic/ndisreg.h>
62 #include <dev/ic/rndisreg.h>
63
64 #include <dev/hyperv/vmbusvar.h>
65 #include <dev/hyperv/if_hvnreg.h>
66
67 #ifndef EVL_PRIO_BITS
68 #define EVL_PRIO_BITS 13
69 #endif
70 #ifndef EVL_CFI_BITS
71 #define EVL_CFI_BITS 12
72 #endif
73
74 #define HVN_NVS_MSGSIZE 32
75 #define HVN_NVS_BUFSIZE PAGE_SIZE
76
77 /*
78 * RNDIS control interface
79 */
80 #define HVN_RNDIS_CTLREQS 4
81 #define HVN_RNDIS_BUFSIZE 512
82
/*
 * An in-flight RNDIS control request and its completion state.
 * The request body (rc_req) lives in DMA-able memory so the host can
 * read it; the completion is copied back into rc_cmp/rc_cmpbuf when
 * the response arrives on the channel.
 */
struct rndis_cmd {
	uint32_t rc_id;			/* request id matched against completions */
	struct hvn_nvs_rndis rc_msg;	/* NVS wrapper submitted to the host */
	void *rc_req;			/* RNDIS request message (DMA memory) */
	bus_dmamap_t rc_dmap;		/* DMA map covering rc_req */
	bus_dma_segment_t rc_segs;	/* backing segment for rc_req */
	int rc_nsegs;			/* number of segments in rc_segs */
	uint64_t rc_gpa;		/* presumably guest page frame of rc_req — not set in this chunk */
	struct rndis_packet_msg rc_cmp;	/* copy of the completion header */
	uint32_t rc_cmplen;		/* length of the completion payload */
	uint8_t rc_cmpbuf[HVN_RNDIS_BUFSIZE]; /* completion payload copy */
	int rc_done;			/* nonzero once the response arrived */
	TAILQ_ENTRY(rndis_cmd) rc_entry; /* linkage on free/submit/complete queues */
};
TAILQ_HEAD(rndis_queue, rndis_cmd);
98
99 #define HVN_MAXMTU (9 * 1024)
100
101 #define HVN_RNDIS_XFER_SIZE 2048
102
103 /*
104 * Tx ring
105 */
106 #define HVN_TX_DESC 256
107 #define HVN_TX_FRAGS 15 /* 31 is the max */
108 #define HVN_TX_FRAG_SIZE PAGE_SIZE
109 #define HVN_TX_PKT_SIZE 16384
110
111 #define HVN_RNDIS_PKT_LEN \
112 (sizeof(struct rndis_packet_msg) + \
113 sizeof(struct rndis_pktinfo) + NDIS_VLAN_INFO_SIZE + \
114 sizeof(struct rndis_pktinfo) + NDIS_TXCSUM_INFO_SIZE)
115
/*
 * Per-packet transmit descriptor.  Slot 0 of txd_sgl points at the
 * RNDIS message (txd_req/txd_gpa); the remaining slots carry the DMA
 * segments of the mbuf chain being transmitted (see hvn_encap()).
 */
struct hvn_tx_desc {
	uint32_t txd_id;		/* HVN_NVS_CHIM_SIG + index, echoed in completions */
	int txd_ready;			/* 1 while the descriptor is free for reuse */
	struct vmbus_gpa txd_sgl[HVN_TX_FRAGS + 1]; /* gather list sent to the host */
	int txd_nsge;			/* number of valid txd_sgl entries */
	struct mbuf *txd_buf;		/* mbuf chain in flight, freed in hvn_txeof() */
	bus_dmamap_t txd_dmap;		/* DMA map for txd_buf */
	struct vmbus_gpa txd_gpa;	/* location of the RNDIS message for slot 0 */
	struct rndis_packet_msg *txd_req; /* RNDIS message backing store */
};
126
/*
 * Per-device softc for a Hyper-V NetVSC network interface.
 */
struct hvn_softc {
	device_t sc_dev;

	struct vmbus_softc *sc_vmbus;	/* parent VMBus */
	struct vmbus_channel *sc_chan;	/* channel carrying NVS/RNDIS traffic */
	bus_dma_tag_t sc_dmat;		/* DMA tag inherited from the parent */

	struct ethercom sc_ec;
	struct ifmedia sc_media;
	struct if_percpuq *sc_ipq;	/* per-CPU input queue */
	int sc_link_state;		/* cached LINK_STATE_* value */
	int sc_promisc;			/* promiscuous mode requested (unused, see hvn_iff) */

	uint32_t sc_flags;
#define HVN_SCF_ATTACHED	__BIT(0)	/* attach completed; safe to detach */

	/* NVS protocol */
	int sc_proto;			/* negotiated HVN_NVS_PROTO_VERSION_* */
	uint32_t sc_nvstid;		/* transaction id source, atomically bumped */
	uint8_t sc_nvsrsp[HVN_NVS_MSGSIZE]; /* last NVS completion payload */
	uint8_t *sc_nvsbuf;		/* channel receive buffer (HVN_NVS_BUFSIZE) */
	int sc_nvsdone;			/* set by hvn_nvs_intr() when a response lands */

	/* RNDIS protocol */
	int sc_ndisver;			/* negotiated NDIS_VERSION_* */
	uint32_t sc_rndisrid;		/* RNDIS request id source */
	struct rndis_queue sc_cntl_sq;	/* submission queue */
	kmutex_t sc_cntl_sqlck;
	struct rndis_queue sc_cntl_cq;	/* completion queue */
	kmutex_t sc_cntl_cqlck;
	struct rndis_queue sc_cntl_fq;	/* free queue */
	kmutex_t sc_cntl_fqlck;
	struct rndis_cmd sc_cntl_msgs[HVN_RNDIS_CTLREQS]; /* backing storage for the queues */
	struct hvn_nvs_rndis sc_data_msg;

	/* Rx ring */
	uint8_t *sc_rx_ring;		/* shared receive buffer mapped for the host */
	int sc_rx_size;			/* 15MB or 16MB depending on sc_proto */
	uint32_t sc_rx_hndl;		/* GPADL handle for the Rx buffer */
	struct hyperv_dma sc_rx_dma;

	/* Tx ring */
	uint32_t sc_tx_next;		/* next descriptor index (monotonic, mod HVN_TX_DESC) */
	uint32_t sc_tx_avail;		/* free descriptors, updated atomically */
	struct hvn_tx_desc sc_tx_desc[HVN_TX_DESC];
	bus_dmamap_t sc_tx_rmap;	/* map covering the RNDIS message block */
	uint8_t *sc_tx_msgs;		/* RNDIS message storage, one slot per descriptor */
	bus_dma_segment_t sc_tx_mseg;
};
176
177 #define SC2IFP(_sc_) (&(_sc_)->sc_ec.ec_if)
178 #define IFP2SC(_ifp_) ((_ifp_)->if_softc)
179
180
181 static int hvn_match(device_t, cfdata_t, void *);
182 static void hvn_attach(device_t, device_t, void *);
183 static int hvn_detach(device_t, int);
184
185 CFATTACH_DECL_NEW(hvn, sizeof(struct hvn_softc),
186 hvn_match, hvn_attach, hvn_detach, NULL);
187
188 static int hvn_ioctl(struct ifnet *, u_long, void *);
189 static int hvn_media_change(struct ifnet *);
190 static void hvn_media_status(struct ifnet *, struct ifmediareq *);
191 static int hvn_iff(struct hvn_softc *);
192 static int hvn_init(struct ifnet *);
193 static void hvn_stop(struct ifnet *, int);
194 static void hvn_start(struct ifnet *);
195 static int hvn_encap(struct hvn_softc *, struct mbuf *,
196 struct hvn_tx_desc **);
197 static void hvn_decap(struct hvn_softc *, struct hvn_tx_desc *);
198 static void hvn_txeof(struct hvn_softc *, uint64_t);
199 static int hvn_rx_ring_create(struct hvn_softc *);
200 static int hvn_rx_ring_destroy(struct hvn_softc *);
201 static int hvn_tx_ring_create(struct hvn_softc *);
202 static void hvn_tx_ring_destroy(struct hvn_softc *);
203 static int hvn_set_capabilities(struct hvn_softc *);
204 static int hvn_get_lladdr(struct hvn_softc *, uint8_t *);
205 static void hvn_get_link_status(struct hvn_softc *);
206
207 /* NSVP */
208 static int hvn_nvs_attach(struct hvn_softc *);
209 static void hvn_nvs_intr(void *);
210 static int hvn_nvs_cmd(struct hvn_softc *, void *, size_t, uint64_t, int);
211 static int hvn_nvs_ack(struct hvn_softc *, uint64_t);
212 static void hvn_nvs_detach(struct hvn_softc *);
213
214 /* RNDIS */
215 static int hvn_rndis_attach(struct hvn_softc *);
216 static int hvn_rndis_cmd(struct hvn_softc *, struct rndis_cmd *, int);
217 static void hvn_rndis_input(struct hvn_softc *, uint64_t, void *);
218 static void hvn_rxeof(struct hvn_softc *, uint8_t *, uint32_t);
219 static void hvn_rndis_complete(struct hvn_softc *, uint8_t *, uint32_t);
220 static int hvn_rndis_output(struct hvn_softc *, struct hvn_tx_desc *);
221 static void hvn_rndis_status(struct hvn_softc *, uint8_t *, uint32_t);
222 static int hvn_rndis_query(struct hvn_softc *, uint32_t, void *, size_t *);
223 static int hvn_rndis_set(struct hvn_softc *, uint32_t, void *, size_t);
224 static int hvn_rndis_open(struct hvn_softc *);
225 static int hvn_rndis_close(struct hvn_softc *);
226 static void hvn_rndis_detach(struct hvn_softc *);
227
228 static int
229 hvn_match(device_t parent, cfdata_t match, void *aux)
230 {
231 struct vmbus_attach_args *aa = aux;
232
233 if (memcmp(aa->aa_type, &hyperv_guid_network, sizeof(*aa->aa_type)))
234 return 0;
235 return 1;
236 }
237
/*
 * Attach a NetVSC instance: bring up the NVS transport, allocate the
 * shared Rx buffer and the Tx descriptor ring, initialize the ifnet,
 * then start RNDIS and query the device for offloads and the MAC
 * address.  On failure the fail* labels unwind in reverse order.
 */
static void
hvn_attach(device_t parent, device_t self, void *aux)
{
	struct hvn_softc *sc = device_private(self);
	struct vmbus_attach_args *aa = aux;
	struct ifnet *ifp = SC2IFP(sc);
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error;

	sc->sc_dev = self;
	sc->sc_vmbus = (struct vmbus_softc *)device_private(parent);
	sc->sc_chan = aa->aa_chan;
	sc->sc_dmat = sc->sc_vmbus->sc_dmat;

	aprint_naive("\n");
	aprint_normal(": Hyper-V NetVSC\n");

	/* Open the channel and negotiate the NVS protocol version. */
	if (hvn_nvs_attach(sc)) {
		aprint_error_dev(self, "failed to init NVSP\n");
		return;
	}

	if (hvn_rx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Rx ring\n");
		goto fail1;
	}

	if (hvn_tx_ring_create(sc)) {
		aprint_error_dev(self, "failed to create Tx ring\n");
		goto fail2;
	}

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hvn_ioctl;
	ifp->if_start = hvn_start;
	ifp->if_init = hvn_init;
	ifp->if_stop = hvn_stop;
	/* IPv4/TCP checksum offload is always available. */
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx;
	/* UDP checksum offload only on newer NDIS versions. */
	if (sc->sc_ndisver > NDIS_VERSION_6_30) {
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
	}
	if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
		sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	}

	IFQ_SET_MAXLEN(&ifp->if_snd, HVN_TX_DESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, hvn_media_change,
	    hvn_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	error = if_initialize(ifp);
	if (error) {
		aprint_error_dev(self, "if_initialize failed(%d)\n", error);
		goto fail3;
	}
	sc->sc_ipq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);

	if (hvn_rndis_attach(sc)) {
		aprint_error_dev(self, "failed to init RNDIS\n");
		/*
		 * NOTE(review): this path skips if_percpuq_destroy()
		 * for sc_ipq and leaves ifp if_initialize()d without a
		 * matching if_detach() — possible leak; confirm against
		 * later revisions of this driver.
		 */
		goto fail3;
	}

	aprint_normal_dev(self, "NVS %d.%d NDIS %d.%d\n",
	    sc->sc_proto >> 16, sc->sc_proto & 0xffff,
	    sc->sc_ndisver >> 16 , sc->sc_ndisver & 0xffff);

	if (hvn_set_capabilities(sc)) {
		aprint_error_dev(self, "failed to setup offloading\n");
		goto fail4;
	}

	if (hvn_get_lladdr(sc, enaddr)) {
		aprint_error_dev(self,
		    "failed to obtain an ethernet address\n");
		goto fail4;
	}
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Mark attach complete so hvn_detach() knows it may tear down. */
	SET(sc->sc_flags, HVN_SCF_ATTACHED);
	return;

fail4:	hvn_rndis_detach(sc);
	if_percpuq_destroy(sc->sc_ipq);
fail3:	hvn_tx_ring_destroy(sc);
fail2:	hvn_rx_ring_destroy(sc);
fail1:	hvn_nvs_detach(sc);
}
347
/*
 * Detach the interface and release all resources, in the reverse
 * order of hvn_attach().  A partially attached device (attach bailed
 * out early) is recognized by the missing HVN_SCF_ATTACHED flag and
 * left alone — hvn_attach() already unwound whatever it created.
 */
static int
hvn_detach(device_t self, int flags)
{
	struct hvn_softc *sc = device_private(self);
	struct ifnet *ifp = SC2IFP(sc);

	if (!ISSET(sc->sc_flags, HVN_SCF_ATTACHED))
		return 0;

	/* Quiesce the interface before dismantling it. */
	if (ifp->if_flags & IFF_RUNNING)
		hvn_stop(ifp, 1);

	pmf_device_deregister(self);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	if_percpuq_destroy(sc->sc_ipq);

	/* RNDIS first, then the rings, then close the NVS channel. */
	hvn_rndis_detach(sc);
	hvn_rx_ring_destroy(sc);
	hvn_tx_ring_destroy(sc);
	hvn_nvs_detach(sc);

	return 0;
}
374
375 static int
376 hvn_ioctl(struct ifnet *ifp, u_long command, void * data)
377 {
378 struct hvn_softc *sc = IFP2SC(ifp);
379 int s, error = 0;
380
381 s = splnet();
382
383 switch (command) {
384 case SIOCSIFFLAGS:
385 if (ifp->if_flags & IFF_UP) {
386 if (ifp->if_flags & IFF_RUNNING)
387 error = ENETRESET;
388 else {
389 error = hvn_init(ifp);
390 if (error)
391 ifp->if_flags &= ~IFF_UP;
392 }
393 } else {
394 if (ifp->if_flags & IFF_RUNNING)
395 hvn_stop(ifp, 1);
396 }
397 break;
398 default:
399 error = ether_ioctl(ifp, command, data);
400 break;
401 }
402
403 if (error == ENETRESET) {
404 if (ifp->if_flags & IFF_RUNNING)
405 hvn_iff(sc);
406 error = 0;
407 }
408
409 splx(s);
410
411 return error;
412 }
413
/*
 * ifmedia change callback.  The device exposes a single manual media
 * type, so there is nothing to reconfigure.
 */
static int
hvn_media_change(struct ifnet *ifp)
{

	return 0;
}
420
421 static void
422 hvn_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
423 {
424 struct hvn_softc *sc = IFP2SC(ifp);
425 int link_state;
426
427 link_state = sc->sc_link_state;
428 hvn_get_link_status(sc);
429 if (link_state != sc->sc_link_state)
430 if_link_state_change(ifp, sc->sc_link_state);
431
432 ifmr->ifm_status = IFM_AVALID;
433 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
434 if (sc->sc_link_state == LINK_STATE_UP)
435 ifmr->ifm_status |= IFM_ACTIVE;
436 }
437
438 static int
439 hvn_iff(struct hvn_softc *sc)
440 {
441
442 /* XXX */
443 sc->sc_promisc = 0;
444
445 return 0;
446 }
447
448 static int
449 hvn_init(struct ifnet *ifp)
450 {
451 struct hvn_softc *sc = IFP2SC(ifp);
452 int error;
453
454 hvn_stop(ifp, 0);
455
456 error = hvn_iff(sc);
457 if (error)
458 return error;
459
460 error = hvn_rndis_open(sc);
461 if (error == 0) {
462 ifp->if_flags |= IFF_RUNNING;
463 ifp->if_flags &= ~IFF_OACTIVE;
464 }
465 return error;
466 }
467
468 static void
469 hvn_stop(struct ifnet *ifp, int disable)
470 {
471 struct hvn_softc *sc = IFP2SC(ifp);
472
473 hvn_rndis_close(sc);
474
475 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
476 }
477
478 static void
479 hvn_start(struct ifnet *ifp)
480 {
481 struct hvn_softc *sc = IFP2SC(ifp);
482 struct hvn_tx_desc *txd;
483 struct mbuf *m;
484
485 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
486 return;
487
488 for (;;) {
489 if (!sc->sc_tx_avail) {
490 /* transient */
491 ifp->if_flags |= IFF_OACTIVE;
492 break;
493 }
494
495 IFQ_DEQUEUE(&ifp->if_snd, m);
496 if (m == NULL)
497 break;
498
499 if (hvn_encap(sc, m, &txd)) {
500 /* the chain is too large */
501 if_statinc(ifp, if_oerrors);
502 m_freem(m);
503 continue;
504 }
505
506 bpf_mtap(ifp, m, BPF_D_OUT);
507
508 if (hvn_rndis_output(sc, txd)) {
509 hvn_decap(sc, txd);
510 if_statinc(ifp, if_oerrors);
511 m_freem(m);
512 continue;
513 }
514
515 sc->sc_tx_next++;
516 }
517 }
518
519 static inline char *
520 hvn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize,
521 size_t datalen, uint32_t type)
522 {
523 struct rndis_pktinfo *pi;
524 size_t pi_size = sizeof(*pi) + datalen;
525 char *cp;
526
527 KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <=
528 pktsize);
529
530 cp = (char *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
531 pi = (struct rndis_pktinfo *)cp;
532 pi->rm_size = pi_size;
533 pi->rm_type = type;
534 pi->rm_pktinfooffset = sizeof(*pi);
535 pkt->rm_pktinfolen += pi_size;
536 pkt->rm_dataoffset += pi_size;
537 pkt->rm_len += pi_size;
538
539 return (char *)pi->rm_data;
540 }
541
/*
 * Encapsulate an mbuf chain for transmission: claim a free Tx
 * descriptor, build the RNDIS packet message (with optional VLAN and
 * checksum-offload records), DMA-map the chain, and fill in the
 * gather list.  On success *txd0 points at the loaded descriptor and
 * sc_tx_avail is decremented; on failure the mbuf is left for the
 * caller to free.
 */
static int
hvn_encap(struct hvn_softc *sc, struct mbuf *m, struct hvn_tx_desc **txd0)
{
	struct hvn_tx_desc *txd;
	struct rndis_packet_msg *pkt;
	bus_dma_segment_t *seg;
	size_t pktlen;
	int i, rv;

	/*
	 * Scan for a ready descriptor; hvn_start() checked sc_tx_avail
	 * first, so one is expected to exist.
	 */
	do {
		txd = &sc->sc_tx_desc[sc->sc_tx_next % HVN_TX_DESC];
		sc->sc_tx_next++;
	} while (!txd->txd_ready);
	txd->txd_ready = 0;

	/* Build the RNDIS packet header in the descriptor's DMA memory. */
	pkt = txd->txd_req;
	memset(pkt, 0, HVN_RNDIS_PKT_LEN);
	pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
	pkt->rm_len = sizeof(*pkt) + m->m_pkthdr.len;
	pkt->rm_dataoffset = RNDIS_DATA_OFFSET;
	pkt->rm_datalen = m->m_pkthdr.len;
	pkt->rm_pktinfooffset = sizeof(*pkt); /* adjusted below */
	pkt->rm_pktinfolen = 0;

	/* Map the chain for DMA; on EFBIG, defragment once and retry. */
	rv = bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m, BUS_DMA_READ |
	    BUS_DMA_NOWAIT);
	switch (rv) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_NOWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmap, m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		DPRINTF("%s: failed to load mbuf\n", device_xname(sc->sc_dev));
		return -1;
	}
	txd->txd_buf = m;

	/* Attach a VLAN record when the mbuf carries a tag. */
	if (vlan_has_tag(m)) {
		uint32_t vlan;
		char *cp;
		uint16_t tag;

		tag = vlan_get_tag(m);
		vlan = NDIS_VLAN_INFO_MAKE(EVL_VLANOFTAG(tag),
		    EVL_PRIOFTAG(tag), EVL_CFIOFTAG(tag));
		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
		    NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
		memcpy(cp, &vlan, NDIS_VLAN_INFO_SIZE);
	}

	/* Ask the host to compute the requested IPv4/TCP/UDP checksums. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_UDPv4 |
	    M_CSUM_TCPv4)) {
		uint32_t csum = NDIS_TXCSUM_INFO_IPV4;
		char *cp;

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum |= NDIS_TXCSUM_INFO_IPCS;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			csum |= NDIS_TXCSUM_INFO_TCPCS;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			csum |= NDIS_TXCSUM_INFO_UDPCS;
		cp = hvn_rndis_pktinfo_append(pkt, HVN_RNDIS_PKT_LEN,
		    NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
		memcpy(cp, &csum, NDIS_TXCSUM_INFO_SIZE);
	}

	pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
	/* The wire format expects this offset relative to the RNDIS data. */
	pkt->rm_pktinfooffset -= RNDIS_HEADER_OFFSET;

	/* Attach an RNDIS message to the first slot */
	txd->txd_sgl[0].gpa_page = txd->txd_gpa.gpa_page;
	txd->txd_sgl[0].gpa_ofs = txd->txd_gpa.gpa_ofs;
	txd->txd_sgl[0].gpa_len = pktlen;
	txd->txd_nsge = txd->txd_dmap->dm_nsegs + 1;

	/* Remaining slots carry the mbuf data segments. */
	for (i = 0; i < txd->txd_dmap->dm_nsegs; i++) {
		seg = &txd->txd_dmap->dm_segs[i];
		txd->txd_sgl[1 + i].gpa_page = atop(seg->ds_addr);
		txd->txd_sgl[1 + i].gpa_ofs = seg->ds_addr & PAGE_MASK;
		txd->txd_sgl[1 + i].gpa_len = seg->ds_len;
	}

	*txd0 = txd;

	atomic_dec_uint(&sc->sc_tx_avail);

	return 0;
}
634
/*
 * Undo hvn_encap() after a failed hvn_rndis_output(): unload the DMA
 * map and return the descriptor to the free pool.  The mbuf itself is
 * NOT freed here — the caller owns it.
 */
static void
hvn_decap(struct hvn_softc *sc, struct hvn_tx_desc *txd)
{
	struct ifnet *ifp = SC2IFP(sc);

	bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
	    0, txd->txd_dmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
	txd->txd_buf = NULL;
	txd->txd_nsge = 0;
	/* Descriptor is reusable again; unthrottle the transmit queue. */
	txd->txd_ready = 1;
	atomic_inc_uint(&sc->sc_tx_avail);
	ifp->if_flags &= ~IFF_OACTIVE;
}
650
651 static void
652 hvn_txeof(struct hvn_softc *sc, uint64_t tid)
653 {
654 struct ifnet *ifp = SC2IFP(sc);
655 struct hvn_tx_desc *txd;
656 struct mbuf *m;
657 uint32_t id = tid >> 32;
658
659 if ((tid & 0xffffffffU) != 0)
660 return;
661
662 id -= HVN_NVS_CHIM_SIG;
663 if (id >= HVN_TX_DESC) {
664 device_printf(sc->sc_dev, "tx packet index too large: %u", id);
665 return;
666 }
667
668 txd = &sc->sc_tx_desc[id];
669
670 if ((m = txd->txd_buf) == NULL) {
671 device_printf(sc->sc_dev, "no mbuf @%u\n", id);
672 return;
673 }
674 txd->txd_buf = NULL;
675
676 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
677 0, txd->txd_dmap->dm_mapsize,
678 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
679 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
680 m_freem(m);
681 if_statinc(ifp, if_opackets);
682
683 txd->txd_ready = 1;
684
685 atomic_inc_uint(&sc->sc_tx_avail);
686 ifp->if_flags &= ~IFF_OACTIVE;
687 }
688
/*
 * Allocate the shared receive buffer, hand it to the host via a
 * GPADL handle, and send the RXBUF_CONN command.  The buffer is 15MB
 * for NVS protocol <= 2 and 16MB otherwise.  Returns 0 on success,
 * -1 on failure with everything released.
 */
static int
hvn_rx_ring_create(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_conn cmd;
	struct hvn_nvs_rxbuf_conn_resp *rsp;
	uint64_t tid;

	if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_2)
		sc->sc_rx_size = 15 * 1024 * 1024;	/* 15MB */
	else
		sc->sc_rx_size = 16 * 1024 * 1024; 	/* 16MB */
	sc->sc_rx_ring = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_rx_dma,
	    sc->sc_rx_size, PAGE_SIZE, PAGE_SIZE, sc->sc_rx_size / PAGE_SIZE,
	    HYPERV_DMA_SLEEPOK);
	if (sc->sc_rx_ring == NULL) {
		DPRINTF("%s: failed to allocate Rx ring buffer\n",
		    device_xname(sc->sc_dev));
		return -1;
	}
	/* Publish the buffer to the host (GPADL). */
	if (vmbus_handle_alloc(sc->sc_chan, &sc->sc_rx_dma, sc->sc_rx_size,
	    &sc->sc_rx_hndl)) {
		DPRINTF("%s: failed to obtain a PA handle\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_CONN;
	cmd.nvs_gpadl = sc->sc_rx_hndl;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
		goto errout;

	/* hvn_nvs_cmd() copied the host's response into sc_nvsrsp. */
	rsp = (struct hvn_nvs_rxbuf_conn_resp *)&sc->sc_nvsrsp;
	if (rsp->nvs_status != HVN_NVS_STATUS_OK) {
		DPRINTF("%s: failed to set up the Rx ring\n",
		    device_xname(sc->sc_dev));
		goto errout;
	}
	if (rsp->nvs_nsect > 1) {
		DPRINTF("%s: invalid number of Rx ring sections: %u\n",
		    device_xname(sc->sc_dev), rsp->nvs_nsect);
		/* Already connected: use the full destroy path. */
		hvn_rx_ring_destroy(sc);
		return -1;
	}
	return 0;

 errout:
	if (sc->sc_rx_hndl) {
		vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
		sc->sc_rx_hndl = 0;
	}
	if (sc->sc_rx_ring) {
		hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
		sc->sc_rx_ring = NULL;
	}
	return -1;
}
749
/*
 * Disconnect the shared receive buffer from the host and free it.
 * A no-op when the ring was never created.
 */
static int
hvn_rx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_nvs_rxbuf_disconn cmd;
	uint64_t tid;

	if (sc->sc_rx_ring == NULL)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvs_type = HVN_NVS_TYPE_RXBUF_DISCONN;
	cmd.nvs_sig = HVN_NVS_RXBUF_SIG;

	/* Fire-and-forget: timo 0 means no completion is awaited. */
	tid = atomic_inc_uint_nv(&sc->sc_nvstid);
	if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 0))
		return -1;

	/* Give the host a moment to process the disconnect. */
	delay(100);

	vmbus_handle_free(sc->sc_chan, sc->sc_rx_hndl);
	sc->sc_rx_hndl = 0;

	hyperv_dma_free(sc->sc_dmat, &sc->sc_rx_dma);
	sc->sc_rx_ring = NULL;

	return 0;
}
777
778 static int
779 hvn_tx_ring_create(struct hvn_softc *sc)
780 {
781 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
782 struct hvn_tx_desc *txd;
783 bus_dma_segment_t *seg;
784 size_t msgsize;
785 int i, rsegs;
786 paddr_t pa;
787
788 msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);
789
790 /* Allocate memory to store RNDIS messages */
791 if (bus_dmamem_alloc(sc->sc_dmat, msgsize * HVN_TX_DESC, PAGE_SIZE, 0,
792 &sc->sc_tx_mseg, 1, &rsegs, dmaflags)) {
793 DPRINTF("%s: failed to allocate memory for RDNIS messages\n",
794 device_xname(sc->sc_dev));
795 goto errout;
796 }
797 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_mseg, 1, msgsize *
798 HVN_TX_DESC, (void **)&sc->sc_tx_msgs, dmaflags)) {
799 DPRINTF("%s: failed to establish mapping for RDNIS messages\n",
800 device_xname(sc->sc_dev));
801 goto errout;
802 }
803 memset(sc->sc_tx_msgs, 0, msgsize * HVN_TX_DESC);
804 if (bus_dmamap_create(sc->sc_dmat, msgsize * HVN_TX_DESC, 1,
805 msgsize * HVN_TX_DESC, 0, dmaflags, &sc->sc_tx_rmap)) {
806 DPRINTF("%s: failed to create map for RDNIS messages\n",
807 device_xname(sc->sc_dev));
808 goto errout;
809 }
810 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_msgs,
811 msgsize * HVN_TX_DESC, NULL, dmaflags)) {
812 DPRINTF("%s: failed to create map for RDNIS messages\n",
813 device_xname(sc->sc_dev));
814 goto errout;
815 }
816
817 for (i = 0; i < HVN_TX_DESC; i++) {
818 txd = &sc->sc_tx_desc[i];
819 if (bus_dmamap_create(sc->sc_dmat, HVN_TX_PKT_SIZE,
820 HVN_TX_FRAGS, HVN_TX_FRAG_SIZE, PAGE_SIZE, dmaflags,
821 &txd->txd_dmap)) {
822 DPRINTF("%s: failed to create map for TX descriptors\n",
823 device_xname(sc->sc_dev));
824 goto errout;
825 }
826 seg = &sc->sc_tx_rmap->dm_segs[0];
827 pa = seg->ds_addr + (msgsize * i);
828 txd->txd_gpa.gpa_page = atop(pa);
829 txd->txd_gpa.gpa_ofs = pa & PAGE_MASK;
830 txd->txd_gpa.gpa_len = msgsize;
831 txd->txd_req = (void *)(sc->sc_tx_msgs + (msgsize * i));
832 txd->txd_id = i + HVN_NVS_CHIM_SIG;
833 txd->txd_ready = 1;
834 }
835 sc->sc_tx_avail = HVN_TX_DESC;
836
837 return 0;
838
839 errout:
840 hvn_tx_ring_destroy(sc);
841 return -1;
842 }
843
/*
 * Tear down the transmit ring: per-descriptor DMA maps and pending
 * mbufs first, then the shared RNDIS message block.  Safe to call on
 * a partially constructed ring (used as the error path of
 * hvn_tx_ring_create()).
 */
static void
hvn_tx_ring_destroy(struct hvn_softc *sc)
{
	struct hvn_tx_desc *txd;
	int i;

	for (i = 0; i < HVN_TX_DESC; i++) {
		txd = &sc->sc_tx_desc[i];
		if (txd->txd_dmap == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmap,
		    0, txd->txd_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmap);
		bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmap);
		txd->txd_dmap = NULL;
		/* Drop any mbuf still owned by the descriptor. */
		if (txd->txd_buf == NULL)
			continue;
		m_freem(txd->txd_buf);
		txd->txd_buf = NULL;
	}
	if (sc->sc_tx_rmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap,
		    0, sc->sc_tx_rmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
		sc->sc_tx_rmap = NULL;
	}
	if (sc->sc_tx_msgs != NULL) {
		/* Must match the size computed in hvn_tx_ring_create(). */
		size_t msgsize = roundup(HVN_RNDIS_PKT_LEN, 128);

		bus_dmamem_unmap(sc->sc_dmat, sc->sc_tx_msgs,
		    msgsize * HVN_TX_DESC);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_mseg, 1);
		sc->sc_tx_msgs = NULL;
	}
}
882
883 static int
884 hvn_get_lladdr(struct hvn_softc *sc, uint8_t *enaddr)
885 {
886 size_t addrlen = ETHER_ADDR_LEN;
887 int rv;
888
889 rv = hvn_rndis_query(sc, OID_802_3_PERMANENT_ADDRESS, enaddr, &addrlen);
890 if (rv == 0 && addrlen != ETHER_ADDR_LEN)
891 rv = -1;
892 return rv;
893 }
894
895 static void
896 hvn_get_link_status(struct hvn_softc *sc)
897 {
898 uint32_t state;
899 size_t len = sizeof(state);
900
901 if (hvn_rndis_query(sc, OID_GEN_MEDIA_CONNECT_STATUS,
902 &state, &len) == 0)
903 sc->sc_link_state = (state == NDIS_MEDIA_STATE_CONNECTED) ?
904 LINK_STATE_UP : LINK_STATE_DOWN;
905 }
906
907 static int
908 hvn_nvs_attach(struct hvn_softc *sc)
909 {
910 static const uint32_t protos[] = {
911 HVN_NVS_PROTO_VERSION_5,
912 HVN_NVS_PROTO_VERSION_4,
913 HVN_NVS_PROTO_VERSION_2,
914 HVN_NVS_PROTO_VERSION_1
915 };
916 const int kmemflags = cold ? KM_NOSLEEP : KM_SLEEP;
917 struct hvn_nvs_init cmd;
918 struct hvn_nvs_init_resp *rsp;
919 struct hvn_nvs_ndis_init ncmd;
920 struct hvn_nvs_ndis_conf ccmd;
921 uint32_t ndisver, ringsize;
922 uint64_t tid;
923 int i;
924
925 sc->sc_nvsbuf = kmem_zalloc(HVN_NVS_BUFSIZE, kmemflags);
926 if (sc->sc_nvsbuf == NULL) {
927 DPRINTF("%s: failed to allocate channel data buffer\n",
928 device_xname(sc->sc_dev));
929 return -1;
930 }
931
932 /* We need to be able to fit all RNDIS control and data messages */
933 ringsize = HVN_RNDIS_CTLREQS *
934 (sizeof(struct hvn_nvs_rndis) + sizeof(struct vmbus_gpa)) +
935 HVN_TX_DESC * (sizeof(struct hvn_nvs_rndis) +
936 (HVN_TX_FRAGS + 1) * sizeof(struct vmbus_gpa));
937
938 sc->sc_chan->ch_flags &= ~CHF_BATCHED;
939
940 if (vmbus_channel_setdeferred(sc->sc_chan, device_xname(sc->sc_dev))) {
941 aprint_error_dev(sc->sc_dev,
942 "failed to create the interrupt thread\n");
943 return -1;
944 }
945
946 /* Associate our interrupt handler with the channel */
947 if (vmbus_channel_open(sc->sc_chan, ringsize, NULL, 0,
948 hvn_nvs_intr, sc)) {
949 DPRINTF("%s: failed to open channel\n",
950 device_xname(sc->sc_dev));
951 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
952 return -1;
953 }
954
955 memset(&cmd, 0, sizeof(cmd));
956 cmd.nvs_type = HVN_NVS_TYPE_INIT;
957 for (i = 0; i < __arraycount(protos); i++) {
958 cmd.nvs_ver_min = cmd.nvs_ver_max = protos[i];
959 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
960 if (hvn_nvs_cmd(sc, &cmd, sizeof(cmd), tid, 100))
961 return -1;
962
963 rsp = (struct hvn_nvs_init_resp *)&sc->sc_nvsrsp;
964 if (rsp->nvs_status == HVN_NVS_STATUS_OK) {
965 sc->sc_proto = protos[i];
966 break;
967 }
968 }
969 if (i == __arraycount(protos)) {
970 DPRINTF("%s: failed to negotiate NVSP version\n",
971 device_xname(sc->sc_dev));
972 return -1;
973 }
974
975 if (sc->sc_proto >= HVN_NVS_PROTO_VERSION_2) {
976 memset(&ccmd, 0, sizeof(ccmd));
977 ccmd.nvs_type = HVN_NVS_TYPE_NDIS_CONF;
978 ccmd.nvs_mtu = HVN_MAXMTU;
979 ccmd.nvs_caps = HVN_NVS_NDIS_CONF_VLAN;
980
981 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
982 if (hvn_nvs_cmd(sc, &ccmd, sizeof(ccmd), tid, 100))
983 return -1;
984 }
985
986 memset(&ncmd, 0, sizeof(ncmd));
987 ncmd.nvs_type = HVN_NVS_TYPE_NDIS_INIT;
988 if (sc->sc_proto <= HVN_NVS_PROTO_VERSION_4)
989 ndisver = NDIS_VERSION_6_1;
990 else
991 ndisver = NDIS_VERSION_6_30;
992 ncmd.nvs_ndis_major = (ndisver & 0xffff0000) >> 16;
993 ncmd.nvs_ndis_minor = ndisver & 0x0000ffff;
994
995 tid = atomic_inc_uint_nv(&sc->sc_nvstid);
996 if (hvn_nvs_cmd(sc, &ncmd, sizeof(ncmd), tid, 100))
997 return -1;
998
999 sc->sc_ndisver = ndisver;
1000
1001 return 0;
1002 }
1003
/*
 * Channel interrupt handler: drain all pending packets from the
 * channel ring.  Command completions are copied into sc_nvsrsp and
 * signalled via sc_nvsdone/wakeup; RNDIS acks complete transmits;
 * RXBUF packets carry inbound RNDIS data.  If any transmits were
 * completed, kick the deferred start at the end.
 */
static void
hvn_nvs_intr(void *arg)
{
	struct hvn_softc *sc = arg;
	struct ifnet *ifp = SC2IFP(sc);
	struct vmbus_chanpkt_hdr *cph;
	const struct hvn_nvs_hdr *nvs;
	uint64_t rid;
	uint32_t rlen;
	int rv;
	bool dotx = false;

	for (;;) {
		rv = vmbus_channel_recv(sc->sc_chan, sc->sc_nvsbuf,
		    HVN_NVS_BUFSIZE, &rlen, &rid, 1);
		/* EAGAIN means the ring is empty — a normal exit. */
		if (rv != 0 || rlen == 0) {
			if (rv != EAGAIN)
				device_printf(sc->sc_dev,
				    "failed to receive an NVSP packet\n");
			break;
		}
		cph = (struct vmbus_chanpkt_hdr *)sc->sc_nvsbuf;
		nvs = (const struct hvn_nvs_hdr *)VMBUS_CHANPKT_CONST_DATA(cph);

		if (cph->cph_type == VMBUS_CHANPKT_TYPE_COMP) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_INIT_RESP:
			case HVN_NVS_TYPE_RXBUF_CONNRESP:
			case HVN_NVS_TYPE_CHIM_CONNRESP:
			case HVN_NVS_TYPE_SUBCH_RESP:
				/* copy the response back */
				memcpy(&sc->sc_nvsrsp, nvs, HVN_NVS_MSGSIZE);
				sc->sc_nvsdone = 1;
				/* Wake the hvn_nvs_cmd() waiter. */
				wakeup(&sc->sc_nvsrsp);
				break;
			case HVN_NVS_TYPE_RNDIS_ACK:
				/* Transmit completion. */
				dotx = true;
				hvn_txeof(sc, cph->cph_tid);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on completion\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_RXBUF) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_RNDIS:
				/* Inbound RNDIS data/control message. */
				hvn_rndis_input(sc, cph->cph_tid, cph);
				break;
			default:
				device_printf(sc->sc_dev,
				    "unhandled NVSP packet type %u "
				    "on receive\n", nvs->nvs_type);
				break;
			}
		} else if (cph->cph_type == VMBUS_CHANPKT_TYPE_INBAND) {
			switch (nvs->nvs_type) {
			case HVN_NVS_TYPE_TXTBL_NOTE:
				/* Useless; ignore */
				break;
			default:
				device_printf(sc->sc_dev,
				    "got notify, nvs type %u\n", nvs->nvs_type);
				break;
			}
		} else
			device_printf(sc->sc_dev,
			    "unknown NVSP packet type %u\n", cph->cph_type);
	}

	if (dotx)
		if_schedule_deferred_start(ifp);
}
1078
/*
 * Send an NVS command on the channel and, when timo > 0, wait up to
 * roughly timo milliseconds for its completion (which hvn_nvs_intr()
 * records in sc_nvsrsp and signals via sc_nvsdone).  When the system
 * is cold, waits are replaced by busy-polling the channel directly.
 * Returns 0 on success, a send error, or ETIMEDOUT.
 */
static int
hvn_nvs_cmd(struct hvn_softc *sc, void *cmd, size_t cmdsize, uint64_t tid,
    int timo)
{
	struct hvn_nvs_hdr *hdr = cmd;
	int tries = 10;
	int rv, s;

	sc->sc_nvsdone = 0;

	/* Retry the send a bounded number of times on a full ring. */
	do {
		rv = vmbus_channel_send(sc->sc_chan, cmd, cmdsize,
		    tid, VMBUS_CHANPKT_TYPE_INBAND,
		    timo ? VMBUS_CHANPKT_FLAG_RC : 0);
		if (rv == EAGAIN) {
			if (cold)
				delay(1000);
			else
				tsleep(cmd, PRIBIO, "nvsout", mstohz(1));
		} else if (rv) {
			DPRINTF("%s: NVSP operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->nvs_type, rv);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "NVSP operation %u send error %d\n", hdr->nvs_type, rv);
		return rv;
	}

	/* Fire-and-forget commands do not wait for a completion. */
	if (timo == 0)
		return 0;

	do {
		if (cold) {
			/* No interrupts yet: poll the channel ourselves. */
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(sc->sc_nvsrsp, PRIBIO | PCATCH, "nvscmd",
			    mstohz(1));
	} while (--timo > 0 && sc->sc_nvsdone != 1);

	if (timo == 0 && sc->sc_nvsdone != 1) {
		device_printf(sc->sc_dev, "NVSP operation %u timed out\n",
		    hdr->nvs_type);
		return ETIMEDOUT;
	}
	return 0;
}
1132
1133 static int
1134 hvn_nvs_ack(struct hvn_softc *sc, uint64_t tid)
1135 {
1136 struct hvn_nvs_rndis_ack cmd;
1137 int tries = 5;
1138 int rv;
1139
1140 cmd.nvs_type = HVN_NVS_TYPE_RNDIS_ACK;
1141 cmd.nvs_status = HVN_NVS_STATUS_OK;
1142 do {
1143 rv = vmbus_channel_send(sc->sc_chan, &cmd, sizeof(cmd),
1144 tid, VMBUS_CHANPKT_TYPE_COMP, 0);
1145 if (rv == EAGAIN)
1146 delay(10);
1147 else if (rv) {
1148 DPRINTF("%s: NVSP acknowledgement error %d\n",
1149 device_xname(sc->sc_dev), rv);
1150 return rv;
1151 }
1152 } while (rv != 0 && --tries > 0);
1153 return rv;
1154 }
1155
1156 static void
1157 hvn_nvs_detach(struct hvn_softc *sc)
1158 {
1159
1160 if (vmbus_channel_close(sc->sc_chan) == 0) {
1161 kmem_free(sc->sc_nvsbuf, HVN_NVS_BUFSIZE);
1162 sc->sc_nvsbuf = NULL;
1163 }
1164 }
1165
/*
 * Take an RNDIS control descriptor off the free queue, sleeping (in
 * 1-tick slices, lock released across the sleep) until one is put
 * back by hvn_free_cmd().
 */
static inline struct rndis_cmd *
hvn_alloc_cmd(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;

	mutex_enter(&sc->sc_cntl_fqlck);
	while ((rc = TAILQ_FIRST(&sc->sc_cntl_fq)) == NULL)
		/* XXX use condvar(9) instead of mtsleep */
		mtsleep(&sc->sc_cntl_fq, PRIBIO, "nvsalloc", 1,
		    &sc->sc_cntl_fqlck);
	TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	return rc;
}
1180
/*
 * Place a control request on the submission queue; it stays there
 * until hvn_complete_cmd() matches it with a host response or
 * hvn_rollback_cmd() withdraws it.
 */
static inline void
hvn_submit_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_sqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_sq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_sqlck);
}
1189
1190 static inline struct rndis_cmd *
1191 hvn_complete_cmd(struct hvn_softc *sc, uint32_t id)
1192 {
1193 struct rndis_cmd *rc;
1194
1195 mutex_enter(&sc->sc_cntl_sqlck);
1196 TAILQ_FOREACH(rc, &sc->sc_cntl_sq, rc_entry) {
1197 if (rc->rc_id == id) {
1198 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1199 break;
1200 }
1201 }
1202 mutex_exit(&sc->sc_cntl_sqlck);
1203 if (rc != NULL) {
1204 mutex_enter(&sc->sc_cntl_cqlck);
1205 TAILQ_INSERT_TAIL(&sc->sc_cntl_cq, rc, rc_entry);
1206 mutex_exit(&sc->sc_cntl_cqlck);
1207 }
1208 return rc;
1209 }
1210
/*
 * Remove a request from the "completed" queue once its response has
 * been consumed by the waiter.
 */
static inline void
hvn_release_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	mutex_enter(&sc->sc_cntl_cqlck);
	TAILQ_REMOVE(&sc->sc_cntl_cq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_cqlck);
}
1219
1220 static inline int
1221 hvn_rollback_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
1222 {
1223 struct rndis_cmd *rn;
1224
1225 mutex_enter(&sc->sc_cntl_sqlck);
1226 TAILQ_FOREACH(rn, &sc->sc_cntl_sq, rc_entry) {
1227 if (rn == rc) {
1228 TAILQ_REMOVE(&sc->sc_cntl_sq, rc, rc_entry);
1229 mutex_exit(&sc->sc_cntl_sqlck);
1230 return 0;
1231 }
1232 }
1233 mutex_exit(&sc->sc_cntl_sqlck);
1234 return -1;
1235 }
1236
/*
 * Scrub a control descriptor's request page and completion state,
 * return it to the free queue, and wake anyone blocked in
 * hvn_alloc_cmd().
 */
static inline void
hvn_free_cmd(struct hvn_softc *sc, struct rndis_cmd *rc)
{

	/* Only the request-message prefix of the DMA page is cleared. */
	memset(rc->rc_req, 0, sizeof(struct rndis_packet_msg));
	memset(&rc->rc_cmp, 0, sizeof(rc->rc_cmp));
	memset(&rc->rc_msg, 0, sizeof(rc->rc_msg));
	mutex_enter(&sc->sc_cntl_fqlck);
	TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
	mutex_exit(&sc->sc_cntl_fqlck);
	wakeup(&sc->sc_cntl_fq);
}
1249
1250 static int
1251 hvn_rndis_attach(struct hvn_softc *sc)
1252 {
1253 const int dmaflags = cold ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
1254 struct rndis_init_req *req;
1255 struct rndis_init_comp *cmp;
1256 struct rndis_cmd *rc;
1257 int i, rv;
1258
1259 /* RNDIS control message queues */
1260 TAILQ_INIT(&sc->sc_cntl_sq);
1261 TAILQ_INIT(&sc->sc_cntl_cq);
1262 TAILQ_INIT(&sc->sc_cntl_fq);
1263 mutex_init(&sc->sc_cntl_sqlck, MUTEX_DEFAULT, IPL_NET);
1264 mutex_init(&sc->sc_cntl_cqlck, MUTEX_DEFAULT, IPL_NET);
1265 mutex_init(&sc->sc_cntl_fqlck, MUTEX_DEFAULT, IPL_NET);
1266
1267 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1268 rc = &sc->sc_cntl_msgs[i];
1269 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1270 dmaflags, &rc->rc_dmap)) {
1271 DPRINTF("%s: failed to create RNDIS command map\n",
1272 device_xname(sc->sc_dev));
1273 goto errout;
1274 }
1275 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1276 0, &rc->rc_segs, 1, &rc->rc_nsegs, dmaflags)) {
1277 DPRINTF("%s: failed to allocate RNDIS command\n",
1278 device_xname(sc->sc_dev));
1279 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1280 goto errout;
1281 }
1282 if (bus_dmamem_map(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs,
1283 PAGE_SIZE, (void **)&rc->rc_req, dmaflags)) {
1284 DPRINTF("%s: failed to allocate RNDIS command\n",
1285 device_xname(sc->sc_dev));
1286 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1287 rc->rc_nsegs);
1288 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1289 goto errout;
1290 }
1291 memset(rc->rc_req, 0, PAGE_SIZE);
1292 if (bus_dmamap_load(sc->sc_dmat, rc->rc_dmap, rc->rc_req,
1293 PAGE_SIZE, NULL, dmaflags)) {
1294 DPRINTF("%s: failed to load RNDIS command map\n",
1295 device_xname(sc->sc_dev));
1296 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs,
1297 rc->rc_nsegs);
1298 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1299 goto errout;
1300 }
1301 rc->rc_gpa = atop(rc->rc_dmap->dm_segs[0].ds_addr);
1302 TAILQ_INSERT_TAIL(&sc->sc_cntl_fq, rc, rc_entry);
1303 }
1304
1305 rc = hvn_alloc_cmd(sc);
1306
1307 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1308 BUS_DMASYNC_PREREAD);
1309
1310 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1311
1312 req = rc->rc_req;
1313 req->rm_type = REMOTE_NDIS_INITIALIZE_MSG;
1314 req->rm_len = sizeof(*req);
1315 req->rm_rid = rc->rc_id;
1316 req->rm_ver_major = RNDIS_VERSION_MAJOR;
1317 req->rm_ver_minor = RNDIS_VERSION_MINOR;
1318 req->rm_max_xfersz = HVN_RNDIS_XFER_SIZE;
1319
1320 rc->rc_cmplen = sizeof(*cmp);
1321
1322 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1323 BUS_DMASYNC_PREWRITE);
1324
1325 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1326 DPRINTF("%s: INITIALIZE_MSG failed, error %d\n",
1327 device_xname(sc->sc_dev), rv);
1328 hvn_free_cmd(sc, rc);
1329 goto errout;
1330 }
1331 cmp = (struct rndis_init_comp *)&rc->rc_cmp;
1332 if (cmp->rm_status != RNDIS_STATUS_SUCCESS) {
1333 DPRINTF("%s: failed to init RNDIS, error %#x\n",
1334 device_xname(sc->sc_dev), cmp->rm_status);
1335 hvn_free_cmd(sc, rc);
1336 goto errout;
1337 }
1338
1339 hvn_free_cmd(sc, rc);
1340
1341 /* Initialize RNDIS Data command */
1342 memset(&sc->sc_data_msg, 0, sizeof(sc->sc_data_msg));
1343 sc->sc_data_msg.nvs_type = HVN_NVS_TYPE_RNDIS;
1344 sc->sc_data_msg.nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_DATA;
1345 sc->sc_data_msg.nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;
1346
1347 return 0;
1348
1349 errout:
1350 for (i = 0; i < HVN_RNDIS_CTLREQS; i++) {
1351 rc = &sc->sc_cntl_msgs[i];
1352 if (rc->rc_req == NULL)
1353 continue;
1354 TAILQ_REMOVE(&sc->sc_cntl_fq, rc, rc_entry);
1355 bus_dmamem_free(sc->sc_dmat, &rc->rc_segs, rc->rc_nsegs);
1356 rc->rc_req = NULL;
1357 bus_dmamap_destroy(sc->sc_dmat, rc->rc_dmap);
1358 }
1359 return -1;
1360 }
1361
1362 static int
1363 hvn_set_capabilities(struct hvn_softc *sc)
1364 {
1365 struct ndis_offload_params params;
1366 size_t len = sizeof(params);
1367
1368 memset(¶ms, 0, sizeof(params));
1369
1370 params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
1371 if (sc->sc_ndisver < NDIS_VERSION_6_30) {
1372 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
1373 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
1374 } else {
1375 params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
1376 len = params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
1377 }
1378
1379 params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TXRX;
1380 params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1381 params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1382 if (sc->sc_ndisver >= NDIS_VERSION_6_30) {
1383 params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TXRX;
1384 params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TXRX;
1385 }
1386
1387 return hvn_rndis_set(sc, OID_TCP_OFFLOAD_PARAMETERS, ¶ms, len);
1388 }
1389
/*
 * Submit an RNDIS control request and wait up to roughly "timo"
 * milliseconds for the matching completion, which is delivered via
 * hvn_rndis_complete() (sets rc_done and wakes us on rc).
 *
 * The request body lives in rc's DMA page and is passed to the host
 * by guest physical address; only the small NVS wrapper travels in
 * the ring buffer.  On failure the request is withdrawn from the
 * submission queue so a late completion cannot match a recycled id.
 */
static int
hvn_rndis_cmd(struct hvn_softc *sc, struct rndis_cmd *rc, int timo)
{
	struct hvn_nvs_rndis *msg = &rc->rc_msg;
	struct rndis_msghdr *hdr = rc->rc_req;
	struct vmbus_gpa sgl[1];
	int tries = 10;
	int rv, s;

	KASSERT(timo > 0);

	msg->nvs_type = HVN_NVS_TYPE_RNDIS;
	msg->nvs_rndis_mtype = HVN_NVS_RNDIS_MTYPE_CTRL;
	msg->nvs_chim_idx = HVN_NVS_CHIM_IDX_INVALID;

	/* The whole request fits in a single GPA range. */
	sgl[0].gpa_page = rc->rc_gpa;
	sgl[0].gpa_len = hdr->rm_len;
	sgl[0].gpa_ofs = 0;

	rc->rc_done = 0;

	/* Must be on the submission queue before the host can answer. */
	hvn_submit_cmd(sc, rc);

	do {
		rv = vmbus_channel_send_sgl(sc->sc_chan, sgl, 1, &rc->rc_msg,
		    sizeof(*msg), rc->rc_id);
		if (rv == EAGAIN) {
			/* Ring full: back off ~1ms, busy-wait while cold. */
			if (cold)
				delay(1000);
			else
				tsleep(rc, PRIBIO, "rndisout", mstohz(1));
		} else if (rv) {
			DPRINTF("%s: RNDIS operation %u send error %d\n",
			    device_xname(sc->sc_dev), hdr->rm_type, rv);
			hvn_rollback_cmd(sc, rc);
			return rv;
		}
	} while (rv != 0 && --tries > 0);

	if (tries == 0 && rv != 0) {
		device_printf(sc->sc_dev,
		    "RNDIS operation %u send error %d\n", hdr->rm_type, rv);
		return rv;
	}
	if (vmbus_channel_is_revoked(sc->sc_chan)) {
		/* No response */
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * While cold there is no scheduler: poll by running the
	 * interrupt handler directly at splnet.  Otherwise sleep in
	 * ~1ms slices until completion, signal, or timeout.
	 */
	do {
		if (cold) {
			delay(1000);
			s = splnet();
			hvn_nvs_intr(sc);
			splx(s);
		} else
			tsleep(rc, PRIBIO | PCATCH, "rndiscmd", mstohz(1));
	} while (--timo > 0 && rc->rc_done != 1);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTREAD);

	if (rc->rc_done != 1) {
		rv = timo == 0 ? ETIMEDOUT : EINTR;
		/*
		 * If rollback fails (non-zero) the completion raced us
		 * onto the completed queue: release it and report
		 * success.  Otherwise the request truly went
		 * unanswered.
		 */
		if (hvn_rollback_cmd(sc, rc)) {
			hvn_release_cmd(sc, rc);
			rv = 0;
		} else if (rv == ETIMEDOUT) {
			device_printf(sc->sc_dev,
			    "RNDIS operation %u timed out\n", hdr->rm_type);
		}
		return rv;
	}

	hvn_release_cmd(sc, rc);
	return 0;
}
1470
1471 static void
1472 hvn_rndis_input(struct hvn_softc *sc, uint64_t tid, void *arg)
1473 {
1474 struct vmbus_chanpkt_prplist *cp = arg;
1475 uint32_t off, len, type;
1476 int i;
1477
1478 if (sc->sc_rx_ring == NULL) {
1479 DPRINTF("%s: invalid rx ring\n", device_xname(sc->sc_dev));
1480 return;
1481 }
1482
1483 for (i = 0; i < cp->cp_range_cnt; i++) {
1484 off = cp->cp_range[i].gpa_ofs;
1485 len = cp->cp_range[i].gpa_len;
1486
1487 KASSERT(off + len <= sc->sc_rx_size);
1488 KASSERT(len >= RNDIS_HEADER_OFFSET + 4);
1489
1490 memcpy(&type, sc->sc_rx_ring + off, sizeof(type));
1491 switch (type) {
1492 /* data message */
1493 case REMOTE_NDIS_PACKET_MSG:
1494 hvn_rxeof(sc, sc->sc_rx_ring + off, len);
1495 break;
1496 /* completion messages */
1497 case REMOTE_NDIS_INITIALIZE_CMPLT:
1498 case REMOTE_NDIS_QUERY_CMPLT:
1499 case REMOTE_NDIS_SET_CMPLT:
1500 case REMOTE_NDIS_RESET_CMPLT:
1501 case REMOTE_NDIS_KEEPALIVE_CMPLT:
1502 hvn_rndis_complete(sc, sc->sc_rx_ring + off, len);
1503 break;
1504 /* notification message */
1505 case REMOTE_NDIS_INDICATE_STATUS_MSG:
1506 hvn_rndis_status(sc, sc->sc_rx_ring + off, len);
1507 break;
1508 default:
1509 device_printf(sc->sc_dev,
1510 "unhandled RNDIS message type %u\n", type);
1511 break;
1512 }
1513 }
1514
1515 hvn_nvs_ack(sc, tid);
1516 }
1517
1518 static inline struct mbuf *
1519 hvn_devget(struct hvn_softc *sc, void *buf, uint32_t len)
1520 {
1521 struct ifnet *ifp = SC2IFP(sc);
1522 struct mbuf *m;
1523 size_t size = len + ETHER_ALIGN;
1524
1525 MGETHDR(m, M_NOWAIT, MT_DATA);
1526 if (m == NULL)
1527 return NULL;
1528
1529 if (size > MHLEN) {
1530 if (size <= MCLBYTES)
1531 MCLGET(m, M_NOWAIT);
1532 else
1533 MEXTMALLOC(m, size, M_NOWAIT);
1534 if ((m->m_flags & M_EXT) == 0) {
1535 m_freem(m);
1536 return NULL;
1537 }
1538 }
1539
1540 m->m_len = m->m_pkthdr.len = size;
1541 m_adj(m, ETHER_ALIGN);
1542 m_copyback(m, 0, len, buf);
1543 m_set_rcvif(m, ifp);
1544 return m;
1545 }
1546
1547 static void
1548 hvn_rxeof(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1549 {
1550 struct ifnet *ifp = SC2IFP(sc);
1551 struct rndis_packet_msg *pkt;
1552 struct rndis_pktinfo *pi;
1553 uint32_t csum, vlan;
1554 struct mbuf *m;
1555
1556 if (!(ifp->if_flags & IFF_RUNNING))
1557 return;
1558
1559 if (len < sizeof(*pkt)) {
1560 device_printf(sc->sc_dev, "data packet too short: %u\n",
1561 len);
1562 return;
1563 }
1564
1565 pkt = (struct rndis_packet_msg *)buf;
1566 if (pkt->rm_dataoffset + pkt->rm_datalen > len) {
1567 device_printf(sc->sc_dev,
1568 "data packet out of bounds: %u@%u\n", pkt->rm_dataoffset,
1569 pkt->rm_datalen);
1570 return;
1571 }
1572
1573 if ((m = hvn_devget(sc, buf + RNDIS_HEADER_OFFSET + pkt->rm_dataoffset,
1574 pkt->rm_datalen)) == NULL) {
1575 if_statinc(ifp, if_ierrors);
1576 return;
1577 }
1578
1579 if (pkt->rm_pktinfooffset + pkt->rm_pktinfolen > len) {
1580 device_printf(sc->sc_dev,
1581 "pktinfo is out of bounds: %u@%u vs %u\n",
1582 pkt->rm_pktinfolen, pkt->rm_pktinfooffset, len);
1583 goto done;
1584 }
1585
1586 pi = (struct rndis_pktinfo *)(buf + RNDIS_HEADER_OFFSET +
1587 pkt->rm_pktinfooffset);
1588 while (pkt->rm_pktinfolen > 0) {
1589 if (pi->rm_size > pkt->rm_pktinfolen) {
1590 device_printf(sc->sc_dev,
1591 "invalid pktinfo size: %u/%u\n", pi->rm_size,
1592 pkt->rm_pktinfolen);
1593 break;
1594 }
1595
1596 switch (pi->rm_type) {
1597 case NDIS_PKTINFO_TYPE_CSUM:
1598 memcpy(&csum, pi->rm_data, sizeof(csum));
1599 if (csum & NDIS_RXCSUM_INFO_IPCS_OK)
1600 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1601 if (csum & NDIS_RXCSUM_INFO_TCPCS_OK)
1602 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1603 if (csum & NDIS_RXCSUM_INFO_UDPCS_OK)
1604 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1605 break;
1606 case NDIS_PKTINFO_TYPE_VLAN:
1607 memcpy(&vlan, pi->rm_data, sizeof(vlan));
1608 if (vlan != 0xffffffff) {
1609 uint16_t t = NDIS_VLAN_INFO_ID(vlan);
1610 t |= NDIS_VLAN_INFO_PRI(vlan) << EVL_PRIO_BITS;
1611 t |= NDIS_VLAN_INFO_CFI(vlan) << EVL_CFI_BITS;
1612 vlan_set_tag(m, t);
1613 }
1614 break;
1615 default:
1616 DPRINTF("%s: unhandled pktinfo type %u\n",
1617 device_xname(sc->sc_dev), pi->rm_type);
1618 break;
1619 }
1620
1621 pkt->rm_pktinfolen -= pi->rm_size;
1622 pi = (struct rndis_pktinfo *)((char *)pi + pi->rm_size);
1623 }
1624
1625 done:
1626 if_percpuq_enqueue(sc->sc_ipq, m);
1627 }
1628
1629 static void
1630 hvn_rndis_complete(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1631 {
1632 struct rndis_cmd *rc;
1633 uint32_t id;
1634
1635 memcpy(&id, buf + RNDIS_HEADER_OFFSET, sizeof(id));
1636 if ((rc = hvn_complete_cmd(sc, id)) != NULL) {
1637 if (len < rc->rc_cmplen)
1638 device_printf(sc->sc_dev,
1639 "RNDIS response %u too short: %u\n", id, len);
1640 else
1641 memcpy(&rc->rc_cmp, buf, rc->rc_cmplen);
1642 if (len > rc->rc_cmplen &&
1643 len - rc->rc_cmplen > HVN_RNDIS_BUFSIZE)
1644 device_printf(sc->sc_dev,
1645 "RNDIS response %u too large: %u\n", id, len);
1646 else if (len > rc->rc_cmplen)
1647 memcpy(&rc->rc_cmpbuf, buf + rc->rc_cmplen,
1648 len - rc->rc_cmplen);
1649 rc->rc_done = 1;
1650 wakeup(rc);
1651 } else {
1652 DPRINTF("%s: failed to complete RNDIS request id %u\n",
1653 device_xname(sc->sc_dev), id);
1654 }
1655 }
1656
1657 static int
1658 hvn_rndis_output(struct hvn_softc *sc, struct hvn_tx_desc *txd)
1659 {
1660 uint64_t rid = (uint64_t)txd->txd_id << 32;
1661 int rv;
1662
1663 rv = vmbus_channel_send_sgl(sc->sc_chan, txd->txd_sgl, txd->txd_nsge,
1664 &sc->sc_data_msg, sizeof(sc->sc_data_msg), rid);
1665 if (rv) {
1666 DPRINTF("%s: RNDIS data send error %d\n",
1667 device_xname(sc->sc_dev), rv);
1668 return rv;
1669 }
1670 return 0;
1671 }
1672
1673 static void
1674 hvn_rndis_status(struct hvn_softc *sc, uint8_t *buf, uint32_t len)
1675 {
1676 struct ifnet *ifp = SC2IFP(sc);
1677 uint32_t status;
1678 int link_state = sc->sc_link_state;
1679
1680 memcpy(&status, buf + RNDIS_HEADER_OFFSET, sizeof(status));
1681 switch (status) {
1682 case RNDIS_STATUS_MEDIA_CONNECT:
1683 sc->sc_link_state = LINK_STATE_UP;
1684 break;
1685 case RNDIS_STATUS_MEDIA_DISCONNECT:
1686 sc->sc_link_state = LINK_STATE_DOWN;
1687 break;
1688 /* Ignore these */
1689 case RNDIS_STATUS_OFFLOAD_CURRENT_CONFIG:
1690 return;
1691 default:
1692 DPRINTF("%s: unhandled status %#x\n", device_xname(sc->sc_dev),
1693 status);
1694 return;
1695 }
1696 if (link_state != sc->sc_link_state)
1697 if_link_state_change(ifp, sc->sc_link_state);
1698 }
1699
1700 static int
1701 hvn_rndis_query(struct hvn_softc *sc, uint32_t oid, void *res, size_t *length)
1702 {
1703 struct rndis_cmd *rc;
1704 struct rndis_query_req *req;
1705 struct rndis_query_comp *cmp;
1706 size_t olength = *length;
1707 int rv;
1708
1709 rc = hvn_alloc_cmd(sc);
1710
1711 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1712 BUS_DMASYNC_PREREAD);
1713
1714 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1715
1716 req = rc->rc_req;
1717 req->rm_type = REMOTE_NDIS_QUERY_MSG;
1718 req->rm_len = sizeof(*req);
1719 req->rm_rid = rc->rc_id;
1720 req->rm_oid = oid;
1721 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1722
1723 rc->rc_cmplen = sizeof(*cmp);
1724
1725 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1726 BUS_DMASYNC_PREWRITE);
1727
1728 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1729 DPRINTF("%s: QUERY_MSG failed, error %d\n",
1730 device_xname(sc->sc_dev), rv);
1731 hvn_free_cmd(sc, rc);
1732 return rv;
1733 }
1734
1735 cmp = (struct rndis_query_comp *)&rc->rc_cmp;
1736 switch (cmp->rm_status) {
1737 case RNDIS_STATUS_SUCCESS:
1738 if (cmp->rm_infobuflen > olength) {
1739 rv = EINVAL;
1740 break;
1741 }
1742 memcpy(res, rc->rc_cmpbuf, cmp->rm_infobuflen);
1743 *length = cmp->rm_infobuflen;
1744 break;
1745 default:
1746 *length = 0;
1747 rv = EIO;
1748 break;
1749 }
1750
1751 hvn_free_cmd(sc, rc);
1752 return rv;
1753 }
1754
1755 static int
1756 hvn_rndis_set(struct hvn_softc *sc, uint32_t oid, void *data, size_t length)
1757 {
1758 struct rndis_cmd *rc;
1759 struct rndis_set_req *req;
1760 struct rndis_set_comp *cmp;
1761 int rv;
1762
1763 rc = hvn_alloc_cmd(sc);
1764
1765 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1766 BUS_DMASYNC_PREREAD);
1767
1768 rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);
1769
1770 req = rc->rc_req;
1771 req->rm_type = REMOTE_NDIS_SET_MSG;
1772 req->rm_len = sizeof(*req) + length;
1773 req->rm_rid = rc->rc_id;
1774 req->rm_oid = oid;
1775 req->rm_infobufoffset = sizeof(*req) - RNDIS_HEADER_OFFSET;
1776
1777 rc->rc_cmplen = sizeof(*cmp);
1778
1779 if (length > 0) {
1780 KASSERT(sizeof(*req) + length < PAGE_SIZE);
1781 req->rm_infobuflen = length;
1782 memcpy(req + 1, data, length);
1783 }
1784
1785 bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
1786 BUS_DMASYNC_PREWRITE);
1787
1788 if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
1789 DPRINTF("%s: SET_MSG failed, error %d\n",
1790 device_xname(sc->sc_dev), rv);
1791 hvn_free_cmd(sc, rc);
1792 return rv;
1793 }
1794
1795 cmp = (struct rndis_set_comp *)&rc->rc_cmp;
1796 if (cmp->rm_status != RNDIS_STATUS_SUCCESS)
1797 rv = EIO;
1798
1799 hvn_free_cmd(sc, rc);
1800 return rv;
1801 }
1802
1803 static int
1804 hvn_rndis_open(struct hvn_softc *sc)
1805 {
1806 uint32_t filter;
1807 int rv;
1808
1809 if (sc->sc_promisc)
1810 filter = RNDIS_PACKET_TYPE_PROMISCUOUS;
1811 else
1812 filter = RNDIS_PACKET_TYPE_BROADCAST |
1813 RNDIS_PACKET_TYPE_ALL_MULTICAST |
1814 RNDIS_PACKET_TYPE_DIRECTED;
1815
1816 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1817 &filter, sizeof(filter));
1818 if (rv) {
1819 DPRINTF("%s: failed to set RNDIS filter to %#x\n",
1820 device_xname(sc->sc_dev), filter);
1821 }
1822 return rv;
1823 }
1824
1825 static int
1826 hvn_rndis_close(struct hvn_softc *sc)
1827 {
1828 uint32_t filter = 0;
1829 int rv;
1830
1831 rv = hvn_rndis_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
1832 &filter, sizeof(filter));
1833 if (rv) {
1834 DPRINTF("%s: failed to clear RNDIS filter\n",
1835 device_xname(sc->sc_dev));
1836 }
1837 return rv;
1838 }
1839
/*
 * Send a best-effort RNDIS HALT to the host and destroy the control
 * queue locks.  The per-request DMA resources are not released here.
 */
static void
hvn_rndis_detach(struct hvn_softc *sc)
{
	struct rndis_cmd *rc;
	struct rndis_halt_req *req;
	int rv;

	rc = hvn_alloc_cmd(sc);

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);

	rc->rc_id = atomic_inc_uint_nv(&sc->sc_rndisrid);

	/* Compose the HALT request in the command's DMA page. */
	req = rc->rc_req;
	req->rm_type = REMOTE_NDIS_HALT_MSG;
	req->rm_len = sizeof(*req);
	req->rm_rid = rc->rc_id;

	bus_dmamap_sync(sc->sc_dmat, rc->rc_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREWRITE);

	/* Failure is logged but does not abort the teardown. */
	if ((rv = hvn_rndis_cmd(sc, rc, 500)) != 0) {
		DPRINTF("%s: HALT_MSG failed, error %d\n",
		    device_xname(sc->sc_dev), rv);
	}
	hvn_free_cmd(sc, rc);

	mutex_destroy(&sc->sc_cntl_sqlck);
	mutex_destroy(&sc->sc_cntl_cqlck);
	mutex_destroy(&sc->sc_cntl_fqlck);
}
1872