/*	$NetBSD: vnet.c,v 1.2 2021/03/04 20:59:39 palle Exp $	*/
/*	$OpenBSD: vnet.c,v 1.62 2020/07/10 13:26:36 patrick Exp $	*/
3 /*
4 * Copyright (c) 2009, 2015 Mark Kettenis
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
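/*
 * Driver for the virtual network interface (vnet) of a sun4v logical
 * domain.  The interface talks to its peer (typically the virtual switch
 * in the service domain) over a hypervisor LDC channel using the VIO
 * protocol: after a VER_INFO/ATTR_INFO/DRING_REG/RDX handshake, frames
 * are exchanged either through a shared descriptor ring (VIO_DRING_MODE)
 * or as individual descriptor messages (VIO_DESC_MODE).
 */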
19 #if 0
20 FIXME openbsd
21 #include "bpfilter.h"
22 #endif
23
24 #include <sys/kmem.h>
25 #include <sys/param.h>
26 #include <sys/atomic.h>
27 #include <sys/device.h>
28 #include <sys/malloc.h>
29 #include <sys/pool.h>
30 #include <sys/mbuf.h>
31 #include <sys/socket.h>
32 #include <sys/sockio.h>
33 #include <sys/systm.h>
34 #if 0
35 FIXME openbsd
36 #include <sys/timeout.h>
37 #endif
38
39 #include <machine/autoconf.h>
40 #include <machine/hypervisor.h>
41 #include <machine/openfirm.h>
42
43 #include <net/if.h>
44 #include <net/if_media.h>
45
46 #include <netinet/in.h>
47 #include <net/if_ether.h>
48
49 #if NBPFILTER > 0
50 #include <net/bpf.h>
51 #endif
52
53 #include <uvm/uvm_extern.h>
54
55 #include <sparc64/dev/cbusvar.h>
56 #include <sparc64/dev/ldcvar.h>
57 #include <sparc64/dev/viovar.h>
58
59 #ifdef VNET_DEBUG
60 #define DPRINTF(x) printf x
61 #else
62 #define DPRINTF(x)
63 #endif
64
65 #define VNET_TX_ENTRIES 32
66 #define VNET_RX_ENTRIES 32
67
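/*
 * Payload of the VIO ATTR_INFO message exchanged during the handshake;
 * it advertises each side's transfer mode, MAC address and MTU.
 */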
68 struct vnet_attr_info {
69 struct vio_msg_tag tag;
70 uint8_t xfer_mode;
71 uint8_t addr_type;
72 uint16_t ack_freq;
73 uint32_t _reserved1;
74 uint64_t addr;
75 uint64_t mtu;
76 uint64_t _reserved2[3];
77 };
78
79 /* Address types. */
80 #define VNET_ADDR_ETHERMAC 0x01
81
82 /* Sub-Type envelopes. */
83 #define VNET_MCAST_INFO 0x0101
84
85 #define VNET_NUM_MCAST 7
86
87 struct vnet_mcast_info {
88 struct vio_msg_tag tag;
89 uint8_t set;
90 uint8_t count;
91 uint8_t mcast_addr[VNET_NUM_MCAST][ETHER_ADDR_LEN];
92 uint32_t _reserved;
93 };
94
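/*
 * Layout of one transmit descriptor in the ring shared with the peer.
 * Each descriptor carries the frame length plus the LDC cookies that
 * describe where the peer can copy the frame from.
 */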
95 struct vnet_desc {
96 struct vio_dring_hdr hdr;
97 uint32_t nbytes;
98 uint32_t ncookies;
99 struct ldc_cookie cookie[2];
100 };
101
102 struct vnet_desc_msg {
103 struct vio_msg_tag tag;
104 uint64_t seq_no;
105 uint64_t desc_handle;
106 uint32_t nbytes;
107 uint32_t ncookies;
108 struct ldc_cookie cookie[1];
109 };
110
111 struct vnet_dring {
112 bus_dmamap_t vd_map;
113 bus_dma_segment_t vd_seg;
114 struct vnet_desc *vd_desc;
115 int vd_nentries;
116 };
117
118 struct vnet_dring *vnet_dring_alloc(bus_dma_tag_t, int);
119 void vnet_dring_free(bus_dma_tag_t, struct vnet_dring *);
120
121 /*
122 * For now, we only support vNet 1.0.
123 */
124 #define VNET_MAJOR 1
125 #define VNET_MINOR 0
126
/*
 * The vNet protocol wants the IP header to be 64-bit aligned, so
 * define our own variant of ETHER_ALIGN.
 */
131 #define VNET_ETHER_ALIGN 6
132
133 struct vnet_soft_desc {
134 int vsd_map_idx;
135 #if 0
136 FIXME openbsd
137 caddr_t vsd_buf;
138 #else
139 unsigned char *vsd_buf;
140 #endif
141 };
142
143 struct vnet_softc {
144 struct device sc_dv;
145 bus_space_tag_t sc_bustag;
146 bus_dma_tag_t sc_dmatag;
147
148 uint64_t sc_tx_ino;
149 uint64_t sc_rx_ino;
150 void *sc_tx_ih;
151 void *sc_rx_ih;
152
153 struct ldc_conn sc_lc;
154
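	/*
	 * Handshake progress.  For each of the VER_INFO, ATTR_INFO,
	 * DRING_REG and RDX steps we track whether we sent the message,
	 * received the peer's ACK for it, and received the peer's own
	 * message.  The link is considered up once RDX has completed in
	 * both directions.
	 */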
155 uint16_t sc_vio_state;
156 #define VIO_SND_VER_INFO 0x0001
157 #define VIO_ACK_VER_INFO 0x0002
158 #define VIO_RCV_VER_INFO 0x0004
159 #define VIO_SND_ATTR_INFO 0x0008
160 #define VIO_ACK_ATTR_INFO 0x0010
161 #define VIO_RCV_ATTR_INFO 0x0020
162 #define VIO_SND_DRING_REG 0x0040
163 #define VIO_ACK_DRING_REG 0x0080
164 #define VIO_RCV_DRING_REG 0x0100
165 #define VIO_SND_RDX 0x0200
166 #define VIO_ACK_RDX 0x0400
167 #define VIO_RCV_RDX 0x0800
168
169 #if 0
170 FIXME openbsd
171 struct timeout sc_handshake_to;
172 #endif
173
174 uint8_t sc_xfer_mode;
175
176 uint32_t sc_local_sid;
177 uint64_t sc_dring_ident;
178 uint64_t sc_seq_no;
179
180 u_int sc_tx_prod;
181 u_int sc_tx_cons;
182
183 u_int sc_peer_state;
184
185 struct ldc_map *sc_lm;
186 struct vnet_dring *sc_vd;
187 struct vnet_soft_desc *sc_vsd;
188 #define VNET_NUM_SOFT_DESC 128
189
190 size_t sc_peer_desc_size;
191 struct ldc_cookie sc_peer_dring_cookie;
192 int sc_peer_dring_nentries;
193
194 struct pool sc_pool;
195
196 struct ethercom sc_ethercom;
197 struct ifmedia sc_media;
198 u_int8_t sc_macaddr[ETHER_ADDR_LEN];
199 };
200
201 int vnet_match (device_t, cfdata_t, void *);
202 void vnet_attach (device_t, device_t, void *);
203
204 CFATTACH_DECL_NEW(vnet, sizeof(struct vnet_softc),
205 vnet_match, vnet_attach, NULL, NULL);
206
207 int vnet_tx_intr(void *);
208 int vnet_rx_intr(void *);
209 void vnet_handshake(void *);
210
211 void vio_rx_data(struct ldc_conn *, struct ldc_pkt *);
212 void vnet_rx_vio_ctrl(struct vnet_softc *, struct vio_msg *);
213 void vnet_rx_vio_ver_info(struct vnet_softc *, struct vio_msg_tag *);
214 void vnet_rx_vio_attr_info(struct vnet_softc *, struct vio_msg_tag *);
215 void vnet_rx_vio_dring_reg(struct vnet_softc *, struct vio_msg_tag *);
216 void vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *);
217 void vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *);
218 void vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *);
219 void vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *);
220
221 void vnet_ldc_reset(struct ldc_conn *);
222 void vnet_ldc_start(struct ldc_conn *);
223
224 void vnet_sendmsg(struct vnet_softc *, void *, size_t);
225 void vnet_send_ver_info(struct vnet_softc *, uint16_t, uint16_t);
226 void vnet_send_attr_info(struct vnet_softc *);
227 void vnet_send_dring_reg(struct vnet_softc *);
228 void vio_send_rdx(struct vnet_softc *);
229 void vnet_send_dring_data(struct vnet_softc *, uint32_t);
230
231 void vnet_start(struct ifnet *);
232 void vnet_start_desc(struct ifnet *);
233 int vnet_ioctl (struct ifnet *, u_long, void *);
234 void vnet_watchdog(struct ifnet *);
235
236 int vnet_media_change(struct ifnet *);
237 void vnet_media_status(struct ifnet *, struct ifmediareq *);
238
239 void vnet_link_state(struct vnet_softc *sc);
240
241 void vnet_setmulti(struct vnet_softc *, int);
242
243 void vnet_init(struct ifnet *);
244 void vnet_stop(struct ifnet *);
245
int
vnet_match(device_t parent, cfdata_t match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "network") == 0)
		return (1);

	return (0);
}
255
256 void
257 vnet_attach(struct device *parent, struct device *self, void *aux)
258 {
259 struct vnet_softc *sc = device_private(self);
260 struct cbus_attach_args *ca = aux;
261 struct ldc_conn *lc;
262 struct ifnet *ifp;
263
264 sc->sc_bustag = ca->ca_bustag;
265 sc->sc_dmatag = ca->ca_dmatag;
266 sc->sc_tx_ino = ca->ca_tx_ino;
267 sc->sc_rx_ino = ca->ca_rx_ino;
268
269 printf(": ivec 0x%" PRIx64 ", 0x%" PRIx64, sc->sc_tx_ino, sc->sc_rx_ino);
270
	/*
	 * Un-configure the queues before registering the interrupt
	 * handlers, so that we don't get any stale LDC packets or events.
	 */
275 hv_ldc_tx_qconf(ca->ca_id, 0, 0);
276 hv_ldc_rx_qconf(ca->ca_id, 0, 0);
277
278 #if 0
279 FIXME openbsd
280 sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
281 IPL_NET, BUS_INTR_ESTABLISH_MPSAFE, vnet_tx_intr,
282 sc, sc->sc_dv.dv_xname);
283 sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
284 IPL_NET, BUS_INTR_ESTABLISH_MPSAFE, vnet_rx_intr,
285 sc, sc->sc_dv.dv_xname);
286 if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
287 printf(", can't establish interrupt\n");
288 return;
289 }
290 #else
291 sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
292 IPL_NET, vnet_tx_intr, sc);
293 sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
294 IPL_NET, vnet_rx_intr, sc);
295 if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
296 printf(", can't establish interrupts\n");
297 return;
298 }
299 #endif
300
301 lc = &sc->sc_lc;
302 lc->lc_id = ca->ca_id;
303 lc->lc_sc = sc;
304 lc->lc_reset = vnet_ldc_reset;
305 lc->lc_start = vnet_ldc_start;
306 lc->lc_rx_data = vio_rx_data;
307
308 #if 0
309 FIXME openbsd
310 timeout_set(&sc->sc_handshake_to, vnet_handshake, sc);
311 #endif
312 sc->sc_peer_state = VIO_DP_STOPPED;
313
314 #if OPENBSD_BUSDMA
315 lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VNET_TX_ENTRIES);
316 if (lc->lc_txq == NULL) {
317 printf(", can't allocate tx queue\n");
318 return;
319 }
320
321 lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VNET_RX_ENTRIES);
322 if (lc->lc_rxq == NULL) {
323 printf(", can't allocate rx queue\n");
324 goto free_txqueue;
325 }
326 #else
327 lc->lc_txq = ldc_queue_alloc(VNET_TX_ENTRIES);
328 if (lc->lc_txq == NULL) {
329 printf(", can't allocate tx queue\n");
330 return;
331 }
332
333 lc->lc_rxq = ldc_queue_alloc(VNET_RX_ENTRIES);
334 if (lc->lc_rxq == NULL) {
335 printf(", can't allocate rx queue\n");
336 goto free_txqueue;
337 }
338 #endif
339
340 if (OF_getprop(ca->ca_node, "local-mac-address",
341 sc->sc_macaddr, ETHER_ADDR_LEN) > 0) {
342 printf(", address %s", ether_sprintf(sc->sc_macaddr));
343 } else {
344 printf(", cannot retrieve local mac address\n");
345 return;
346 }
347
348 /*
349 * Each interface gets its own pool.
350 */
351 #if 0
352 FIXME openbsd
353 pool_init(&sc->sc_pool, 2048, 0, IPL_NET, 0, sc->sc_dv.dv_xname, NULL);
354 #else
	pool_init(&sc->sc_pool, 2048, 0, 0, 0, device_xname(self), NULL, IPL_NET);
356 #endif
357
358 ifp = &sc->sc_ethercom.ec_if;
359 ifp->if_softc = sc;
360 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
361 ifp->if_link_state = LINK_STATE_DOWN;
362 ifp->if_ioctl = vnet_ioctl;
363 ifp->if_start = vnet_start;
364 ifp->if_watchdog = vnet_watchdog;
365 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
366 #if 0
367 FIXME openbsd
368 ifq_set_maxlen(&ifp->if_snd, 31); /* XXX */
369 #else
370 IFQ_SET_MAXLEN(&ifp->if_snd, 31); /* XXX */
371 #endif
372
373 ifmedia_init(&sc->sc_media, 0, vnet_media_change, vnet_media_status);
374 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
375 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
376
377 int error = if_initialize(ifp);
378 if (error != 0) {
379 printf(", if_initialize() failed\n");
380 return;
381 }
382 ether_ifattach(ifp, sc->sc_macaddr);
383 if_register(ifp);
384
385 printf("\n");
386 return;
387 free_txqueue:
388 #if OPENBSD_BUSDMA
389 ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
390 #else
391 ldc_queue_free(lc->lc_txq);
392 #endif
393 }
394
395 int
396 vnet_tx_intr(void *arg)
397 {
398 DPRINTF(("%s: entry\n", __func__));
399
400 struct vnet_softc *sc = arg;
401 struct ldc_conn *lc = &sc->sc_lc;
402 uint64_t tx_head, tx_tail, tx_state;
403
404 hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
405 if (tx_state != lc->lc_tx_state) {
406 switch (tx_state) {
407 case LDC_CHANNEL_DOWN:
408 DPRINTF(("%s: Tx link down\n", __func__));
409 break;
410 case LDC_CHANNEL_UP:
411 DPRINTF(("%s: Tx link up\n", __func__));
412 break;
413 case LDC_CHANNEL_RESET:
414 DPRINTF(("%s: Tx link reset\n", __func__));
415 break;
416 }
417 lc->lc_tx_state = tx_state;
418 }
419
420 DPRINTF(("%s: exit\n", __func__));
421 return (1);
422 }
423
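/*
 * Receive interrupt handler.  Reads the LDC queue state from the
 * hypervisor, handles channel up/down/reset transitions, dispatches the
 * packet at the queue head to the control or data path and then advances
 * the queue head past it.
 */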
424 int
425 vnet_rx_intr(void *arg)
426 {
427 DPRINTF(("%s: entry\n", __func__));
428
429 struct vnet_softc *sc = arg;
430 struct ldc_conn *lc = &sc->sc_lc;
431 uint64_t rx_head, rx_tail, rx_state;
432 struct ldc_pkt *lp;
433 int err;
434
435 err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
436 if (err == H_EINVAL)
437 return (0);
438 if (err != H_EOK) {
439 printf("hv_ldc_rx_get_state %d\n", err);
440 return (0);
441 }
442
443 if (rx_state != lc->lc_rx_state) {
444 switch (rx_state) {
445 case LDC_CHANNEL_DOWN:
446 DPRINTF(("%s: Rx link down\n", __func__));
447 lc->lc_tx_seqid = 0;
448 lc->lc_state = 0;
449 lc->lc_reset(lc);
450 if (rx_head == rx_tail)
451 break;
452 /* Discard and ack pending I/O. */
453 DPRINTF(("setting rx qhead to %lld\n", rx_tail));
454 err = hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
455 if (err == H_EOK)
456 break;
457 printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);
458 break;
459 case LDC_CHANNEL_UP:
460 DPRINTF(("%s: Rx link up\n", __func__));
461 #if 0
462 FIXME openbsd
463 timeout_add_msec(&sc->sc_handshake_to, 500);
464 #endif
465 break;
466 case LDC_CHANNEL_RESET:
467 DPRINTF(("%s: Rx link reset\n", __func__));
468 lc->lc_tx_seqid = 0;
469 lc->lc_state = 0;
470 lc->lc_reset(lc);
471 #if 0
472 FIXME openbsd
473 timeout_add_msec(&sc->sc_handshake_to, 500);
474 #endif
475 if (rx_head == rx_tail)
476 break;
477 /* Discard and ack pending I/O. */
478 DPRINTF(("setting rx qhead to %lld\n", rx_tail));
479 err = hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
480 if (err == H_EOK)
481 break;
482 printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);
483 break;
484 }
485 lc->lc_rx_state = rx_state;
486 return (1);
487 }
488
489 if (rx_head == rx_tail)
490 return (0);
491
492 lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
493 switch (lp->type) {
494 case LDC_CTRL:
495 ldc_rx_ctrl(lc, lp);
496 break;
497
498 case LDC_DATA:
499 ldc_rx_data(lc, lp);
500 break;
501
502 default:
503 DPRINTF(("%0x02/%0x02/%0x02\n", lp->type, lp->stype,
504 lp->ctrl));
505 ldc_reset(lc);
506 break;
507 }
508
509 if (lc->lc_state == 0)
510 return (1);
511
512 rx_head += sizeof(*lp);
513 rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
514 err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
515 if (err != H_EOK)
516 printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);
517
518 DPRINTF(("%s: exit\n", __func__));
519 return (1);
520 }
521
522 void
523 vnet_handshake(void *arg)
524 {
525 struct vnet_softc *sc = arg;
526
527 ldc_send_vers(&sc->sc_lc);
528 }
529
530 void
531 vio_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
532 {
533 struct vio_msg *vm = (struct vio_msg *)lp;
534
535 switch (vm->type) {
536 case VIO_TYPE_CTRL:
537 if ((lp->env & LDC_FRAG_START) == 0 &&
538 (lp->env & LDC_FRAG_STOP) == 0)
539 return;
540 vnet_rx_vio_ctrl(lc->lc_sc, vm);
541 break;
542
543 case VIO_TYPE_DATA:
544 if((lp->env & LDC_FRAG_START) == 0)
545 return;
546 vnet_rx_vio_data(lc->lc_sc, vm);
547 break;
548
549 default:
550 DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
551 ldc_reset(lc);
552 break;
553 }
554 }
555
556 void
557 vnet_rx_vio_ctrl(struct vnet_softc *sc, struct vio_msg *vm)
558 {
559 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
560
561 switch (tag->stype_env) {
562 case VIO_VER_INFO:
563 vnet_rx_vio_ver_info(sc, tag);
564 break;
565 case VIO_ATTR_INFO:
566 vnet_rx_vio_attr_info(sc, tag);
567 break;
568 case VIO_DRING_REG:
569 vnet_rx_vio_dring_reg(sc, tag);
570 break;
571 case VIO_RDX:
572 vnet_rx_vio_rdx(sc, tag);
573 break;
574 default:
575 DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
576 break;
577 }
578 }
579
580 void
581 vnet_rx_vio_ver_info(struct vnet_softc *sc, struct vio_msg_tag *tag)
582 {
583 struct vio_ver_info *vi = (struct vio_ver_info *)tag;
584
585 switch (vi->tag.stype) {
586 case VIO_SUBTYPE_INFO:
587 DPRINTF(("CTRL/INFO/VER_INFO\n"));
588
589 /* Make sure we're talking to a virtual network device. */
590 if (vi->dev_class != VDEV_NETWORK &&
591 vi->dev_class != VDEV_NETWORK_SWITCH) {
592 /* Huh, we're not talking to a network device? */
593 printf("Not a network device\n");
594 vi->tag.stype = VIO_SUBTYPE_NACK;
595 vnet_sendmsg(sc, vi, sizeof(*vi));
596 return;
597 }
598
599 if (vi->major != VNET_MAJOR) {
600 vi->tag.stype = VIO_SUBTYPE_NACK;
601 vi->major = VNET_MAJOR;
602 vi->minor = VNET_MINOR;
603 vnet_sendmsg(sc, vi, sizeof(*vi));
604 return;
605 }
606
607 vi->tag.stype = VIO_SUBTYPE_ACK;
608 vi->tag.sid = sc->sc_local_sid;
609 vi->minor = VNET_MINOR;
610 vnet_sendmsg(sc, vi, sizeof(*vi));
611 sc->sc_vio_state |= VIO_RCV_VER_INFO;
612 break;
613
614 case VIO_SUBTYPE_ACK:
615 DPRINTF(("CTRL/ACK/VER_INFO\n"));
616 if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
617 ldc_reset(&sc->sc_lc);
618 break;
619 }
620 sc->sc_vio_state |= VIO_ACK_VER_INFO;
621 break;
622
623 default:
624 DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
625 break;
626 }
627
628 if (ISSET(sc->sc_vio_state, VIO_RCV_VER_INFO) &&
629 ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
630 vnet_send_attr_info(sc);
631 }
632
633 void
634 vnet_rx_vio_attr_info(struct vnet_softc *sc, struct vio_msg_tag *tag)
635 {
636 struct vnet_attr_info *ai = (struct vnet_attr_info *)tag;
637
638 switch (ai->tag.stype) {
639 case VIO_SUBTYPE_INFO:
640 DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
641 sc->sc_xfer_mode = ai->xfer_mode;
642
643 ai->tag.stype = VIO_SUBTYPE_ACK;
644 ai->tag.sid = sc->sc_local_sid;
645 vnet_sendmsg(sc, ai, sizeof(*ai));
646 sc->sc_vio_state |= VIO_RCV_ATTR_INFO;
647 break;
648
649 case VIO_SUBTYPE_ACK:
650 DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
651 if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
652 ldc_reset(&sc->sc_lc);
653 break;
654 }
655 sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
656 break;
657
658 default:
659 DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
660 break;
661 }
662
663 if (ISSET(sc->sc_vio_state, VIO_RCV_ATTR_INFO) &&
664 ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO)) {
665 if (sc->sc_xfer_mode == VIO_DRING_MODE)
666 vnet_send_dring_reg(sc);
667 else
668 vio_send_rdx(sc);
669 }
670 }
671
672 void
673 vnet_rx_vio_dring_reg(struct vnet_softc *sc, struct vio_msg_tag *tag)
674 {
675 struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;
676
677 switch (dr->tag.stype) {
678 case VIO_SUBTYPE_INFO:
679 DPRINTF(("CTRL/INFO/DRING_REG\n"));
680
681 sc->sc_peer_dring_nentries = dr->num_descriptors;
682 sc->sc_peer_desc_size = dr->descriptor_size;
683 sc->sc_peer_dring_cookie = dr->cookie[0];
684
685 dr->tag.stype = VIO_SUBTYPE_ACK;
686 dr->tag.sid = sc->sc_local_sid;
687 vnet_sendmsg(sc, dr, sizeof(*dr));
688 sc->sc_vio_state |= VIO_RCV_DRING_REG;
689 break;
690
691 case VIO_SUBTYPE_ACK:
692 DPRINTF(("CTRL/ACK/DRING_REG\n"));
693 if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
694 ldc_reset(&sc->sc_lc);
695 break;
696 }
697
698 sc->sc_dring_ident = dr->dring_ident;
699 sc->sc_seq_no = 1;
700
701 sc->sc_vio_state |= VIO_ACK_DRING_REG;
702 break;
703
704 default:
705 DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
706 break;
707 }
708
709 if (ISSET(sc->sc_vio_state, VIO_RCV_DRING_REG) &&
710 ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
711 vio_send_rdx(sc);
712 }
713
714 void
715 vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *tag)
716 {
717 #if 0
718 FIXME openbsd
719 struct ifnet *ifp = &sc->sc_ac.ac_if;
720 #else
721 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
722 #endif
723
724 switch(tag->stype) {
725 case VIO_SUBTYPE_INFO:
726 DPRINTF(("CTRL/INFO/RDX\n"));
727
728 tag->stype = VIO_SUBTYPE_ACK;
729 tag->sid = sc->sc_local_sid;
730 vnet_sendmsg(sc, tag, sizeof(*tag));
731 sc->sc_vio_state |= VIO_RCV_RDX;
732 break;
733
734 case VIO_SUBTYPE_ACK:
735 DPRINTF(("CTRL/ACK/RDX\n"));
736 if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
737 ldc_reset(&sc->sc_lc);
738 break;
739 }
740 sc->sc_vio_state |= VIO_ACK_RDX;
741 break;
742
743 default:
744 DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
745 break;
746 }
747
748 if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) &&
749 ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
750 /* Link is up! */
751 vnet_link_state(sc);
752
753 /* Configure multicast now that we can. */
754 vnet_setmulti(sc, 1);
755
756 #if 0
757 FIXME openbsd
758 KERNEL_LOCK();
759 #else
760 KERNEL_LOCK(1, curlwp);
761 #endif
762 #if 0
763 FIXME openbsd
764 ifq_clr_oactive(&ifp->if_snd);
765 #else
766 ifp->if_flags &= ~IFF_OACTIVE;
767 #endif
768 vnet_start(ifp);
769 #if 0
770 FIXME openbsd
771 KERNEL_UNLOCK();
772 #else
773 KERNEL_UNLOCK_ONE(curlwp);
774 #endif
775 }
776 }
777
778 void
779 vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *vm)
780 {
781 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
782
783 if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
784 !ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
785 DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
786 tag->stype_env));
787 return;
788 }
789
790 switch(tag->stype_env) {
791 case VIO_DESC_DATA:
792 vnet_rx_vio_desc_data(sc, tag);
793 break;
794
795 case VIO_DRING_DATA:
796 vnet_rx_vio_dring_data(sc, tag);
797 break;
798
799 default:
800 DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
801 break;
802 }
803 }
804
805 void
806 vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
807 {
808 struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
809 struct ldc_conn *lc = &sc->sc_lc;
810 struct ldc_map *map = sc->sc_lm;
811 #if 0
812 FIXME openbsd
813 struct ifnet *ifp = &sc->sc_ac.ac_if;
814 #else
815 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
816 #endif
817 #if 0
818 FIXME openbsd
819 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
820 #endif
821 struct mbuf *m;
822 unsigned char *buf;
823 paddr_t pa;
824 psize_t nbytes;
825 u_int cons;
826 int err;
827
828 switch(tag->stype) {
829 case VIO_SUBTYPE_INFO:
830 buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
831 if (buf == NULL) {
832 #if 0
833 FIXME openbsd
834 ifp->if_ierrors++;
835 #else
836 if_statinc(ifp, if_ierrors);
837 #endif
838 goto skip;
839 }
840 nbytes = roundup(dm->nbytes, 8);
841
842 if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
843 #if 0
844 FIXME openbsd
845 ifp->if_ierrors++;
846 #else
847 if_statinc(ifp, if_ierrors);
848 #endif
849 goto skip;
850 }
851
852 pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
853 err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
854 dm->cookie[0].addr, pa, nbytes, &nbytes);
855 if (err != H_EOK) {
856 pool_put(&sc->sc_pool, buf);
857 #if 0
858 FIXME openbsd
859 ifp->if_ierrors++;
860 #else
861 if_statinc(ifp, if_ierrors);
862 #endif
863 goto skip;
864 }
865
866 /* Stupid OBP doesn't align properly. */
867 #if 0
868 FIXME openbsd
869 m = m_devget(buf, dm->nbytes, ETHER_ALIGN);
870 #else
871 m = m_devget(buf, dm->nbytes, 0, ifp);
872 #endif
873 pool_put(&sc->sc_pool, buf);
874 if (m == NULL) {
875 #if 0
876 FIXME openbsd
877 ifp->if_ierrors++;
878 #else
879 if_statinc(ifp, if_ierrors);
880 #endif
881 goto skip;
882 }
883
884 /* Pass it on. */
885 #if 0
886 FIXME openbsd
887 ml_enqueue(&ml, m);
888 if_input(ifp, &ml);
889 #else
890 if_percpuq_enqueue(ifp->if_percpuq, m);
891 #endif
892 skip:
893 dm->tag.stype = VIO_SUBTYPE_ACK;
894 dm->tag.sid = sc->sc_local_sid;
895 vnet_sendmsg(sc, dm, sizeof(*dm));
896 break;
897
898 case VIO_SUBTYPE_ACK:
899 DPRINTF(("DATA/ACK/DESC_DATA\n"));
900
901 if (dm->desc_handle != sc->sc_tx_cons) {
902 printf("out of order\n");
903 return;
904 }
905
906 cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
907
908 map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
909 #if 0
910 FIXME openbsd
911 atomic_dec_int(&map->lm_count);
912 #else
913 map->lm_count--;
914 #endif
915
916 pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
917 sc->sc_vsd[cons].vsd_buf = NULL;
918
919 sc->sc_tx_cons++;
920 break;
921
922 case VIO_SUBTYPE_NACK:
923 DPRINTF(("DATA/NACK/DESC_DATA\n"));
924 break;
925
926 default:
927 DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
928 break;
929 }
930 }
931
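/*
 * Handle a DRING_DATA message.  For an INFO message the peer announces
 * descriptors that are ready, starting at start_idx; we copy each
 * descriptor in over LDC, copy the frame into an mbuf, mark the
 * descriptor DONE, copy it back out, and finally ACK (or NACK) with the
 * last index we processed.  For an ACK we reclaim our own transmit
 * descriptors that the peer has finished with.
 */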
932 void
933 vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
934 {
935 struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
936 struct ldc_conn *lc = &sc->sc_lc;
937 #if 0
938 FIXME openbsd
939 struct ifnet *ifp = &sc->sc_ac.ac_if;
940 #else
941 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
942 #endif
943 struct mbuf *m = NULL;
944 paddr_t pa;
945 psize_t nbytes;
946 int err;
947
948 switch(tag->stype) {
949 case VIO_SUBTYPE_INFO:
950 {
951 struct vnet_desc desc;
952 uint64_t cookie;
953 paddr_t desc_pa;
954 int idx, ack_end_idx = -1;
955 #if 0
956 FIXME openbsd
957 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
958 #endif
959
960 idx = dm->start_idx;
961 for (;;) {
962 cookie = sc->sc_peer_dring_cookie.addr;
963 cookie += idx * sc->sc_peer_desc_size;
964 nbytes = sc->sc_peer_desc_size;
965 pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
966 err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
967 desc_pa, nbytes, &nbytes);
968 if (err != H_EOK) {
969 printf("hv_ldc_copy_in %d\n", err);
970 break;
971 }
972
973 if (desc.hdr.dstate != VIO_DESC_READY)
974 break;
975
976 if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
977 #if 0
978 FIXME openbsd
979 ifp->if_ierrors++;
980 #else
981 if_statinc(ifp, if_ierrors);
982 #endif
983 goto skip;
984 }
#if 0
FIXME openbsd
			m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes);
			if (!m)
				break;
#else
			/*
			 * m is still NULL here; grab a packet header mbuf
			 * and a cluster before copying the frame in.
			 */
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				break;
			}
#endif
995 m->m_len = m->m_pkthdr.len = desc.nbytes;
996 nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);
997
998 pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
999 err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
1000 desc.cookie[0].addr, pa, nbytes, &nbytes);
1001 if (err != H_EOK) {
1002 m_freem(m);
1003 goto skip;
1004 }
1005 m->m_data += VNET_ETHER_ALIGN;
1006
#if 0
FIXME openbsd
			ml_enqueue(&ml, m);
#else
			m_set_rcvif(m, ifp);
			if_percpuq_enqueue(ifp->if_percpuq, m);
#endif
1013
1014 skip:
1015 desc.hdr.dstate = VIO_DESC_DONE;
1016 nbytes = sc->sc_peer_desc_size;
1017 err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
1018 desc_pa, nbytes, &nbytes);
1019 if (err != H_EOK)
1020 printf("hv_ldc_copy_out %d\n", err);
1021
1022 ack_end_idx = idx;
1023 if (++idx == sc->sc_peer_dring_nentries)
1024 idx = 0;
1025 }
#if 0
FIXME openbsd
		if_input(ifp, &ml);
#else
		/*
		 * Packets were already passed up individually via
		 * if_percpuq_enqueue() above, so there is nothing left
		 * to hand to the network stack here.
		 */
#endif
1032
1033 if (ack_end_idx == -1) {
1034 dm->tag.stype = VIO_SUBTYPE_NACK;
1035 } else {
1036 dm->tag.stype = VIO_SUBTYPE_ACK;
1037 dm->end_idx = ack_end_idx;
1038 }
1039 dm->tag.sid = sc->sc_local_sid;
1040 dm->proc_state = VIO_DP_STOPPED;
1041 vnet_sendmsg(sc, dm, sizeof(*dm));
1042 break;
1043 }
1044
1045 case VIO_SUBTYPE_ACK:
1046 {
1047 struct ldc_map *map = sc->sc_lm;
1048 u_int cons, count;
1049
1050 sc->sc_peer_state = dm->proc_state;
1051
1052 cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
1053 while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
1054 map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
1055 #if 0
1056 FIXME openbsd
1057 atomic_dec_int(&map->lm_count);
1058 #else
1059 map->lm_count--;
1060 #endif
1061
1062 pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
1063 sc->sc_vsd[cons].vsd_buf = NULL;
1064
1065 sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
1066 sc->sc_tx_cons++;
1067 cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
1068 }
1069
1070 count = sc->sc_tx_prod - sc->sc_tx_cons;
1071 if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
1072 vnet_send_dring_data(sc, cons);
1073
1074 #if 0
1075 FIXME openbsd
1076 KERNEL_LOCK();
1077 #else
1078 KERNEL_LOCK(1, curlwp);
1079 #endif
1080 if (count < (sc->sc_vd->vd_nentries - 1))
1081 #if 0
1082 FIXME openbsd
1083 ifq_clr_oactive(&ifp->if_snd);
1084 #else
1085 ifp->if_flags &= ~IFF_OACTIVE;
1086 #endif
1087 if (count == 0)
1088 ifp->if_timer = 0;
1089
1090 vnet_start(ifp);
1091 #if 0
1092 FIXME openbsd
1093 KERNEL_UNLOCK();
1094 #else
1095 KERNEL_UNLOCK_ONE(curlwp);
1096 #endif
1097 break;
1098 }
1099
1100 case VIO_SUBTYPE_NACK:
1101 DPRINTF(("DATA/NACK/DRING_DATA\n"));
1102 sc->sc_peer_state = VIO_DP_STOPPED;
1103 break;
1104
1105 default:
1106 DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
1107 break;
1108 }
1109 }
1110
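/*
 * Called when the LDC channel is reset: forget all handshake state,
 * mark the link down, release any transmit buffers still in flight and
 * return the map table and descriptor ring to their initial state.
 */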
1111 void
1112 vnet_ldc_reset(struct ldc_conn *lc)
1113 {
1114 struct vnet_softc *sc = lc->lc_sc;
1115 int i;
1116 #if 0
1117 FIXME openbsd
1118 timeout_del(&sc->sc_handshake_to);
1119 #endif
1120 sc->sc_tx_prod = sc->sc_tx_cons = 0;
1121 sc->sc_peer_state = VIO_DP_STOPPED;
1122 sc->sc_vio_state = 0;
1123 vnet_link_state(sc);
1124
1125 sc->sc_lm->lm_next = 1;
1126 sc->sc_lm->lm_count = 1;
1127 for (i = 1; i < sc->sc_lm->lm_nentries; i++)
1128 sc->sc_lm->lm_slot[i].entry = 0;
1129
1130 for (i = 0; i < sc->sc_vd->vd_nentries; i++) {
1131 if (sc->sc_vsd[i].vsd_buf) {
1132 pool_put(&sc->sc_pool, sc->sc_vsd[i].vsd_buf);
1133 sc->sc_vsd[i].vsd_buf = NULL;
1134 }
1135 sc->sc_vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
1136 }
1137 }
1138
1139 void
1140 vnet_ldc_start(struct ldc_conn *lc)
1141 {
1142 struct vnet_softc *sc = lc->lc_sc;
1143 #if 0
1144 FIXME openbsd
1145 timeout_del(&sc->sc_handshake_to);
1146 #endif
1147 vnet_send_ver_info(sc, VNET_MAJOR, VNET_MINOR);
1148 }
1149
1150 void
1151 vnet_sendmsg(struct vnet_softc *sc, void *msg, size_t len)
1152 {
1153 struct ldc_conn *lc = &sc->sc_lc;
1154 int err;
1155
1156 err = ldc_send_unreliable(lc, msg, len);
1157 if (err)
1158 printf("%s: ldc_send_unreliable: %d\n", __func__, err);
1159 }
1160
1161 void
1162 vnet_send_ver_info(struct vnet_softc *sc, uint16_t major, uint16_t minor)
1163 {
1164 struct vio_ver_info vi;
1165
1166 bzero(&vi, sizeof(vi));
1167 vi.tag.type = VIO_TYPE_CTRL;
1168 vi.tag.stype = VIO_SUBTYPE_INFO;
1169 vi.tag.stype_env = VIO_VER_INFO;
1170 vi.tag.sid = sc->sc_local_sid;
1171 vi.major = major;
1172 vi.minor = minor;
1173 vi.dev_class = VDEV_NETWORK;
1174 vnet_sendmsg(sc, &vi, sizeof(vi));
1175
1176 sc->sc_vio_state |= VIO_SND_VER_INFO;
1177 }
1178
1179 void
1180 vnet_send_attr_info(struct vnet_softc *sc)
1181 {
1182 struct vnet_attr_info ai;
1183 int i;
1184
1185 bzero(&ai, sizeof(ai));
1186 ai.tag.type = VIO_TYPE_CTRL;
1187 ai.tag.stype = VIO_SUBTYPE_INFO;
1188 ai.tag.stype_env = VIO_ATTR_INFO;
1189 ai.tag.sid = sc->sc_local_sid;
1190 ai.xfer_mode = VIO_DRING_MODE;
1191 ai.addr_type = VNET_ADDR_ETHERMAC;
1192 ai.ack_freq = 0;
1193 ai.addr = 0;
1194 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1195 ai.addr <<= 8;
1196 ai.addr |= sc->sc_macaddr[i];
1197 }
1198 ai.mtu = ETHER_MAX_LEN - ETHER_CRC_LEN;
1199 vnet_sendmsg(sc, &ai, sizeof(ai));
1200
1201 sc->sc_vio_state |= VIO_SND_ATTR_INFO;
1202 }
1203
1204 void
1205 vnet_send_dring_reg(struct vnet_softc *sc)
1206 {
1207 struct vio_dring_reg dr;
1208
1209 bzero(&dr, sizeof(dr));
1210 dr.tag.type = VIO_TYPE_CTRL;
1211 dr.tag.stype = VIO_SUBTYPE_INFO;
1212 dr.tag.stype_env = VIO_DRING_REG;
1213 dr.tag.sid = sc->sc_local_sid;
1214 dr.dring_ident = 0;
1215 dr.num_descriptors = sc->sc_vd->vd_nentries;
1216 dr.descriptor_size = sizeof(struct vnet_desc);
1217 dr.options = VIO_TX_RING;
1218 dr.ncookies = 1;
1219 dr.cookie[0].addr = 0;
1220 dr.cookie[0].size = PAGE_SIZE;
1221 vnet_sendmsg(sc, &dr, sizeof(dr));
1222
1223 sc->sc_vio_state |= VIO_SND_DRING_REG;
1224 };
1225
1226 void
1227 vio_send_rdx(struct vnet_softc *sc)
1228 {
1229 struct vio_msg_tag tag;
1230
1231 tag.type = VIO_TYPE_CTRL;
1232 tag.stype = VIO_SUBTYPE_INFO;
1233 tag.stype_env = VIO_RDX;
1234 tag.sid = sc->sc_local_sid;
1235 vnet_sendmsg(sc, &tag, sizeof(tag));
1236
1237 sc->sc_vio_state |= VIO_SND_RDX;
1238 }
1239
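/*
 * Tell the peer that new descriptors are ready, starting at start_idx.
 * The peer state is atomically switched to ACTIVE first so that only
 * one DRING_DATA message is outstanding while the peer is running.
 */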
1240 void
1241 vnet_send_dring_data(struct vnet_softc *sc, uint32_t start_idx)
1242 {
1243 struct vio_dring_msg dm;
1244 u_int peer_state;
1245
1246 peer_state = atomic_swap_uint(&sc->sc_peer_state, VIO_DP_ACTIVE);
1247 if (peer_state == VIO_DP_ACTIVE)
1248 return;
1249
1250 bzero(&dm, sizeof(dm));
1251 dm.tag.type = VIO_TYPE_DATA;
1252 dm.tag.stype = VIO_SUBTYPE_INFO;
1253 dm.tag.stype_env = VIO_DRING_DATA;
1254 dm.tag.sid = sc->sc_local_sid;
1255 dm.seq_no = sc->sc_seq_no++;
1256 dm.dring_ident = sc->sc_dring_ident;
1257 dm.start_idx = start_idx;
1258 dm.end_idx = -1;
1259 vnet_sendmsg(sc, &dm, sizeof(dm));
1260 }
1261
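/*
 * Transmit in dring mode.  Each outgoing frame is copied into a buffer
 * from the per-interface pool, the buffer is entered into the LDC map
 * table, and the corresponding ring descriptor is marked READY.  If the
 * peer is not already processing the ring, a DRING_DATA message is sent
 * to kick it.
 */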
1262 void
1263 vnet_start(struct ifnet *ifp)
1264 {
1265 struct vnet_softc *sc = ifp->if_softc;
1266 struct ldc_conn *lc = &sc->sc_lc;
1267 struct ldc_map *map = sc->sc_lm;
1268 struct mbuf *m;
1269 paddr_t pa;
1270 unsigned char *buf;
1271 uint64_t tx_head, tx_tail, tx_state;
1272 u_int start, prod, count;
1273 int err;
1274
1275 #if 0
1276 FIXME openbsd
1277 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1278 #else
1279 if (!(ifp->if_flags & IFF_RUNNING) || (ifp->if_flags & IFF_OACTIVE))
1280 #endif
1281 return;
1282
1283 #if 0
1284 FIXME openbsd
1285 if (ifq_empty(&ifp->if_snd))
1286 #else
1287 if (IFQ_IS_EMPTY(&ifp->if_snd))
1288 #endif
1289 return;
1290
1291 /*
1292 * We cannot transmit packets until a VIO connection has been
1293 * established.
1294 */
1295 if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
1296 !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
1297 return;
1298
1299 /*
1300 * Make sure there is room in the LDC transmit queue to send a
1301 * DRING_DATA message.
1302 */
1303 err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
1304 if (err != H_EOK)
1305 return;
1306 tx_tail += sizeof(struct ldc_pkt);
1307 tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
1308 if (tx_tail == tx_head) {
1309 #if 0
1310 FIXME openbsd
1311 ifq_set_oactive(&ifp->if_snd);
1312 #else
1313 ifp->if_flags |= IFF_OACTIVE;
1314 #endif
1315 return;
1316 }
1317
1318 if (sc->sc_xfer_mode == VIO_DESC_MODE) {
1319 vnet_start_desc(ifp);
1320 return;
1321 }
1322
1323 start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
1324 while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
1325 count = sc->sc_tx_prod - sc->sc_tx_cons;
1326 if (count >= (sc->sc_vd->vd_nentries - 1) ||
1327 map->lm_count >= map->lm_nentries) {
1328 #if 0
1329 FIXME openbsd
1330 ifq_set_oactive(&ifp->if_snd);
1331 #else
1332 ifp->if_flags |= IFF_OACTIVE;
1333 #endif
1334 break;
1335 }
1336
1337 buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
1338 if (buf == NULL) {
1339 #if 0
1340 FIXME openbsd
1341 ifq_set_oactive(&ifp->if_snd);
1342 #else
1343 ifp->if_flags |= IFF_OACTIVE;
1344 #endif
1345 break;
1346 }
1347 #if 0
1348 FIXME openbsd
1349 m = ifq_dequeue(&ifp->if_snd);
1350 #else
1351 IFQ_DEQUEUE(&ifp->if_snd, m);
1352 #endif
1353 if (m == NULL) {
1354 pool_put(&sc->sc_pool, buf);
1355 break;
1356 }
1357
1358 m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);
1359
1360 #if NBPFILTER > 0
1361 /*
1362 * If BPF is listening on this interface, let it see the
1363 * packet before we commit it to the wire.
1364 */
1365 if (ifp->if_bpf)
1366 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1367 #endif
1368
1369 pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
1370 KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
1371 while (map->lm_slot[map->lm_next].entry != 0) {
1372 map->lm_next++;
1373 map->lm_next &= (map->lm_nentries - 1);
1374 }
1375 map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
1376 map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
1377 #if 0
1378 FIXME openbsd
1379 atomic_inc_int(&map->lm_count);
1380 #else
1381 map->lm_count++;
1382 #endif
1383 #if 0
1384 FIXME openbsd
1385 sc->sc_vd->vd_desc[prod].nbytes = max(m->m_pkthdr.len, 60);
1386 #else
1387 sc->sc_vd->vd_desc[prod].nbytes = MAX(m->m_pkthdr.len, 60);
1388 #endif
1389 sc->sc_vd->vd_desc[prod].ncookies = 1;
1390 sc->sc_vd->vd_desc[prod].cookie[0].addr =
1391 map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
1392 sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
1393 membar_producer();
1394 sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;
1395
1396 sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
1397 sc->sc_vsd[prod].vsd_buf = buf;
1398
1399 sc->sc_tx_prod++;
1400 prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
1401
1402 m_freem(m);
1403 }
1404
1405 membar_producer();
1406
1407 if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
1408 vnet_send_dring_data(sc, start);
1409 ifp->if_timer = 5;
1410 }
1411 }
1412
1413 void
1414 vnet_start_desc(struct ifnet *ifp)
1415 {
1416 struct vnet_softc *sc = ifp->if_softc;
1417 struct ldc_map *map = sc->sc_lm;
1418 struct vnet_desc_msg dm;
1419 struct mbuf *m;
1420 paddr_t pa;
1421 unsigned char *buf;
1422 u_int prod, count;
1423
1424 for (;;) {
1425 count = sc->sc_tx_prod - sc->sc_tx_cons;
1426 if (count >= (sc->sc_vd->vd_nentries - 1) ||
1427 map->lm_count >= map->lm_nentries) {
1428 #if 0
1429 FIXME openbsd
1430 ifq_set_oactive(&ifp->if_snd);
1431 #else
1432 ifp->if_flags |= IFF_OACTIVE;
1433 #endif
1434 return;
1435 }
1436
1437 buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
1438 if (buf == NULL) {
1439 #if 0
1440 FIXME openbsd
1441 ifq_set_oactive(&ifp->if_snd);
1442 #else
1443 ifp->if_flags |= IFF_OACTIVE;
1444 #endif
1445 return;
1446 }
1447
1448 #if 0
1449 FIXME openbsd
1450 m = ifq_dequeue(&ifp->if_snd);
1451 #else
1452 IFQ_DEQUEUE(&ifp->if_snd, m);
1453 #endif
1454
1455 if (m == NULL) {
1456 pool_put(&sc->sc_pool, buf);
1457 return;
1458 }
1459
1460 m_copydata(m, 0, m->m_pkthdr.len, buf);
1461
1462 #if NBPFILTER > 0
1463 /*
1464 * If BPF is listening on this interface, let it see the
1465 * packet before we commit it to the wire.
1466 */
1467 if (ifp->if_bpf)
1468 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1469 #endif
1470
1471 pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
1472 KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
1473 while (map->lm_slot[map->lm_next].entry != 0) {
1474 map->lm_next++;
1475 map->lm_next &= (map->lm_nentries - 1);
1476 }
1477 map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
1478 map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
1479 #if 0
1480 FIXME openbsd
1481 atomic_inc_int(&map->lm_count);
1482 #else
1483 map->lm_count++;
1484 #endif
1485
1486 prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
1487 sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
1488 sc->sc_vsd[prod].vsd_buf = buf;
1489
1490 bzero(&dm, sizeof(dm));
1491 dm.tag.type = VIO_TYPE_DATA;
1492 dm.tag.stype = VIO_SUBTYPE_INFO;
1493 dm.tag.stype_env = VIO_DESC_DATA;
1494 dm.tag.sid = sc->sc_local_sid;
1495 dm.seq_no = sc->sc_seq_no++;
1496 dm.desc_handle = sc->sc_tx_prod;
1497 #if 0
1498 FIXME openbsd
1499 dm.nbytes = max(m->m_pkthdr.len, 60);
1500 #else
1501 dm.nbytes = MAX(m->m_pkthdr.len, 60);
1502 #endif
1503 dm.ncookies = 1;
1504 dm.cookie[0].addr =
1505 map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
1506 dm.cookie[0].size = 2048;
1507 vnet_sendmsg(sc, &dm, sizeof(dm));
1508
1509 sc->sc_tx_prod++;
1510 sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);
1511
1512 m_freem(m);
1513 }
1514 }
1515
1516 int
1517 vnet_ioctl(struct ifnet *ifp, u_long cmd, void* data)
1518 {
1519
1520 struct vnet_softc *sc = ifp->if_softc;
1521 struct ifreq *ifr = (struct ifreq *)data;
1522 int s, error = 0;
1523
1524 s = splnet();
1525
1526 switch (cmd) {
1527
1528 case SIOCSIFADDR:
1529 ifp->if_flags |= IFF_UP;
1530 /* FALLTHROUGH */
1531 case SIOCSIFFLAGS:
1532 if (ifp->if_flags & IFF_UP) {
1533 if ((ifp->if_flags & IFF_RUNNING) == 0)
1534 vnet_init(ifp);
1535 } else {
1536 if (ifp->if_flags & IFF_RUNNING)
1537 vnet_stop(ifp);
1538 }
1539 break;
1540
1541 case SIOCGIFMEDIA:
1542 case SIOCSIFMEDIA:
1543 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1544 break;
1545
1546 case SIOCADDMULTI:
1547 case SIOCDELMULTI:
		/*
		 * XXX Removing all multicast addresses and then adding
		 * most of them back is rather inefficient.
		 */
1552 vnet_setmulti(sc, 0);
1553 error = ether_ioctl(ifp, cmd, data);
1554 vnet_setmulti(sc, 1);
1555 if (error == ENETRESET)
1556 error = 0;
1557 break;
1558
1559 default:
1560 error = ether_ioctl(ifp, cmd, data);
1561 }
1562
1563 splx(s);
1564
1565 return (error);
1566 }
1567
1568 void
1569 vnet_watchdog(struct ifnet *ifp)
1570 {
	printf("%s: watchdog timeout\n", ifp->if_xname);
1574 }
1575
1576 int
1577 vnet_media_change(struct ifnet *ifp)
1578 {
1579 printf("vnet_media_change()\n");
1580 return (0);
1581 }
1582
1583 void
1584 vnet_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1585 {
1586 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1587 imr->ifm_status = IFM_AVALID;
1588 #if 0
1589 FIXME openbsd
1590 if (LINK_STATE_IS_UP(ifp->if_link_state) &&
1591 #else
1592 if (ifp->if_link_state == LINK_STATE_UP &&
1593 #endif
1594 ifp->if_flags & IFF_UP)
1595 imr->ifm_status |= IFM_ACTIVE;
1596 }
1597
1598 void
1599 vnet_link_state(struct vnet_softc *sc)
1600 {
1601 #if 0
1602 FIXME openbsd
1603 struct ifnet *ifp = &sc->sc_ac.ac_if;
1604 #else
1605 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1606 #endif
1607 int link_state = LINK_STATE_DOWN;
1608
1609 #if 0
1610 FIXME openbsd
1611 KERNEL_LOCK();
1612 #else
1613 KERNEL_LOCK(1, curlwp);
1614 #endif
1615 if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) &&
1616 ISSET(sc->sc_vio_state, VIO_ACK_RDX))
1617 #if 0
1618 FIXME openbsd
1619 link_state = LINK_STATE_FULL_DUPLEX;
1620 #else
1621 link_state = LINK_STATE_UP;
1622 #endif
1623 if (ifp->if_link_state != link_state) {
1624 #if 0
1625 FIXME openbsd
1626 ifp->if_link_state = link_state;
1627 if_link_state_change(ifp);
1628 #else
1629 if_link_state_change(ifp, link_state);
1630 #endif
1631 }
1632 #if 0
1633 FIXME openbsd
1634 KERNEL_UNLOCK();
1635 #else
1636 KERNEL_UNLOCK_ONE(curlwp);
1637 #endif
1638 }
1639
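/*
 * Program the peer's multicast filter by sending VNET_MCAST_INFO
 * control messages, at most VNET_NUM_MCAST addresses per message.
 * "set" selects whether the addresses are being added or removed.
 */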
1640 void
1641 vnet_setmulti(struct vnet_softc *sc, int set)
1642 {
1643 struct ethercom *ec = &sc->sc_ethercom;
1644 struct ether_multi *enm;
1645 struct ether_multistep step;
1646 struct vnet_mcast_info mi;
1647 int count = 0;
1648
1649 if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
1650 !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
1651 return;
1652
1653 bzero(&mi, sizeof(mi));
1654 mi.tag.type = VIO_TYPE_CTRL;
1655 mi.tag.stype = VIO_SUBTYPE_INFO;
1656 mi.tag.stype_env = VNET_MCAST_INFO;
1657 mi.tag.sid = sc->sc_local_sid;
1658 mi.set = set ? 1 : 0;
1659 #if 0
1660 FIXME openbsd
1661 KERNEL_LOCK();
1662 #else
1663 KERNEL_LOCK(1, curlwp);
1664 #endif
1665 ETHER_FIRST_MULTI(step, ec, enm);
1666 while (enm != NULL) {
1667 /* XXX What about multicast ranges? */
1668 bcopy(enm->enm_addrlo, mi.mcast_addr[count], ETHER_ADDR_LEN);
1669 ETHER_NEXT_MULTI(step, enm);
1670
1671 count++;
1672 if (count < VNET_NUM_MCAST)
1673 continue;
1674
1675 mi.count = VNET_NUM_MCAST;
1676 vnet_sendmsg(sc, &mi, sizeof(mi));
1677 count = 0;
1678 }
1679
1680 if (count > 0) {
1681 mi.count = count;
1682 vnet_sendmsg(sc, &mi, sizeof(mi));
1683 }
1684 #if 0
1685 FIXME openbsd
1686 KERNEL_UNLOCK();
1687 #else
1688 KERNEL_UNLOCK_ONE(curlwp);
1689 #endif
1690 }
1691
1692
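/*
 * Bring the interface up: allocate the LDC map table and the transmit
 * descriptor ring, hand the map table and the transmit/receive queues
 * to the hypervisor, enable the channel interrupts and start the VIO
 * handshake by sending our version.
 */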
1693 void
1694 vnet_init(struct ifnet *ifp)
1695 {
1696
1697 struct vnet_softc *sc = ifp->if_softc;
1698 struct ldc_conn *lc = &sc->sc_lc;
1699 int err;
1700 vaddr_t va;
1701 paddr_t pa;
1702
1703 #if OPENBSD_BUSDMA
1704 sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
1705 #else
1706 sc->sc_lm = ldc_map_alloc(2048);
1707 #endif
1708 if (sc->sc_lm == NULL)
1709 return;
1710
1711 #if OPENBSD_BUSDMA
1712 err = hv_ldc_set_map_table(lc->lc_id,
1713 sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
1714 #else
1715 va = (vaddr_t)sc->sc_lm->lm_slot;
1716 pa = 0;
1717 if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
1718 panic("pmap_extract failed %lx\n", va);
1719 err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
1720 #endif
1721 if (err != H_EOK) {
1722 printf("hv_ldc_set_map_table %d\n", err);
1723 return;
1724 }
1725
1726 sc->sc_vd = vnet_dring_alloc(sc->sc_dmatag, VNET_NUM_SOFT_DESC);
1727 if (sc->sc_vd == NULL)
1728 return;
1729 sc->sc_vsd = malloc(VNET_NUM_SOFT_DESC * sizeof(*sc->sc_vsd), M_DEVBUF,
1730 M_NOWAIT|M_ZERO);
1731 if (sc->sc_vsd == NULL)
1732 return;
1733
1734 sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
1735 sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
1736 sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
1737 sc->sc_lm->lm_next = 1;
1738 sc->sc_lm->lm_count = 1;
1739
#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	if (pmap_extract(pmap_kernel(), lc->lc_txq->lq_va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", lc->lc_txq->lq_va);
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	if (pmap_extract(pmap_kernel(), lc->lc_rxq->lq_va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", lc->lc_rxq->lq_va);
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);
1757
1758 cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_ENABLED);
1759 cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_ENABLED);
1760
1761 ldc_send_vers(lc);
1762
1763 ifp->if_flags |= IFF_RUNNING;
1764
1765 }
1766
void
vnet_stop(struct ifnet *ifp)
{
1771 struct vnet_softc *sc = ifp->if_softc;
1772 struct ldc_conn *lc = &sc->sc_lc;
1773
1774 ifp->if_flags &= ~IFF_RUNNING;
1775 #if 0
1776 FIXME openbsd
1777 ifq_clr_oactive(&ifp->if_snd);
1778 #else
1779 ifp->if_flags &= ~IFF_OACTIVE;
1780 #endif
1781 ifp->if_timer = 0;
1782
1783 cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_DISABLED);
1784 cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_DISABLED);
1785
1786 #if 0
1787 FIXME openbsd
1788 intr_barrier(sc->sc_tx_ih);
1789 intr_barrier(sc->sc_rx_ih);
1790 #else
1791 printf("vnet_stop() intr_barrier() FIXME\n");
1792 #endif
1793
1794 hv_ldc_tx_qconf(lc->lc_id, 0, 0);
1795 hv_ldc_rx_qconf(lc->lc_id, 0, 0);
1796 lc->lc_tx_seqid = 0;
1797 lc->lc_state = 0;
1798 lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
1799 vnet_ldc_reset(lc);
1800
1801 #if 0
1802 FIXME openbsd
1803 free(sc->sc_vsd, M_DEVBUF, VNET_NUM_SOFT_DESC * sizeof(*sc->sc_vsd));
1804 #else
1805 free(sc->sc_vsd, M_DEVBUF);
1806 #endif
1807
1808 vnet_dring_free(sc->sc_dmatag, sc->sc_vd);
1809
1810 hv_ldc_set_map_table(lc->lc_id, 0, 0);
1811 #if OPENBSD_BUSDMA
1812 ldc_map_free(sc->sc_dmatag, sc->sc_lm);
1813 #else
1814 ldc_map_free(sc->sc_lm);
1815 #endif
1816 }
1817
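/*
 * Allocate the transmit descriptor ring, either through bus_dma when
 * OPENBSD_BUSDMA is enabled or from kmem otherwise.  The ring is
 * rounded up to a whole page and every descriptor starts out FREE.
 */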
1818 struct vnet_dring *
1819 vnet_dring_alloc(bus_dma_tag_t t, int nentries)
1820 {
1821 struct vnet_dring *vd;
1822 bus_size_t size;
1823 vaddr_t va;
1824 #if OPENBSD_BUSDMA
1825 int nsegs;
1826 #endif
1827 int i;
1828
1829 vd = kmem_zalloc(sizeof(struct vnet_dring), KM_SLEEP);
1830 if (vd == NULL)
1831 return NULL;
1832
1833 size = roundup(nentries * sizeof(struct vnet_desc), PAGE_SIZE);
1834
1835 #if OPENBSD_BUSDMA
1836 if (bus_dmamap_create(t, size, 1, size, 0,
1837 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
1838 return (NULL);
1839
1840 if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
1841 &nsegs, BUS_DMA_NOWAIT) != 0)
1842 goto destroy;
1843
1844 if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va,
1845 BUS_DMA_NOWAIT) != 0)
1846 goto free;
1847
1848 if (bus_dmamap_load(t, vd->vd_map, va, size, NULL,
1849 BUS_DMA_NOWAIT) != 0)
1850 goto unmap;
1851 #else
1852 va = (vaddr_t)kmem_zalloc(size, KM_SLEEP);
1853 #endif
1854 vd->vd_desc = (struct vnet_desc *)va;
1855 vd->vd_nentries = nentries;
1856 bzero(vd->vd_desc, nentries * sizeof(struct vnet_desc));
1857 for (i = 0; i < vd->vd_nentries; i++)
1858 vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
1859 return (vd);
1860
1861 #if OPENBSD_BUSDMA
1862 unmap:
1863 bus_dmamem_unmap(t, va, size);
1864 free:
1865 bus_dmamem_free(t, &vd->vd_seg, 1);
1866 destroy:
1867 bus_dmamap_destroy(t, vd->vd_map);
1868 #endif
1869 return (NULL);
1870 }
1871
1872 void
1873 vnet_dring_free(bus_dma_tag_t t, struct vnet_dring *vd)
1874 {
1875 bus_size_t size;
1876
1877 size = vd->vd_nentries * sizeof(struct vnet_desc);
1878 size = roundup(size, PAGE_SIZE);
1879
1880 #if OPENBSD_BUSDMA
1881 bus_dmamap_unload(t, vd->vd_map);
1882 bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
1883 bus_dmamem_free(t, &vd->vd_seg, 1);
1884 bus_dmamap_destroy(t, vd->vd_map);
1885 #else
1886 kmem_free(vd->vd_desc, size);
1887 #endif
	kmem_free(vd, sizeof(struct vnet_dring));
1889 }
1890
1891