/*	$NetBSD: vnet.c,v 1.10 2023/12/14 20:17:18 andvar Exp $	*/
/*	$OpenBSD: vnet.c,v 1.62 2020/07/10 13:26:36 patrick Exp $	*/
/*
 * Copyright (c) 2009, 2015 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/kmem.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>
#include <machine/openfirm.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VNET_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VNET_TX_ENTRIES		32
#define VNET_RX_ENTRIES		32

struct vnet_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			addr_type;
	uint16_t		ack_freq;
	uint32_t		_reserved1;
	uint64_t		addr;
	uint64_t		mtu;
	uint64_t		_reserved2[3];
};

/* Address types. */
#define VNET_ADDR_ETHERMAC	0x01
/* Sub-Type envelopes. */
#define VNET_MCAST_INFO		0x0101

#define VNET_NUM_MCAST		7

struct vnet_mcast_info {
	struct vio_msg_tag	tag;
	uint8_t			set;
	uint8_t			count;
	uint8_t			mcast_addr[VNET_NUM_MCAST][ETHER_ADDR_LEN];
	uint32_t		_reserved;
};

struct vnet_desc {
	struct vio_dring_hdr	hdr;
	uint32_t		nbytes;
	uint32_t		ncookies;
	struct ldc_cookie	cookie[2];
};

struct vnet_desc_msg {
	struct vio_msg_tag	tag;
	uint64_t		seq_no;
	uint64_t		desc_handle;
	uint32_t		nbytes;
	uint32_t		ncookies;
	struct ldc_cookie	cookie[1];
};

struct vnet_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vnet_desc	*vd_desc;
	int			vd_nentries;
};

struct vnet_dring *vnet_dring_alloc(bus_dma_tag_t, int);
void	vnet_dring_free(bus_dma_tag_t, struct vnet_dring *);

/*
 * For now, we only support vNet 1.0.
 */
#define VNET_MAJOR	1
#define VNET_MINOR	0

/*
 * The vNet protocol wants the IP header to be 64-bit aligned, so
 * define our own variant of ETHER_ALIGN.
 */
#define VNET_ETHER_ALIGN	6

struct vnet_soft_desc {
	int		vsd_map_idx;
	unsigned char	*vsd_buf;
};

struct vnet_softc {
	device_t	sc_dv;
	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	uint64_t	sc_tx_ino;
	uint64_t	sc_rx_ino;
	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_RCV_VER_INFO	0x0004
#define VIO_SND_ATTR_INFO	0x0008
#define VIO_ACK_ATTR_INFO	0x0010
#define VIO_RCV_ATTR_INFO	0x0020
#define VIO_SND_DRING_REG	0x0040
#define VIO_ACK_DRING_REG	0x0080
#define VIO_RCV_DRING_REG	0x0100
#define VIO_SND_RDX		0x0200
#define VIO_ACK_RDX		0x0400
#define VIO_RCV_RDX		0x0800

	struct callout	sc_handshake_co;

	uint8_t		sc_xfer_mode;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	u_int		sc_tx_prod;
	u_int		sc_tx_cons;

	u_int		sc_peer_state;

	struct ldc_map	*sc_lm;
	struct vnet_dring *sc_vd;
	struct vnet_soft_desc *sc_vsd;
#define VNET_NUM_SOFT_DESC	128

	size_t		sc_peer_desc_size;
	struct ldc_cookie sc_peer_dring_cookie;
	int		sc_peer_dring_nentries;

	struct pool	sc_pool;

	struct ethercom	sc_ethercom;
	struct ifmedia	sc_media;
	u_int8_t	sc_macaddr[ETHER_ADDR_LEN];
};
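
/*
 * The VIO handshake runs VER_INFO -> ATTR_INFO -> DRING_REG -> RDX.
 * For each step, sc_vio_state records that we sent our own INFO (SND),
 * that the peer ACKed it (ACK), and that we received and ACKed the
 * peer's INFO (RCV).  Each receive handler below moves on to the next
 * step once both the RCV and ACK bits of the current step are set.
 */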

int	vnet_match(device_t, cfdata_t, void *);
void	vnet_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(vnet, sizeof(struct vnet_softc),
    vnet_match, vnet_attach, NULL, NULL);

int	vnet_tx_intr(void *);
int	vnet_rx_intr(void *);
void	vnet_handshake(void *);

void	vio_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vnet_rx_vio_ctrl(struct vnet_softc *, struct vio_msg *);
void	vnet_rx_vio_ver_info(struct vnet_softc *, struct vio_msg_tag *);
void	vnet_rx_vio_attr_info(struct vnet_softc *, struct vio_msg_tag *);
void	vnet_rx_vio_dring_reg(struct vnet_softc *, struct vio_msg_tag *);
void	vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *);
void	vnet_rx_vio_mcast_info(struct vnet_softc *sc, struct vio_msg_tag *);
void	vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *);
void	vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *);
void	vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *);

void	vnet_ldc_reset(struct ldc_conn *);
void	vnet_ldc_start(struct ldc_conn *);

void	vnet_sendmsg(struct vnet_softc *, void *, size_t);
void	vnet_send_ver_info(struct vnet_softc *, uint16_t, uint16_t);
void	vnet_send_attr_info(struct vnet_softc *);
void	vnet_send_dring_reg(struct vnet_softc *);
void	vio_send_rdx(struct vnet_softc *);
void	vnet_send_dring_data(struct vnet_softc *, uint32_t);

void	vnet_start(struct ifnet *);
void	vnet_start_desc(struct ifnet *);
int	vnet_ioctl(struct ifnet *, u_long, void *);
void	vnet_watchdog(struct ifnet *);

int	vnet_media_change(struct ifnet *);
void	vnet_media_status(struct ifnet *, struct ifmediareq *);

void	vnet_link_state(struct vnet_softc *sc);

void	vnet_setmulti(struct vnet_softc *, int);

int	vnet_init(struct ifnet *);
void	vnet_stop(struct ifnet *, int);

int
vnet_match(device_t parent, cfdata_t match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "network") == 0)
		return (1);

	return (0);
}

void
vnet_attach(device_t parent, device_t self, void *aux)
{
	struct vnet_softc *sc = device_private(self);
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	struct ifnet *ifp;

	sc->sc_dv = self;
	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;
	sc->sc_tx_ino = ca->ca_tx_ino;
	sc->sc_rx_ino = ca->ca_rx_ino;

	printf(": ivec 0x%" PRIx64 ", 0x%" PRIx64, sc->sc_tx_ino, sc->sc_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
	    IPL_NET, vnet_tx_intr, sc);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
	    IPL_NET, vnet_rx_intr, sc);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupts\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vnet_ldc_reset;
	lc->lc_start = vnet_ldc_start;
	lc->lc_rx_data = vio_rx_data;

	callout_init(&sc->sc_handshake_co, 0);

	sc->sc_peer_state = VIO_DP_STOPPED;

	lc->lc_txq = ldc_queue_alloc(VNET_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(VNET_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	if (OF_getprop(ca->ca_node, "local-mac-address",
	    sc->sc_macaddr, ETHER_ADDR_LEN) > 0) {
		printf(", address %s", ether_sprintf(sc->sc_macaddr));
	} else {
		printf(", cannot retrieve local mac address\n");
		return;
	}

	/*
	 * Each interface gets its own pool.
	 */
	pool_init(&sc->sc_pool, /*size*/2048, /*align*/0, /*align_offset*/0,
	    /*flags*/0, /*wchan*/device_xname(sc->sc_dv), /*palloc*/NULL,
	    IPL_NET);

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vnet_init;
	ifp->if_ioctl = vnet_ioctl;
	ifp->if_start = vnet_start;
	ifp->if_stop = vnet_stop;
	ifp->if_watchdog = vnet_watchdog;
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 31); /* XXX */

	ifmedia_init(&sc->sc_media, 0, vnet_media_change, vnet_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_macaddr);

	printf("\n");
	return;
free_txqueue:
	ldc_queue_free(lc->lc_txq);
}

int
vnet_tx_intr(void *arg)
{
	struct vnet_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("%s: Tx link down\n", __func__));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("%s: Tx link up\n", __func__));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("%s: Tx link reset\n", __func__));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}
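
/*
 * The RX interrupt handler drains one LDC packet per invocation: it
 * reads the queue state, handles channel up/down/reset transitions
 * (rescheduling the handshake callout as needed), dispatches the
 * packet at the queue head, and then advances the queue head past it.
 */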
int
vnet_rx_intr(void *arg)
{
	struct vnet_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL) {
		printf("hv_ldc_rx_get_state failed\n");
		return (0);
	}
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			lc->lc_tx_seqid = 0;
			lc->lc_state = 0;
			lc->lc_reset(lc);
			if (rx_head == rx_tail)
				break;
			/* Discard and ack pending I/O. */
			DPRINTF(("setting rx qhead to %" PRId64 "\n", rx_tail));
			err = hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
			if (err == H_EOK)
				break;
			printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);
			break;
		case LDC_CHANNEL_UP:
			callout_reset(&sc->sc_handshake_co, hz / 2, vnet_handshake, sc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("%s: Rx link reset\n", __func__));
			lc->lc_tx_seqid = 0;
			lc->lc_state = 0;
			lc->lc_reset(lc);
			callout_reset(&sc->sc_handshake_co, hz / 2, vnet_handshake, sc);
			if (rx_head == rx_tail)
				break;
			/* Discard and ack pending I/O. */
			DPRINTF(("setting rx qhead to %" PRId64 "\n", rx_tail));
			err = hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
			if (err == H_EOK)
				break;
			printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);
			break;
		default:
			DPRINTF(("%s: unhandled rx_state %" PRIx64 "\n", __func__, rx_state));
			break;
		}
		lc->lc_rx_state = rx_state;
		return (1);
	}

	if (rx_head == rx_tail) {
		DPRINTF(("%s: head eq tail\n", __func__));
		return (0);
	}

	lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
	case LDC_CTRL:
		DPRINTF(("%s: LDC_CTRL\n", __func__));
		ldc_rx_ctrl(lc, lp);
		break;

	case LDC_DATA:
		DPRINTF(("%s: LDC_DATA\n", __func__));
		ldc_rx_data(lc, lp);
		break;

	default:
		DPRINTF(("%s: unhandled type 0x%02x/0x%02x/0x%02x\n",
		    __func__, lp->type, lp->stype, lp->ctrl));
		console_debugger();
		ldc_reset(lc);
		break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);
	return (1);
}

void
vnet_handshake(void *arg)
{
	struct vnet_softc *sc = arg;

	ldc_send_vers(&sc->sc_lc);
}

void
vio_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
	case VIO_TYPE_CTRL:
		if ((lp->env & LDC_FRAG_START) == 0 &&
		    (lp->env & LDC_FRAG_STOP) == 0) {
			DPRINTF(("%s: FRAG_START==0 and FRAG_STOP==0\n", __func__));
			return;
		}
		vnet_rx_vio_ctrl(lc->lc_sc, vm);
		break;

	case VIO_TYPE_DATA:
		if ((lp->env & LDC_FRAG_START) == 0) {
			DPRINTF(("%s: FRAG_START==0\n", __func__));
			return;
		}
		vnet_rx_vio_data(lc->lc_sc, vm);
		break;

	default:
		DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
		ldc_reset(lc);
		break;
	}
}
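
/*
 * Control messages are dispatched on the sub-type envelope.  Besides
 * the generic VIO envelopes, vnet defines VNET_MCAST_INFO for
 * multicast filter updates (see vnet_setmulti()).
 */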
void
vnet_rx_vio_ctrl(struct vnet_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		vnet_rx_vio_ver_info(sc, tag);
		break;
	case VIO_ATTR_INFO:
		vnet_rx_vio_attr_info(sc, tag);
		break;
	case VIO_DRING_REG:
		vnet_rx_vio_dring_reg(sc, tag);
		break;
	case VIO_RDX:
		vnet_rx_vio_rdx(sc, tag);
		break;
	case VNET_MCAST_INFO:
		vnet_rx_vio_mcast_info(sc, tag);
		break;
	default:
		printf("%s: CTRL/0x%02x/0x%04x FIXME\n",
		    __func__, tag->stype, tag->stype_env);
		break;
	}
}

void
vnet_rx_vio_ver_info(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/VER_INFO\n"));

		/* Make sure we're talking to a virtual network device. */
		if (vi->dev_class != VDEV_NETWORK &&
		    vi->dev_class != VDEV_NETWORK_SWITCH) {
			DPRINTF(("Class is not network or network switch\n"));
			printf("Not a network device\n");
			vi->tag.stype = VIO_SUBTYPE_NACK;
			vnet_sendmsg(sc, vi, sizeof(*vi));
			return;
		}

		if (vi->major != VNET_MAJOR) {
			DPRINTF(("Major mismatch %" PRIu16 " vs %" PRIu16 "\n",
			    vi->major, VNET_MAJOR));
			vi->tag.stype = VIO_SUBTYPE_NACK;
			vi->major = VNET_MAJOR;
			vi->minor = VNET_MINOR;
			vnet_sendmsg(sc, vi, sizeof(*vi));
			return;
		}

		vi->tag.stype = VIO_SUBTYPE_ACK;
		vi->tag.sid = sc->sc_local_sid;
		vi->minor = VNET_MINOR;
		vnet_sendmsg(sc, vi, sizeof(*vi));
		sc->sc_vio_state |= VIO_RCV_VER_INFO;
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/VER_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_VER_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_RCV_VER_INFO) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vnet_send_attr_info(sc);
}

void
vnet_rx_vio_attr_info(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_attr_info *ai = (struct vnet_attr_info *)tag;

	switch (ai->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
		sc->sc_xfer_mode = ai->xfer_mode;
		ai->tag.stype = VIO_SUBTYPE_ACK;
		ai->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, ai, sizeof(*ai));
		sc->sc_vio_state |= VIO_RCV_ATTR_INFO;
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_RCV_ATTR_INFO) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO)) {
		if (sc->sc_xfer_mode == VIO_DRING_MODE)
			vnet_send_dring_reg(sc);
		else
			vio_send_rdx(sc);
	}
}

void
vnet_rx_vio_dring_reg(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/DRING_REG\n"));
		sc->sc_peer_dring_nentries = dr->num_descriptors;
		sc->sc_peer_desc_size = dr->descriptor_size;
		sc->sc_peer_dring_cookie = dr->cookie[0];

		dr->tag.stype = VIO_SUBTYPE_ACK;
		dr->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dr, sizeof(*dr));
		sc->sc_vio_state |= VIO_RCV_DRING_REG;
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/DRING_REG\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_dring_ident = dr->dring_ident;
		sc->sc_seq_no = 1;

		sc->sc_vio_state |= VIO_ACK_DRING_REG;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_RCV_DRING_REG) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vio_send_rdx(sc);
}
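
/*
 * RDX ("ready for data exchange") is the final handshake step.  Once
 * we have both received the peer's RDX and seen our own RDX ACKed,
 * the link is considered up: we report the link state, program the
 * multicast filter, and kick the transmit path.
 */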
void
vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));
		tag->stype = VIO_SUBTYPE_ACK;
		tag->sid = sc->sc_local_sid;
		vnet_sendmsg(sc, tag, sizeof(*tag));
		sc->sc_vio_state |= VIO_RCV_RDX;
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
		/* Link is up! */
		vnet_link_state(sc);

		/* Configure multicast now that we can. */
		vnet_setmulti(sc, 1);

		KERNEL_LOCK(1, curlwp);
		vnet_start(ifp);
		KERNEL_UNLOCK_ONE(curlwp);
	}
}

void
vnet_rx_vio_mcast_info(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/MCAST_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/MCAST_INFO\n"));
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("CTRL/NACK/MCAST_INFO\n"));
		break;

	default:
		printf("%s: CTRL/0x%02x/0x%04x\n",
		    __func__, tag->stype, tag->stype_env);
		break;
	}
}

void
vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
	case VIO_DESC_DATA:
		vnet_rx_vio_desc_data(sc, tag);
		break;

	case VIO_DRING_DATA:
		vnet_rx_vio_dring_data(sc, tag);
		break;

	default:
		DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}
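
/*
 * In-band (descriptor) mode RX: the peer sends a vnet_desc_msg per
 * packet.  We copy the frame across the LDC channel into a buffer
 * from sc_pool, hand it to the network stack via m_devget(), and ACK
 * the message.  ACKs coming back for our own in-band transmissions
 * release the corresponding map entry and buffer.
 */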
void
vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	unsigned char *buf;
	paddr_t pa;
	psize_t nbytes;
	u_int cons;
	int err;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		nbytes = roundup(dm->nbytes, 8);

		if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
			pool_put(&sc->sc_pool, buf);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    dm->cookie[0].addr, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			pool_put(&sc->sc_pool, buf);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		/* Stupid OBP doesn't align properly. */
		m = m_devget(buf, dm->nbytes, 0, ifp);
		pool_put(&sc->sc_pool, buf);
		if (m == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	skip:
		dm->tag.stype = VIO_SUBTYPE_ACK;
		dm->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DESC_DATA\n"));

		if (dm->desc_handle != sc->sc_tx_cons) {
			printf("out of order\n");
			return;
		}

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);

		map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
		atomic_dec_32(&map->lm_count);

		pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
		sc->sc_vsd[cons].vsd_buf = NULL;

		sc->sc_tx_cons++;
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DESC_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
		break;
	}
}
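
/*
 * Descriptor-ring mode RX: walk the peer's ring starting at
 * dm->start_idx, copying in each READY descriptor and then its frame
 * via hv_ldc_copy(), and mark the descriptor DONE before moving on.
 * The final ACK (or NACK, if nothing was processed) reports the last
 * index handled.  ACKs for our own TX ring free completed buffers.
 */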
void
vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m = NULL;
	paddr_t pa;
	psize_t nbytes;
	int err;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
	{
		struct vnet_desc desc;
		uint64_t cookie;
		paddr_t desc_pa;
		int idx, ack_end_idx = -1;

		DPRINTF(("%s: VIO_SUBTYPE_INFO\n", __func__));

		idx = dm->start_idx;
		for (;;) {
			cookie = sc->sc_peer_dring_cookie.addr;
			cookie += idx * sc->sc_peer_desc_size;
			nbytes = sc->sc_peer_desc_size;
			pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy_in %d\n", err);
				break;
			}

			if (desc.hdr.dstate != VIO_DESC_READY)
				break;

			if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
				if_statinc(ifp, if_ierrors);
				goto skip;
			}

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				DPRINTF(("%s: MGETHDR failed\n", __func__));
				if_statinc(ifp, if_ierrors);
				goto skip;
			}
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				break;
			}
			m->m_len = m->m_pkthdr.len = desc.nbytes;
			nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);

			pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    desc.cookie[0].addr, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				m_freem(m);
				goto skip;
			}
			m->m_data += VNET_ETHER_ALIGN;
			m_set_rcvif(m, ifp);

			if_percpuq_enqueue(ifp->if_percpuq, m);

		skip:
			desc.hdr.dstate = VIO_DESC_DONE;
			nbytes = sc->sc_peer_desc_size;
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK)
				printf("hv_ldc_copy_out %d\n", err);

			ack_end_idx = idx;
			if (++idx == sc->sc_peer_dring_nentries)
				idx = 0;
		}

		if (ack_end_idx == -1) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
		} else {
			dm->tag.stype = VIO_SUBTYPE_ACK;
			dm->end_idx = ack_end_idx;
		}
		dm->tag.sid = sc->sc_local_sid;
		dm->proc_state = VIO_DP_STOPPED;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;
	}

	case VIO_SUBTYPE_ACK:
	{
		struct ldc_map *map = sc->sc_lm;
		u_int cons, count;

		DPRINTF(("%s: VIO_SUBTYPE_ACK\n", __func__));

		sc->sc_peer_state = dm->proc_state;

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
			atomic_dec_32(&map->lm_count);

			pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
			sc->sc_vsd[cons].vsd_buf = NULL;

			sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
			sc->sc_tx_cons++;
			cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		}

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
			vnet_send_dring_data(sc, cons);

		KERNEL_LOCK(1, curlwp);
		if (count == 0)
			ifp->if_timer = 0;

		vnet_start(ifp);
		KERNEL_UNLOCK_ONE(curlwp);
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		sc->sc_peer_state = VIO_DP_STOPPED;
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}

void
vnet_ldc_reset(struct ldc_conn *lc)
{
	struct vnet_softc *sc = lc->lc_sc;
	int i;

	callout_stop(&sc->sc_handshake_co);
	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_peer_state = VIO_DP_STOPPED;
	sc->sc_vio_state = 0;
	vnet_link_state(sc);

	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;
	for (i = 1; i < sc->sc_lm->lm_nentries; i++)
		sc->sc_lm->lm_slot[i].entry = 0;

	for (i = 0; i < sc->sc_vd->vd_nentries; i++) {
		if (sc->sc_vsd[i].vsd_buf) {
			pool_put(&sc->sc_pool, sc->sc_vsd[i].vsd_buf);
			sc->sc_vsd[i].vsd_buf = NULL;
		}
		sc->sc_vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	}
}
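
/*
 * Called once the LDC channel itself is up: stop the handshake retry
 * callout and start VIO version negotiation from scratch.
 */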
void
vnet_ldc_start(struct ldc_conn *lc)
{
	struct vnet_softc *sc = lc->lc_sc;

	callout_stop(&sc->sc_handshake_co);
	vnet_send_ver_info(sc, VNET_MAJOR, VNET_MINOR);
}

void
vnet_sendmsg(struct vnet_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vnet_send_ver_info(struct vnet_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_NETWORK;
	vnet_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vnet_send_attr_info(struct vnet_softc *sc)
{
	struct vnet_attr_info ai;
	int i;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.addr_type = VNET_ADDR_ETHERMAC;
	ai.ack_freq = 0;
	ai.addr = 0;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		ai.addr <<= 8;
		ai.addr |= sc->sc_macaddr[i];
	}
	ai.mtu = ETHER_MAX_LEN - ETHER_CRC_LEN;
	vnet_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vnet_send_dring_reg(struct vnet_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vnet_desc);
	dr.options = VIO_TX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vnet_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vio_send_rdx(struct vnet_softc *sc)
{
	struct vio_msg_tag tag;

	tag.type = VIO_TYPE_CTRL;
	tag.stype = VIO_SUBTYPE_INFO;
	tag.stype_env = VIO_RDX;
	tag.sid = sc->sc_local_sid;
	vnet_sendmsg(sc, &tag, sizeof(tag));

	sc->sc_vio_state |= VIO_SND_RDX;
}
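
/*
 * Tell the peer that new descriptors are ready, starting at
 * start_idx.  The atomic swap of sc_peer_state suppresses redundant
 * DRING_DATA messages while the peer is already actively processing
 * our ring.
 */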
void
vnet_send_dring_data(struct vnet_softc *sc, uint32_t start_idx)
{
	struct vio_dring_msg dm;
	u_int peer_state;

	peer_state = atomic_swap_uint(&sc->sc_peer_state, VIO_DP_ACTIVE);
	if (peer_state == VIO_DP_ACTIVE) {
		DPRINTF(("%s: peer_state == VIO_DP_ACTIVE\n", __func__));
		return;
	}

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = start_idx;
	dm.end_idx = -1;
	vnet_sendmsg(sc, &dm, sizeof(dm));
}
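
/*
 * Transmit in dring mode: for each packet, copy it into a pool buffer
 * at VNET_ETHER_ALIGN offset, enter the buffer's page into the LDC
 * map table, fill in the next free descriptor, and publish it with
 * VIO_DESC_READY (ordered by membar_producer()).  Finally send a
 * single DRING_DATA message for the whole batch if the peer isn't
 * already running.
 */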
void
vnet_start(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct mbuf *m;
	paddr_t pa;
	unsigned char *buf;
	uint64_t tx_head, tx_tail, tx_state;
	u_int start, prod, count;
	int err;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		DPRINTF(("%s: not in RUNNING state\n", __func__));
		return;
	}

	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
		DPRINTF(("%s: queue is empty\n", __func__));
		return;
	}
	DPRINTF(("%s: queue size %d\n", __func__, ifp->if_snd.ifq_len));

	/*
	 * We cannot transmit packets until a VIO connection has been
	 * established.
	 */
	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
		DPRINTF(("%s: VIO connection not established yet\n", __func__));
		return;
	}

	/*
	 * Make sure there is room in the LDC transmit queue to send a
	 * DRING_DATA message.
	 */
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK) {
		DPRINTF(("%s: hv_ldc_tx_get_state failed\n", __func__));
		return;
	}
	tx_tail += sizeof(struct ldc_pkt);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
	if (tx_tail == tx_head) {
		DPRINTF(("%s: tail equals head\n", __func__));
		return;
	}

	if (sc->sc_xfer_mode == VIO_DESC_MODE) {
		DPRINTF(("%s: VIO_DESC_MODE\n", __func__));
		vnet_start_desc(ifp);
		return;
	}

	start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
	while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			DPRINTF(("%s: descriptor ring or map table full\n", __func__));
			break;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			DPRINTF(("%s: pool_get failed\n", __func__));
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			pool_put(&sc->sc_pool, buf);
			break;
		}

		m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		DPRINTF(("%s: before bpf\n", __func__));
		bpf_mtap(ifp, m, BPF_D_OUT);
		DPRINTF(("%s: after bpf\n", __func__));
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_32(&map->lm_count);

		sc->sc_vd->vd_desc[prod].nbytes = MAX(m->m_pkthdr.len, 60);
		sc->sc_vd->vd_desc[prod].ncookies = 1;
		sc->sc_vd->vd_desc[prod].cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
		membar_producer();
		sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		sc->sc_tx_prod++;
		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}

	membar_producer();

	if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
		vnet_send_dring_data(sc, start);
		ifp->if_timer = 5;
	}
}
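
/*
 * Transmit in in-band (descriptor) mode: like the dring path, but a
 * vnet_desc_msg carrying the cookie is sent per packet instead of
 * publishing descriptors in a shared ring.
 */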
void
vnet_start_desc(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vnet_desc_msg dm;
	struct mbuf *m;
	paddr_t pa;
	unsigned char *buf;
	u_int prod, count;

	for (;;) {
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			return;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			return;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			pool_put(&sc->sc_pool, buf);
			return;
		}

		m_copydata(m, 0, m->m_pkthdr.len, buf);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m, BPF_D_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_32(&map->lm_count);

		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DESC_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.desc_handle = sc->sc_tx_prod;
		dm.nbytes = MAX(m->m_pkthdr.len, 60);
		dm.ncookies = 1;
		dm.cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		dm.cookie[0].size = 2048;
		vnet_sendmsg(sc, &dm, sizeof(dm));

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}
}

int
vnet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0)
				vnet_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vnet_stop(ifp, 0);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * XXX Removing all multicast addresses and adding
		 * most of them back is rather inefficient.
		 */
		vnet_setmulti(sc, 0);
		error = ether_ioctl(ifp, cmd, data);
		vnet_setmulti(sc, 1);
		if (error == ENETRESET)
			error = 0;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	splx(s);

	return (error);
}

void
vnet_watchdog(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dv));
}

int
vnet_media_change(struct ifnet *ifp)
{
	return (0);
}

void
vnet_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;
	if (ifp->if_link_state == LINK_STATE_UP &&
	    ifp->if_flags & IFF_UP)
		imr->ifm_status |= IFM_ACTIVE;
}

void
vnet_link_state(struct vnet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int link_state = LINK_STATE_DOWN;

	KERNEL_LOCK(1, curlwp);
	if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		link_state = LINK_STATE_UP;
	if (ifp->if_link_state != link_state) {
		if_link_state_change(ifp, link_state);
	}
	KERNEL_UNLOCK_ONE(curlwp);
}

void
vnet_setmulti(struct vnet_softc *sc, int set)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct vnet_mcast_info mi;
	int count = 0;

	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	bzero(&mi, sizeof(mi));
	mi.tag.type = VIO_TYPE_CTRL;
	mi.tag.stype = VIO_SUBTYPE_INFO;
	mi.tag.stype_env = VNET_MCAST_INFO;
	mi.tag.sid = sc->sc_local_sid;
	mi.set = set ? 1 : 0;
	KERNEL_LOCK(1, curlwp);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/* XXX What about multicast ranges? */
		bcopy(enm->enm_addrlo, mi.mcast_addr[count], ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);

		count++;
		if (count < VNET_NUM_MCAST)
			continue;

		mi.count = VNET_NUM_MCAST;
		vnet_sendmsg(sc, &mi, sizeof(mi));
		count = 0;
	}

	if (count > 0) {
		mi.count = count;
		vnet_sendmsg(sc, &mi, sizeof(mi));
	}
	KERNEL_UNLOCK_ONE(curlwp);
}

int
vnet_init(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	int err;
	vaddr_t va;
	paddr_t pa;

	sc->sc_lm = ldc_map_alloc(2048);
	if (sc->sc_lm == NULL)
		return ENOMEM;

	va = (vaddr_t)sc->sc_lm->lm_slot;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		return EINVAL;
	}

	sc->sc_vd = vnet_dring_alloc(sc->sc_dmatag, VNET_NUM_SOFT_DESC);
	if (sc->sc_vd == NULL)
		return ENOMEM;
	sc->sc_vsd = malloc(VNET_NUM_SOFT_DESC * sizeof(*sc->sc_vsd), M_DEVBUF,
	    M_NOWAIT|M_ZERO);
	if (sc->sc_vsd == NULL)
		return ENOMEM;

	va = (vaddr_t)sc->sc_vd->vd_desc;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	sc->sc_lm->lm_slot[0].entry = pa;
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	va = (vaddr_t)lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	ifp->if_flags |= IFF_RUNNING;

	return 0;
}
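
/*
 * Tear down in the reverse order of vnet_init(): disable interrupts,
 * unconfigure the LDC queues, reset connection state, and only then
 * free the soft descriptors, the dring, and the map table.
 */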
void
vnet_stop(struct ifnet *ifp, int disable)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_DISABLED);
	cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_DISABLED);

#if 0
	/* XXX OpenBSD uses intr_barrier() here; not available on NetBSD. */
	intr_barrier(sc->sc_tx_ih);
	intr_barrier(sc->sc_rx_ih);
#else
	printf("vnet_stop() intr_barrier() not available\n");
#endif

	hv_ldc_tx_qconf(lc->lc_id, 0, 0);
	hv_ldc_rx_qconf(lc->lc_id, 0, 0);
	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
	vnet_ldc_reset(lc);

	free(sc->sc_vsd, M_DEVBUF);

	vnet_dring_free(sc->sc_dmatag, sc->sc_vd);

	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_lm);
}

struct vnet_dring *
vnet_dring_alloc(bus_dma_tag_t t, int nentries)
{
	struct vnet_dring *vd;
	bus_size_t size;
	vaddr_t va;
	int i;

	vd = kmem_zalloc(sizeof(struct vnet_dring), KM_SLEEP);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vnet_desc), PAGE_SIZE);

	va = (vaddr_t)kmem_zalloc(size, KM_SLEEP);
	vd->vd_desc = (struct vnet_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vnet_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);
}

void
vnet_dring_free(bus_dma_tag_t t, struct vnet_dring *vd)
{
	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vnet_desc);
	size = roundup(size, PAGE_SIZE);

	kmem_free(vd->vd_desc, size);
	kmem_free(vd, sizeof(struct vnet_dring));
}