Home | History | Annotate | Line # | Download | only in ic
rrunner.c revision 1.78
      1 /*	$NetBSD: rrunner.c,v 1.77 2014/03/16 05:20:27 dholland Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code contributed to The NetBSD Foundation by Kevin M. Lahey
      8  * of the Numerical Aerospace Simulation Facility, NASA Ames Research
      9  * Center.
     10  *
     11  * Partially based on a HIPPI driver written by Essential Communications
     12  * Corporation.  Thanks to Jason Thorpe, Matt Jacob, and Fred Templin
     13  * for invaluable advice and encouragement!
     14  *
     15  * Redistribution and use in source and binary forms, with or without
     16  * modification, are permitted provided that the following conditions
     17  * are met:
     18  * 1. Redistributions of source code must retain the above copyright
     19  *    notice, this list of conditions and the following disclaimer.
     20  * 2. Redistributions in binary form must reproduce the above copyright
     21  *    notice, this list of conditions and the following disclaimer in the
     22  *    documentation and/or other materials provided with the distribution.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34  * POSSIBILITY OF SUCH DAMAGE.
     35  */
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: rrunner.c,v 1.77 2014/03/16 05:20:27 dholland Exp $");
     39 
     40 #include "opt_inet.h"
     41 
     42 #include "esh.h"
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/mbuf.h>
     47 #include <sys/buf.h>
     48 #include <sys/bufq.h>
     49 #include <sys/socket.h>
     50 #include <sys/ioctl.h>
     51 #include <sys/errno.h>
     52 #include <sys/syslog.h>
     53 #include <sys/select.h>
     54 #include <sys/device.h>
     55 #include <sys/proc.h>
     56 #include <sys/kernel.h>
     57 #include <sys/conf.h>
     58 #include <sys/kauth.h>
     59 
     60 #include <uvm/uvm_extern.h>
     61 
     62 #include <net/if.h>
     63 #include <net/if_dl.h>
     64 #include <net/route.h>
     65 
     66 #include <net/if_hippi.h>
     67 #include <net/if_media.h>
     68 
     69 #ifdef INET
     70 #include <netinet/in.h>
     71 #include <netinet/in_systm.h>
     72 #include <netinet/in_var.h>
     73 #include <netinet/ip.h>
     74 #include <netinet/if_inarp.h>
     75 #endif
     76 
     77 
     78 #include <net/bpf.h>
     79 #include <net/bpfdesc.h>
     80 
     81 #include <sys/cpu.h>
     82 #include <sys/bus.h>
     83 #include <sys/intr.h>
     84 
     85 #include <dev/ic/rrunnerreg.h>
     86 #include <dev/ic/rrunnervar.h>
     87 
     88 /*
     89 #define ESH_PRINTF
     90 */
     91 
     92 /* Autoconfig definition of driver back-end */
     93 extern struct cfdriver esh_cd;
     94 
     95 struct esh_softc *esh_softc_debug[22];  /* for gdb */
     96 
     97 #ifdef DIAGNOSTIC
     98 u_int32_t max_write_len;
     99 #endif
    100 
    101 /* Network device driver and initialization framework routines */
    102 
    103 void eshinit(struct esh_softc *);
    104 int  eshioctl(struct ifnet *, u_long, void *);
    105 void eshreset(struct esh_softc *);
    106 void eshstart(struct ifnet *);
    107 static int eshstatus(struct esh_softc *);
    108 void eshstop(struct esh_softc *);
    109 void eshwatchdog(struct ifnet *);
    110 
    111 /* Routines to support FP operation */
    112 
    113 dev_type_open(esh_fpopen);
    114 dev_type_close(esh_fpclose);
    115 dev_type_read(esh_fpread);
    116 dev_type_write(esh_fpwrite);
    117 #ifdef MORE_DONE
    118 dev_type_mmap(esh_fpmmap);
    119 #endif
    120 dev_type_strategy(esh_fpstrategy);
    121 
         /*
          * Character-device switch for raw Framing Protocol (FP) access to
          * the board (one minor per ULP; see ESHUNIT/ESHULP below).  Entries
          * with no esh_fp* implementation use the kernel null/no-op stubs;
          * mmap support is only compiled in when MORE_DONE is defined.
          */
     122 const struct cdevsw esh_cdevsw = {
     123 	.d_open = esh_fpopen,
     124 	.d_close = esh_fpclose,
     125 	.d_read = esh_fpread,
     126 	.d_write = esh_fpwrite,
     127 	.d_ioctl = nullioctl,
     128 	.d_stop = nostop,
     129 	.d_tty = notty,
     130 	.d_poll = nullpoll,
     131 #ifdef MORE_DONE
     132 	.d_mmap = esh_fpmmap,
     133 #else
     134 	.d_mmap = nommap,
     135 #endif
     136 	.d_kqfilter = nullkqfilter,
     137 	.d_discard = nodiscard,
     138 	.d_flag = D_OTHER
     139 };
    140 
     141 /* General routines, not externally visible */
    142 
    143 static struct mbuf *esh_adjust_mbufs(struct esh_softc *, struct mbuf *m);
    144 static void esh_dma_sync(struct esh_softc *, void *,
    145 			      int, int, int, int, int, int);
    146 static void esh_fill_snap_ring(struct esh_softc *);
    147 static void esh_init_snap_ring(struct esh_softc *);
    148 static void esh_close_snap_ring(struct esh_softc *);
    149 static void esh_read_snap_ring(struct esh_softc *, u_int16_t, int);
    150 static void esh_fill_fp_ring(struct esh_softc *, struct esh_fp_ring_ctl *);
    151 static void esh_flush_fp_ring(struct esh_softc *,
    152 				   struct esh_fp_ring_ctl *,
    153 				   struct esh_dmainfo *);
    154 static void esh_init_fp_rings(struct esh_softc *);
    155 static void esh_read_fp_ring(struct esh_softc *, u_int16_t, int, int);
    156 static void esh_reset_runcode(struct esh_softc *);
    157 static void esh_send(struct esh_softc *);
    158 static void esh_send_cmd(struct esh_softc *, u_int8_t, u_int8_t, u_int8_t);
    159 static u_int32_t esh_read_eeprom(struct esh_softc *, u_int32_t);
    160 static void esh_write_addr(bus_space_tag_t, bus_space_handle_t,
    161 				bus_addr_t, bus_addr_t);
    162 static int esh_write_eeprom(struct esh_softc *, u_int32_t, u_int32_t);
    163 static void eshstart_cleanup(struct esh_softc *, u_int16_t, int);
    164 
    165 static struct esh_dmainfo *esh_new_dmainfo(struct esh_softc *);
    166 static void esh_free_dmainfo(struct esh_softc *, struct esh_dmainfo *);
    167 static int esh_generic_ioctl(struct esh_softc *, u_long, void *, u_long,
    168 				  struct lwp *);
    169 
    170 #ifdef ESH_PRINTF
    171 static int esh_check(struct esh_softc *);
    172 #endif
    173 
    174 #define ESHUNIT(x)	((minor(x) & 0xff00) >> 8)
    175 #define ESHULP(x)	(minor(x) & 0x00ff)
    176 
    177 
    178 /*
    179  * Back-end attach and configure.  Allocate DMA space and initialize
    180  * all structures.
    181  */
    182 
    183 void
    184 eshconfig(struct esh_softc *sc)
    185 {
    186 	struct ifnet *ifp = &sc->sc_if;
    187 	bus_space_tag_t iot = sc->sc_iot;
    188 	bus_space_handle_t ioh = sc->sc_ioh;
    189 	u_int32_t misc_host_ctl;
    190 	u_int32_t misc_local_ctl;
    191 	u_int32_t header_format;
    192 	u_int32_t ula_tmp;
    193 	bus_size_t size;
    194 	int rseg;
    195 	int error;
    196 	int i;
    197 
    198 	esh_softc_debug[device_unit(sc->sc_dev)] = sc;
    199 	sc->sc_flags = 0;
    200 
    201 	TAILQ_INIT(&sc->sc_dmainfo_freelist);
    202 	sc->sc_dmainfo_freelist_count = 0;
    203 
    204 	/*
    205 	 * Allocate and divvy up some host side memory that can hold
    206 	 * data structures that will be DMA'ed over to the NIC
    207 	 */
    208 
    209 	sc->sc_dma_size = sizeof(struct rr_gen_info) +
    210 		sizeof(struct rr_ring_ctl) * RR_ULP_COUNT +
    211 		sizeof(struct rr_descr) * RR_SEND_RING_SIZE +
    212 		sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE +
    213 		sizeof(struct rr_event) * RR_EVENT_RING_SIZE;
    214 
    215 	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size,
    216 				 0, RR_DMA_BOUNDARY, &sc->sc_dmaseg, 1,
    217 				 &rseg, BUS_DMA_NOWAIT);
    218 	if (error) {
    219 		aprint_error_dev(sc->sc_dev, "couldn't allocate space for host-side"
    220 		       "data structures\n");
    221 		return;
    222 	}
    223 	if (rseg > 1) {
    224 		aprint_error_dev(sc->sc_dev, "contiguous memory not available\n");
    225 		goto bad_dmamem_map;
    226 	}
    227 
    228 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, rseg,
    229 			       sc->sc_dma_size, (void **)&sc->sc_dma_addr,
    230 			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    231 	if (error) {
    232 		aprint_error_dev(sc->sc_dev,
    233 		       "couldn't map memory for host-side structures\n");
    234 		goto bad_dmamem_map;
    235 	}
    236 
    237 	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size,
    238 			      1, sc->sc_dma_size, RR_DMA_BOUNDARY,
    239 			      BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
    240 			      &sc->sc_dma)) {
    241 		aprint_error_dev(sc->sc_dev, "couldn't create DMA map\n");
    242 		goto bad_dmamap_create;
    243 	}
    244 
    245 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma, sc->sc_dma_addr,
    246 			    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT)) {
    247 		aprint_error_dev(sc->sc_dev, "couldn't load DMA map\n");
    248 		goto bad_dmamap_load;
    249 	}
    250 
    251 	memset(sc->sc_dma_addr, 0, sc->sc_dma_size);
    252 
    253 	sc->sc_gen_info_dma = sc->sc_dma->dm_segs->ds_addr;
    254 	sc->sc_gen_info = (struct rr_gen_info *) sc->sc_dma_addr;
    255 	size = sizeof(struct rr_gen_info);
    256 
    257 	sc->sc_recv_ring_table_dma = sc->sc_dma->dm_segs->ds_addr + size;
    258 	sc->sc_recv_ring_table =
    259 		(struct rr_ring_ctl *) (sc->sc_dma_addr + size);
    260 	size += sizeof(struct rr_ring_ctl) * RR_ULP_COUNT;
    261 
    262 	sc->sc_send_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
    263 	sc->sc_send_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
    264 	sc->sc2_send_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
    265 	size += sizeof(struct rr_descr) * RR_SEND_RING_SIZE;
    266 
    267 	sc->sc_snap_recv_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
    268 	sc->sc_snap_recv_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
    269 	sc->sc2_snap_recv_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
    270 	size += sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE;
    271 
    272 	sc->sc_event_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
    273 	sc->sc_event_ring = (struct rr_event *) (sc->sc_dma_addr + size);
    274 	size += sizeof(struct rr_event) * RR_EVENT_RING_SIZE;
    275 
    276 #ifdef DIAGNOSTIC
    277 	if (size > sc->sc_dmaseg.ds_len) {
    278 		aprint_error_dev(sc->sc_dev, "bogus size calculation\n");
    279 		goto bad_other;
    280 	}
    281 #endif
    282 
    283 	/*
    284 	 * Allocate DMA maps for transfers.  We do this here and now
    285 	 * so we won't have to wait for them in the middle of sending
    286 	 * or receiving something.
    287 	 */
    288 
    289 	if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
    290 			      ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
    291 			      BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
    292 			      &sc->sc_send.ec_dma)) {
    293 		aprint_error_dev(sc->sc_dev, "failed bus_dmamap_create\n");
    294 			goto bad_other;
    295 	}
    296 	sc->sc_send.ec_offset = 0;
    297 	sc->sc_send.ec_descr = sc->sc_send_ring;
    298     	TAILQ_INIT(&sc->sc_send.ec_di_queue);
    299 	bufq_alloc(&sc->sc_send.ec_buf_queue, "fcfs", 0);
    300 
    301 	for (i = 0; i < RR_MAX_SNAP_RECV_RING_SIZE; i++)
    302 		if (bus_dmamap_create(sc->sc_dmat, RR_DMA_MAX, 1, RR_DMA_MAX,
    303 				      RR_DMA_BOUNDARY,
    304 				      BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
    305 				      &sc->sc_snap_recv.ec_dma[i])) {
    306 			aprint_error_dev(sc->sc_dev, "failed bus_dmamap_create\n");
    307 			for (i--; i >= 0; i--)
    308 				bus_dmamap_destroy(sc->sc_dmat,
    309 						   sc->sc_snap_recv.ec_dma[i]);
    310 			goto bad_ring_dmamap_create;
    311 		}
    312 
    313 	/*
    314 	 * If this is a coldboot, the NIC RunCode should be operational.
    315 	 * If it is a warmboot, it may or may not be operational.
    316 	 * Just to be sure, we'll stop the RunCode and reset everything.
    317 	 */
    318 
    319 	/* Halt the processor (preserve NO_SWAP, if set) */
    320 
    321 	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
    322 	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
    323 			  (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
    324 
    325 	/* Make the EEPROM readable */
    326 
    327 	misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
    328 	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
    329 	    misc_local_ctl & ~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM |
    330 			       RR_LC_PARITY_ON));
    331 
    332 	/* Extract interesting information from the EEPROM: */
    333 
    334 	header_format = esh_read_eeprom(sc, RR_EE_HEADER_FORMAT);
    335 	if (header_format != RR_EE_HEADER_FORMAT_MAGIC) {
    336 		aprint_error_dev(sc->sc_dev, "bogus EEPROM header format value %x\n",
    337 		       header_format);
    338 		goto bad_other;
    339 	}
    340 
    341 	/*
    342 	 * As it is now, the runcode version in the EEPROM doesn't
    343 	 * reflect the actual runcode version number.  That is only
    344 	 * available once the runcode starts up.  We should probably
    345 	 * change the firmware update code to modify this value,
    346 	 * but Essential itself doesn't do it right now.
    347 	 */
    348 
    349 	sc->sc_sram_size = 4 * esh_read_eeprom(sc, RR_EE_SRAM_SIZE);
    350 	sc->sc_runcode_start = esh_read_eeprom(sc, RR_EE_RUNCODE_START);
    351 	sc->sc_runcode_version = esh_read_eeprom(sc, RR_EE_RUNCODE_VERSION);
    352 
    353 	sc->sc_pci_latency = esh_read_eeprom(sc, RR_EE_PCI_LATENCY);
    354 	sc->sc_pci_lat_gnt = esh_read_eeprom(sc, RR_EE_PCI_LAT_GNT);
    355 
    356 	/* General tuning values */
    357 
    358 	sc->sc_tune.rt_mode_and_status =
    359 		esh_read_eeprom(sc, RR_EE_MODE_AND_STATUS);
    360 	sc->sc_tune.rt_conn_retry_count =
    361 		esh_read_eeprom(sc, RR_EE_CONN_RETRY_COUNT);
    362 	sc->sc_tune.rt_conn_retry_timer =
    363 		esh_read_eeprom(sc, RR_EE_CONN_RETRY_TIMER);
    364 	sc->sc_tune.rt_conn_timeout =
    365 		esh_read_eeprom(sc, RR_EE_CONN_TIMEOUT);
    366 	sc->sc_tune.rt_interrupt_timer =
    367 		esh_read_eeprom(sc, RR_EE_INTERRUPT_TIMER);
    368 	sc->sc_tune.rt_tx_timeout =
    369 		esh_read_eeprom(sc, RR_EE_TX_TIMEOUT);
    370 	sc->sc_tune.rt_rx_timeout =
    371 		esh_read_eeprom(sc, RR_EE_RX_TIMEOUT);
    372 	sc->sc_tune.rt_stats_timer =
    373 		esh_read_eeprom(sc, RR_EE_STATS_TIMER);
    374 	sc->sc_tune.rt_stats_timer = ESH_STATS_TIMER_DEFAULT;
    375 
    376 	/* DMA tuning values */
    377 
    378 	sc->sc_tune.rt_pci_state =
    379 		esh_read_eeprom(sc, RR_EE_PCI_STATE);
    380 	sc->sc_tune.rt_dma_write_state =
    381 		esh_read_eeprom(sc, RR_EE_DMA_WRITE_STATE);
    382 	sc->sc_tune.rt_dma_read_state =
    383 		esh_read_eeprom(sc, RR_EE_DMA_READ_STATE);
    384 	sc->sc_tune.rt_driver_param =
    385 		esh_read_eeprom(sc, RR_EE_DRIVER_PARAM);
    386 
    387 	/*
    388 	 * Snag the ULA.  The first two bytes are reserved.
    389 	 * We don't really use it immediately, but it would be good to
    390 	 * have for building IPv6 addresses, etc.
    391 	 */
    392 
    393 	ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_HI);
    394 	sc->sc_ula[0] = (ula_tmp >> 8) & 0xff;
    395 	sc->sc_ula[1] = ula_tmp & 0xff;
    396 
    397 	ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_LO);
    398 	sc->sc_ula[2] = (ula_tmp >> 24) & 0xff;
    399 	sc->sc_ula[3] = (ula_tmp >> 16) & 0xff;
    400 	sc->sc_ula[4] = (ula_tmp >> 8) & 0xff;
    401 	sc->sc_ula[5] = ula_tmp & 0xff;
    402 
    403 	/* Reset EEPROM readability */
    404 
    405 	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
    406 
    407 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    408 	ifp->if_softc = sc;
    409 	ifp->if_start = eshstart;
    410 	ifp->if_ioctl = eshioctl;
    411 	ifp->if_watchdog = eshwatchdog;
    412 	ifp->if_flags = IFF_SIMPLEX | IFF_NOTRAILERS | IFF_NOARP;
    413 	IFQ_SET_READY(&ifp->if_snd);
    414 
    415 	if_attach(ifp);
    416 	hippi_ifattach(ifp, sc->sc_ula);
    417 
    418 	sc->sc_misaligned_bufs = sc->sc_bad_lens = 0;
    419 	sc->sc_fp_rings = 0;
    420 
    421 	return;
    422 
    423 bad_ring_dmamap_create:
    424 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_send.ec_dma);
    425 bad_other:
    426 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma);
    427 bad_dmamap_load:
    428 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma);
    429 bad_dmamap_create:
    430 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_addr, sc->sc_dma_size);
    431 bad_dmamem_map:
    432 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, rseg);
    433 	return;
    434 }
    435 
    436 
    437 /*
    438  * Bring device up.
    439  *
    440  * Assume that the on-board processor has already been stopped,
    441  * the rings have been cleared of valid buffers, and everything
    442  * is pretty much as it was when the system started.
    443  *
    444  * Stop the processor (just for good measure), clear the SRAM,
    445  * reload the boot code, and start it all up again, with the PC
    446  * pointing at the boot code.  Once the boot code has had a chance
    447  * to come up, adjust all of the appropriate parameters, and send
    448  * the 'start firmware' command.
    449  *
    450  * The NIC won't actually be up until it gets an interrupt with an
    451  * event indicating the RunCode is up.
    452  */
    453 
     454 void
     455 eshinit(struct esh_softc *sc)
     456 {
     457 	struct ifnet *ifp = &sc->sc_if;
     458 	bus_space_tag_t iot = sc->sc_iot;
     459 	bus_space_handle_t ioh = sc->sc_ioh;
     460 	struct rr_ring_ctl *ring;
     461 	u_int32_t misc_host_ctl;
     462 	u_int32_t misc_local_ctl;
     463 	u_int32_t value;
     464 	u_int32_t mode;
     465 
     466 	/* If we're already doing an init, don't try again simultaneously */
     467 
     468 	if ((sc->sc_flags & ESH_FL_INITIALIZING) != 0)
     469 		return;
     470 	sc->sc_flags = ESH_FL_INITIALIZING;
     471 
     472 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
     473 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
     474 
     475 	/* Halt the processor (preserve NO_SWAP, if set) */
     476 
     477 	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
     478 	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
     479 			  (misc_host_ctl & RR_MH_NO_SWAP)
     480 			  | RR_MH_HALT_PROC | RR_MH_CLEAR_INT);
     481 
     482 	/* Make the EEPROM readable */
     483 
     484 	misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
     485 	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
     486 			  misc_local_ctl & ~(RR_LC_FAST_PROM |
     487 					     RR_LC_ADD_SRAM |
     488 					     RR_LC_PARITY_ON));
     489 
     490 	/* Reset DMA */
     491 
     492 	bus_space_write_4(iot, ioh, RR_RX_STATE, RR_RS_RESET);
     493 	bus_space_write_4(iot, ioh, RR_TX_STATE, 0);
     494 	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE, RR_DR_RESET);
     495 	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE, RR_DW_RESET);
     496 	bus_space_write_4(iot, ioh, RR_PCI_STATE, 0);
     497 	bus_space_write_4(iot, ioh, RR_TIMER, 0);
     498 	bus_space_write_4(iot, ioh, RR_TIMER_REF, 0);
     499 
     500 	/*
     501 	 * Reset the assist register that the documentation suggests
     502 	 * resetting.  Too bad that the docs don't mention anything
     503 	 * else about the register!
     504 	 */
     505 
     506 	bus_space_write_4(iot, ioh, 0x15C, 1);
     507 
     508 	/* Clear BIST, set the PC to the start of the code and let 'er rip */
     509 
     510 	value = bus_space_read_4(iot, ioh, RR_PCI_BIST);
     511 	bus_space_write_4(iot, ioh, RR_PCI_BIST, (value & ~0xff) | 8);
     512 
     513 	sc->sc_bist_write(sc, 0);
     514 	esh_reset_runcode(sc);
     515 
     516 	bus_space_write_4(iot, ioh, RR_PROC_PC, sc->sc_runcode_start);
         	/* NOTE(review): breakpoint set to address 1 -- presumably to
         	   keep the processor from running away; confirm against the
         	   RoadRunner hardware documentation. */
     517 	bus_space_write_4(iot, ioh, RR_PROC_BREAKPT, 0x00000001);
     518 
     519 	misc_host_ctl &= ~RR_MH_HALT_PROC;
     520 	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, misc_host_ctl);
     521 
     522 	/* XXX: should we sleep rather than delaying for 1ms!? */
     523 
     524 	delay(1000);  /* Need 500 us, but we'll give it more */
     525 
         	/* A non-zero BIST value after the delay means self-test failed. */
     526 	value = sc->sc_bist_read(sc);
     527 	if (value != 0) {
     528 		aprint_error_dev(sc->sc_dev, "BIST is %d, not 0!\n",
     529 		       value);
     530 		goto bad_init;
     531 	}
     532 
     533 #ifdef ESH_PRINTF
     534 	printf("%s:  BIST is %x\n", device_xname(sc->sc_dev), value);
     535 	eshstatus(sc);
     536 #endif
     537 
     538 	/* RunCode is up.  Initialize NIC */
     539 
     540 	esh_write_addr(iot, ioh, RR_GEN_INFO_PTR, sc->sc_gen_info_dma);
     541 	esh_write_addr(iot, ioh, RR_RECV_RING_PTR, sc->sc_recv_ring_table_dma);
     542 
     543 	sc->sc_event_consumer = 0;
     544 	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, sc->sc_event_consumer);
     545 	sc->sc_event_producer = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
     546 	sc->sc_cmd_producer = RR_INIT_CMD;
     547 	sc->sc_cmd_consumer = 0;
     548 
     549 	mode = bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS);
     550 	mode |= (RR_MS_WARNINGS |
     551 		 RR_MS_ERR_TERM |
     552 		 RR_MS_NO_RESTART |
     553 		 RR_MS_SWAP_DATA);
     554 	mode &= ~RR_MS_PH_MODE;
     555 	bus_space_write_4(iot, ioh, RR_MODE_AND_STATUS, mode);
     556 
     557 #if 0
     558 #ifdef ESH_PRINTF
     559 	printf("eshinit:  misc_local_ctl %x, SRAM size %d\n", misc_local_ctl,
     560 		sc->sc_sram_size);
     561 #endif
     562 /*
     563 	misc_local_ctl |= (RR_LC_FAST_PROM | RR_LC_PARITY_ON);
     564 */
     565 	if (sc->sc_sram_size > 256 * 1024) {
     566 		misc_local_ctl |= RR_LC_ADD_SRAM;
     567 	}
     568 #endif
     569 
     570 #ifdef ESH_PRINTF
     571 	printf("eshinit:  misc_local_ctl %x\n", misc_local_ctl);
     572 #endif
     573 	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
     574 
     575 	/* Set tuning parameters */
     576 
     577 	bus_space_write_4(iot, ioh, RR_CONN_RETRY_COUNT,
     578 			  sc->sc_tune.rt_conn_retry_count);
     579 	bus_space_write_4(iot, ioh, RR_CONN_RETRY_TIMER,
     580 			  sc->sc_tune.rt_conn_retry_timer);
     581 	bus_space_write_4(iot, ioh, RR_CONN_TIMEOUT,
     582 			  sc->sc_tune.rt_conn_timeout);
     583 	bus_space_write_4(iot, ioh, RR_INTERRUPT_TIMER,
     584 			  sc->sc_tune.rt_interrupt_timer);
     585 	bus_space_write_4(iot, ioh, RR_TX_TIMEOUT,
     586 			  sc->sc_tune.rt_tx_timeout);
     587 	bus_space_write_4(iot, ioh, RR_RX_TIMEOUT,
     588 			  sc->sc_tune.rt_rx_timeout);
     589 	bus_space_write_4(iot, ioh, RR_STATS_TIMER,
     590 			  sc->sc_tune.rt_stats_timer);
     591 	bus_space_write_4(iot, ioh, RR_PCI_STATE,
     592 			  sc->sc_tune.rt_pci_state);
     593 	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE,
     594 			  sc->sc_tune.rt_dma_write_state);
     595 	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE,
     596 			  sc->sc_tune.rt_dma_read_state);
     597 
     598 	sc->sc_max_rings = bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS);
     599 
         	/* The true runcode version is only readable once it is running;
         	   high 16 bits are the major version number (must be 1 or 2). */
     600 	sc->sc_runcode_version =
     601 		bus_space_read_4(iot, ioh, RR_RUNCODE_VERSION);
     602 	sc->sc_version = sc->sc_runcode_version >> 16;
     603 	if (sc->sc_version != 1 && sc->sc_version != 2) {
     604 		aprint_error_dev(sc->sc_dev, "bad version number %d in runcode\n",
     605 		       sc->sc_version);
     606 		goto bad_init;
     607 	}
     608 
     609 	if (sc->sc_version == 1) {
     610 		sc->sc_options = 0;
     611 	} else {
     612 		value = bus_space_read_4(iot, ioh, RR_ULA);
     613 		sc->sc_options = value >> 16;
     614 	}
     615 
     616 	if (sc->sc_options & (RR_OP_LONG_TX | RR_OP_LONG_RX)) {
     617 		aprint_error_dev(sc->sc_dev, "unsupported firmware -- long descriptors\n");
     618 		goto bad_init;
     619 	}
     620 
     621 	printf("%s: startup runcode version %d.%d.%d, options %x\n",
     622 	       device_xname(sc->sc_dev),
     623 	       sc->sc_version,
     624 	       (sc->sc_runcode_version >> 8) & 0xff,
     625 	       sc->sc_runcode_version & 0xff,
     626 	       sc->sc_options);
     627 
     628 	/* Initialize the general ring information */
     629 
     630 	memset(sc->sc_recv_ring_table, 0,
     631 	      sizeof(struct rr_ring_ctl) * RR_ULP_COUNT);
     632 
     633 	ring = &sc->sc_gen_info->ri_event_ring_ctl;
     634 	ring->rr_ring_addr = sc->sc_event_ring_dma;
     635 	ring->rr_entry_size = sizeof(struct rr_event);
     636 	ring->rr_free_bufs = RR_EVENT_RING_SIZE / 4;
     637 	ring->rr_entries = RR_EVENT_RING_SIZE;
     638 	ring->rr_prod_index = 0;
     639 
     640 	ring = &sc->sc_gen_info->ri_cmd_ring_ctl;
     641 	ring->rr_free_bufs = 8;
     642 	ring->rr_entry_size = sizeof(union rr_cmd);
     643 	ring->rr_prod_index = RR_INIT_CMD;
     644 
     645 	ring = &sc->sc_gen_info->ri_send_ring_ctl;
     646 	ring->rr_ring_addr = sc->sc_send_ring_dma;
     647 	if (sc->sc_version == 1) {
     648 		ring->rr_free_bufs = RR_RR_DONT_COMPLAIN;
     649 	} else {
     650 		ring->rr_free_bufs = 0;
     651 	}
     652 
     653 	ring->rr_entries = RR_SEND_RING_SIZE;
     654 	ring->rr_entry_size = sizeof(struct rr_descr);
     655 
     656 	ring->rr_prod_index = sc->sc_send.ec_producer =
     657 		sc->sc_send.ec_consumer = 0;
     658 	sc->sc_send.ec_cur_mbuf = NULL;
     659 	sc->sc_send.ec_cur_buf = NULL;
     660 
     661 	sc->sc_snap_recv.ec_descr = sc->sc_snap_recv_ring;
     662 	sc->sc_snap_recv.ec_consumer = sc->sc_snap_recv.ec_producer = 0;
     663 
     664 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
     665 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
     666 
     667 	/* Set up the watchdog to make sure something happens! */
     668 
     669 	sc->sc_watchdog = 0;
     670 	ifp->if_timer = 5;
     671 
     672 	/*
     673 	 * Can't actually turn on interface until we see some events,
     674 	 * so set initialized flag, but don't start sending.
     675 	 */
     676 
     677 	sc->sc_flags = ESH_FL_INITIALIZED;
     678 	esh_send_cmd(sc, RR_CC_START_RUNCODE, 0, 0);
     679 	return;
     680 
     681 bad_init:
         	/* Clear all state flags and wake any thread tsleep()ing on sc
         	   (e.g. esh_fpopen waiting for the runcode to come up). */
     682 	sc->sc_flags = 0;
     683 	wakeup((void *) sc);
     684 	return;
     685 }
    686 
    687 
    688 /*
    689  * Code to handle the Framing Protocol (FP) interface to the esh.
    690  * This will allow us to write directly to the wire, with no
    691  * intervening memcpy's to slow us down.
    692  */
    693 
    694 int
    695 esh_fpopen(dev_t dev, int oflags, int devtype,
    696     struct lwp *l)
    697 {
    698 	struct esh_softc *sc;
    699 	struct rr_ring_ctl *ring_ctl;
    700 	struct esh_fp_ring_ctl *recv;
    701 	int ulp = ESHULP(dev);
    702 	int error = 0;
    703 	bus_size_t size;
    704 	int rseg;
    705 	int s;
    706 
    707 	sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
    708 	if (sc == NULL || ulp == HIPPI_ULP_802)
    709 		return (ENXIO);
    710 
    711 #ifdef ESH_PRINTF
    712 	printf("esh_fpopen:  opening board %d, ulp %d\n",
    713 	    device_unit(sc->sc_dev), ulp);
    714 #endif
    715 
    716 	/* If the card is not up, initialize it. */
    717 
    718 	s = splnet();
    719 
    720 	if (sc->sc_fp_rings >= sc->sc_max_rings - 1) {
    721 		splx(s);
    722 		return (ENOSPC);
    723 	}
    724 
    725 	if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
    726 		eshinit(sc);
    727 		if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0)
    728 			return EIO;
    729 	}
    730 
    731 	if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
    732 		/*
    733 		 * Wait for the runcode to indicate that it is up,
    734 		 * while watching to make sure we haven't crashed.
    735 		 */
    736 
    737 		error = 0;
    738 		while (error == 0 &&
    739 		       (sc->sc_flags & ESH_FL_INITIALIZED) != 0 &&
    740 		       (sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
    741 			error = tsleep((void *) sc, PCATCH | PRIBIO,
    742 				       "eshinit", 0);
    743 #ifdef ESH_PRINTF
    744 			printf("esh_fpopen:  tslept\n");
    745 #endif
    746 		}
    747 
    748 		if (error != 0) {
    749 			splx(s);
    750 			return error;
    751 		}
    752 
    753 		if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
    754 			splx(s);
    755 			return EIO;
    756 		}
    757 	}
    758 
    759 
    760 #ifdef ESH_PRINTF
    761 	printf("esh_fpopen:  card up\n");
    762 #endif
    763 
    764 	/* Look at the ring descriptor to see if the ULP is in use */
    765 
    766 	ring_ctl = &sc->sc_recv_ring_table[ulp];
    767 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
    768 			(char *) ring_ctl - (char *) sc->sc_dma_addr,
    769 			sizeof(*ring_ctl),
    770 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    771 	if (ring_ctl->rr_entry_size != 0) {
    772 		splx(s);
    773 		return (EBUSY);
    774 	}
    775 
    776 #ifdef ESH_PRINTF
    777 	printf("esh_fpopen:  ring %d okay\n", ulp);
    778 #endif
    779 
    780 	/*
    781 	 * Allocate the DMA space for the ring;  space for the
     782 	 * ring control blocks have already been statically allocated.
    783 	 */
    784 
    785 	recv = (struct esh_fp_ring_ctl *)
    786 	    malloc(sizeof(*recv), M_DEVBUF, M_WAITOK|M_ZERO);
    787 	if (recv == NULL)
    788 		return(ENOMEM);
    789 	TAILQ_INIT(&recv->ec_queue);
    790 
    791 	size = RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr);
    792 	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, RR_DMA_BOUNDARY,
    793 				 &recv->ec_dmaseg, 1,
    794 				 &rseg, BUS_DMA_WAITOK);
    795 
    796 	if (error) {
    797 		aprint_error_dev(sc->sc_dev, "couldn't allocate space for FP receive ring"
    798 		       "data structures\n");
    799 		goto bad_fp_dmamem_alloc;
    800 	}
    801 
    802 	if (rseg > 1) {
    803 		aprint_error_dev(sc->sc_dev, "contiguous memory not available for "
    804 		       "FP receive ring\n");
    805 		goto bad_fp_dmamem_map;
    806 	}
    807 
    808 	error = bus_dmamem_map(sc->sc_dmat, &recv->ec_dmaseg, rseg,
    809 			       size, (void **) &recv->ec_descr,
    810 			       BUS_DMA_WAITOK | BUS_DMA_COHERENT);
    811 	if (error) {
    812 		aprint_error_dev(sc->sc_dev, "couldn't map memory for FP receive ring\n");
    813 		goto bad_fp_dmamem_map;
    814 	}
    815 
    816 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, RR_DMA_BOUNDARY,
    817 			      BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
    818 			      &recv->ec_dma)) {
    819 		aprint_error_dev(sc->sc_dev, "couldn't create DMA map for FP receive ring\n");
    820 		goto bad_fp_dmamap_create;
    821 	}
    822 
    823 	if (bus_dmamap_load(sc->sc_dmat, recv->ec_dma, recv->ec_descr,
    824 			    size, NULL, BUS_DMA_WAITOK)) {
    825 		aprint_error_dev(sc->sc_dev, "couldn't load DMA map for FP receive ring\n");
    826 		goto bad_fp_dmamap_load;
    827 	}
    828 
    829 	memset(recv->ec_descr, 0, size);
    830 
    831 	/*
    832 	 * Create the ring:
    833 	 *
    834 	 * XXX:  HTF are we gonna deal with the fact that we don't know
    835 	 *	 if the open succeeded until we get a response from
    836 	 *	 the event handler?  I guess we could go to sleep waiting
    837 	 *	 for the interrupt, and get woken up by the eshintr
    838 	 *       case handling it.
    839 	 */
    840 
    841 	ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
    842 	ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
    843 	ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
    844 	ring_ctl->rr_entry_size = sizeof(struct rr_descr);
    845 	ring_ctl->rr_prod_index = recv->ec_producer = recv->ec_consumer = 0;
    846 	ring_ctl->rr_mode = RR_RR_CHARACTER;
    847 	recv->ec_ulp = ulp;
    848 	recv->ec_index = -1;
    849 
    850 	sc->sc_fp_recv[ulp] = recv;
    851 
    852 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
    853 			(char *) ring_ctl - (char *) sc->sc_dma_addr,
    854 			sizeof(*ring_ctl),
    855 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    856 
    857 	bus_dmamap_sync(sc->sc_dmat, recv->ec_dma, 0, size,
    858 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    859 
    860 	esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
    861 
    862 #ifdef ESH_PRINTF
    863 	printf("esh_fpopen:  sent create ring cmd\n");
    864 #endif
    865 
    866 	while (recv->ec_index == -1) {
    867 		error = tsleep((void *) &recv->ec_ulp, PCATCH | PRIBIO,
    868 			       "eshfpopen", 0);
    869 		if (error != 0 || recv->ec_index == -1) {
    870 			splx(s);
    871 			goto bad_fp_ring_create;
    872 		}
    873 	}
    874 #ifdef ESH_PRINTF
    875 	printf("esh_fpopen:  created ring\n");
    876 #endif
    877 
    878 	/*
    879 	 * Ring is created.  Set up various pointers to the ring
    880 	 * information, fill the ring, and get going...
    881 	 */
    882 
    883 	sc->sc_fp_rings++;
    884 	splx(s);
    885 	return 0;
    886 
    887 bad_fp_ring_create:
    888 #ifdef ESH_PRINTF
    889 	printf("esh_fpopen:  bad ring create\n");
    890 #endif
    891 	sc->sc_fp_recv[ulp] = NULL;
    892 	memset(ring_ctl, 0, sizeof(*ring_ctl));
    893 	bus_dmamap_unload(sc->sc_dmat, recv->ec_dma);
    894 bad_fp_dmamap_load:
    895 	bus_dmamap_destroy(sc->sc_dmat, recv->ec_dma);
    896 bad_fp_dmamap_create:
    897 	bus_dmamem_unmap(sc->sc_dmat, (void *) recv->ec_descr, size);
    898 bad_fp_dmamem_map:
    899 	bus_dmamem_free(sc->sc_dmat, &recv->ec_dmaseg, rseg);
    900 bad_fp_dmamem_alloc:
    901 	free(recv, M_DEVBUF);
    902 	if (error == 0)
    903 		error = ENOMEM;
    904 	splx(s);
    905 	return (error);
    906 }
    907 
    908 
    909 int
    910 esh_fpclose(dev_t dev, int fflag, int devtype,
    911     struct lwp *l)
    912 {
    913 	struct esh_softc *sc;
    914 	struct rr_ring_ctl *ring_ctl;
    915 	struct esh_fp_ring_ctl *ring;
    916 	int ulp = ESHULP(dev);
    917 	int index;
    918 	int error = 0;
    919 	int s;
    920 
    921 	sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
    922 	if (sc == NULL || ulp == HIPPI_ULP_802)
    923 		return (ENXIO);
    924 
    925 	s = splnet();
    926 
    927 	ring = sc->sc_fp_recv[ulp];
    928 	ring_ctl = &sc->sc_recv_ring_table[ulp];
    929 	index = ring->ec_index;
    930 
    931 #ifdef ESH_PRINTF
    932 	printf("esh_fpclose:  closing unit %d, ulp %d\n",
    933 	    device_unit(sc->sc_dev), ulp);
    934 #endif
    935 	assert(ring);
    936 	assert(ring_ctl);
    937 
    938 	/*
    939 	 * Disable the ring, wait for notification, and get rid of DMA
    940 	 * stuff and dynamically allocated memory.  Loop, waiting to
    941 	 * learn that the ring has been disabled, or the card
    942 	 * has been shut down.
    943 	 */
    944 
    945 	do {
    946 		esh_send_cmd(sc, RR_CC_DISABLE_RING, ulp, ring->ec_producer);
    947 
    948 		error = tsleep((void *) &ring->ec_index, PCATCH | PRIBIO,
    949 			       "esh_fpclose", 0);
    950 		if (error != 0 && error != EAGAIN) {
    951 			aprint_error_dev(sc->sc_dev, "esh_fpclose:  wait on ring disable bad\n");
    952 			ring->ec_index = -1;
    953 			break;
    954 		}
    955 	} while (ring->ec_index != -1 && sc->sc_flags != 0);
    956 
    957 	/*
    958 	 * XXX:  Gotta unload the ring, removing old descriptors!
    959 	 *       *Can* there be outstanding reads with a close issued!?
    960 	 */
    961 
    962 	bus_dmamap_unload(sc->sc_dmat, ring->ec_dma);
    963 	bus_dmamap_destroy(sc->sc_dmat, ring->ec_dma);
    964 	bus_dmamem_unmap(sc->sc_dmat, (void *) ring->ec_descr,
    965 			 RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr));
    966 	bus_dmamem_free(sc->sc_dmat, &ring->ec_dmaseg, ring->ec_dma->dm_nsegs);
    967 	free(ring, M_DEVBUF);
    968 	memset(ring_ctl, 0, sizeof(*ring_ctl));
    969 	sc->sc_fp_recv[ulp] = NULL;
    970 	sc->sc_fp_recv_index[index] = NULL;
    971 
    972 	sc->sc_fp_rings--;
    973 	if (sc->sc_fp_rings == 0)
    974 		sc->sc_flags &= ~ESH_FL_FP_RING_UP;
    975 
    976 	splx(s);
    977 	return 0;
    978 }
    979 
/*
 * Read from an FP (raw HIPPI) character device.
 *
 * The caller's iovecs are checked for word alignment, wired down,
 * mapped for DMA, and queued on the ULP's FP receive ring; we then
 * sleep until the interrupt path clears ESH_DI_BUSY on the dmainfo
 * (or the sleep is interrupted).  Only one outstanding read per ring
 * is supported -- see the comment inside the tsleep loop.
 *
 * Returns 0 on success with uio_resid reduced by the bytes actually
 * received, else an errno (ENXIO, EFAULT, ENOMEM, ENOBUFS, EINTR, EIO).
 */
int
esh_fpread(dev_t dev, struct uio *uio, int ioflag)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct iovec *iovp;
	struct esh_softc *sc;
	struct esh_fp_ring_ctl *ring;
	struct esh_dmainfo *di;
	int ulp = ESHULP(dev);
	int error;
	int i;
	int s;

#ifdef ESH_PRINTF
	printf("esh_fpread:  dev %x\n", dev);
#endif

	sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
	if (sc == NULL || ulp == HIPPI_ULP_802)
		return (ENXIO);

	s = splnet();

	ring = sc->sc_fp_recv[ulp];

	if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
		error = ENXIO;
		goto fpread_done;
	}

	/* Check for validity */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		/*
		 * Check for valid offsets and sizes:  every iovec base
		 * must be word-aligned, and every iovec except the last
		 * must be a multiple of four bytes long.
		 */
		if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
		    (i < uio->uio_iovcnt - 1 &&
		     (uio->uio_iov[i].iov_len & 3) != 0)) {
			error = EFAULT;
			goto fpread_done;
		}
	}

	/* Lock down the pages */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		iovp = &uio->uio_iov[i];
		error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len,
		    VM_PROT_WRITE);
		if (error) {
			/* Unlock what we've locked so far. */
			for (--i; i >= 0; i--) {
				iovp = &uio->uio_iov[i];
				uvm_vsunlock(p->p_vmspace, iovp->iov_base,
				    iovp->iov_len);
			}
			goto fpread_done;
		}
	}

	/*
	 * Perform preliminary DMA mapping and throw the buffers
	 * onto the queue to be sent.
	 */

	di = esh_new_dmainfo(sc);
	if (di == NULL) {
		error = ENOMEM;
		goto fpread_done;
	}
	di->ed_buf = NULL;	/* not a struct buf transfer (cf. esh_fpstrategy) */
	di->ed_error = 0;
	di->ed_read_len = 0;

#ifdef ESH_PRINTF
	printf("esh_fpread:  ulp %d, uio offset %qd, resid %d, iovcnt %d\n",
	       ulp, uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
#endif

	error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
				    uio, BUS_DMA_READ|BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "esh_fpread:  bus_dmamap_load_uio "
		       "failed\terror code %d\n",
		       error);
		error = ENOBUFS;
		esh_free_dmainfo(sc, di);
		goto fpread_done;
	}

	/* Make the mapped user buffer visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
			0, di->ed_dma->dm_mapsize,
			BUS_DMASYNC_PREREAD);

#ifdef ESH_PRINTF
	printf("esh_fpread:  ulp %d, di %p, nsegs %d, uio len %d\n",
	       ulp, di, di->ed_dma->dm_nsegs, uio->uio_resid);
#endif

	di->ed_flags |= ESH_DI_BUSY;

	TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
	esh_fill_fp_ring(sc, ring);

	/* Sleep until the interrupt path clears ESH_DI_BUSY on this di. */
	while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
		error = tsleep((void *) di, PCATCH | PRIBIO, "esh_fpread", 0);
#ifdef ESH_PRINTF
		printf("esh_fpread:  ulp %d, tslept %d\n", ulp, error);
#endif
		if (error) {
			/*
			 * Remove the buffer entries from the ring;  this
			 * is gonna require a DISCARD_PKT command, and
			 * will certainly disrupt things.  This is why we
			 * can have only one outstanding read on a ring
			 * at a time.  :-(
			 */

			printf("esh_fpread:  was that a ^C!?  error %d, ulp %d\n",
			       error, ulp);
			if (error == EINTR || error == ERESTART)
				error = 0;
			if ((di->ed_flags & ESH_DI_BUSY) != 0) {
				esh_flush_fp_ring(sc, ring, di);
				error = EINTR;
				break;
			}
		}
	}

	/* di->ed_error is set by the interrupt path on a receive error. */
	if (error == 0 && di->ed_error != 0)
		error = EIO;

	/*
	 * How do we let the caller know how much has been read?
	 * Adjust the uio_resid stuff!?
	 */

	assert(uio->uio_resid >= di->ed_read_len);

	uio->uio_resid -= di->ed_read_len;
	/* Unwire the user pages locked above. */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		iovp = &uio->uio_iov[i];
		uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len);
	}
	esh_free_dmainfo(sc, di);

fpread_done:
#ifdef ESH_PRINTF
	printf("esh_fpread:  ulp %d, error %d\n", ulp, error);
#endif
	splx(s);
	return error;
}
   1132 
   1133 
/*
 * Write to an FP (raw HIPPI) character device.
 *
 * Mirrors esh_fpread:  the caller's iovecs are alignment-checked,
 * wired down, mapped for DMA, queued on the shared send ring, and
 * the transmitter is kicked via eshstart(); we then sleep until the
 * completion path clears ESH_DI_BUSY on the dmainfo.
 *
 * Returns 0 on success with uio_resid/uio_offset advanced by the
 * mapped length, else an errno (ENXIO, EFAULT, ENOMEM, ENOBUFS, EIO).
 */
int
esh_fpwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct iovec *iovp;
	struct esh_softc *sc;
	struct esh_send_ring_ctl *ring;
	struct esh_dmainfo *di;
	int ulp = ESHULP(dev);
	int error;
	int len;
	int i;
	int s;

#ifdef ESH_PRINTF
	printf("esh_fpwrite:  dev %x\n", dev);
#endif

	sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
	if (sc == NULL || ulp == HIPPI_ULP_802)
		return (ENXIO);

	s = splnet();

	/* All ULPs share the single send ring. */
	ring = &sc->sc_send;

	if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
		error = ENXIO;
		goto fpwrite_done;
	}

	/* Check for validity */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		/*
		 * Each iovec base must be word-aligned, and every iovec
		 * except the last must be a multiple of four bytes long.
		 */
		if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
		    (i < uio->uio_iovcnt - 1 &&
		     (uio->uio_iov[i].iov_len & 3) != 0)) {
			error = EFAULT;
			goto fpwrite_done;
		}
	}

	/* Lock down the pages */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		iovp = &uio->uio_iov[i];
		error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len,
		    VM_PROT_READ);
		if (error) {
			/* Unlock what we've locked so far. */
			for (--i; i >= 0; i--) {
				iovp = &uio->uio_iov[i];
				uvm_vsunlock(p->p_vmspace, iovp->iov_base,
				    iovp->iov_len);
			}
			goto fpwrite_done;
		}
	}

	/*
	 * Perform preliminary DMA mapping and throw the buffers
	 * onto the queue to be sent.
	 */

	di = esh_new_dmainfo(sc);
	if (di == NULL) {
		error = ENOMEM;
		goto fpwrite_done;
	}
	di->ed_buf = NULL;	/* not a struct buf transfer (cf. esh_fpstrategy) */
	di->ed_error = 0;

#ifdef ESH_PRINTF
	printf("esh_fpwrite:  uio offset %qd, resid %d, iovcnt %d\n",
	       uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
#endif

	error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
				    uio, BUS_DMA_WRITE|BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "esh_fpwrite:  bus_dmamap_load_uio "
		       "failed\terror code %d\n",
		       error);
		error = ENOBUFS;
		esh_free_dmainfo(sc, di);
		goto fpwrite_done;
	}

	/* Flush the mapped user data so the device sees it. */
	bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
			0, di->ed_dma->dm_mapsize,
			BUS_DMASYNC_PREWRITE);

#ifdef ESH_PRINTF
	printf("esh_fpwrite:  di %p, nsegs %d, uio len %d\n",
	       di, di->ed_dma->dm_nsegs, uio->uio_resid);
#endif

	/* Remember the mapped length; uio is consumed by the load above. */
	len = di->ed_dma->dm_mapsize;
	di->ed_flags |= ESH_DI_BUSY;

	TAILQ_INSERT_TAIL(&ring->ec_di_queue, di, ed_list);
	eshstart(&sc->sc_if);

	/*
	 * Sleep until the send completes.  Note:  no PCATCH here, so
	 * tsleep should not return EINTR/ERESTART; the panic below
	 * fires if it somehow does while the transfer is still busy.
	 */
	while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
		error = tsleep((void *) di, PRIBIO, "esh_fpwrite", 0);
#ifdef ESH_PRINTF
		printf("esh_fpwrite:  tslept %d\n", error);
#endif
		if (error) {
			printf("esh_fpwrite:  was that a ^C!?  Shouldn't be!  Error %d\n",
			       error);
			if (error == EINTR || error == ERESTART)
				error = 0;
			if ((di->ed_flags & ESH_DI_BUSY) != 0) {
				panic("interrupted eshwrite!");
#if 0
				/* Better do *something* here! */
				esh_flush_send_ring(sc, di);
#endif
				error = EINTR;
				break;
			}
		}
	}

	/* di->ed_error is set by the completion path on a send error. */
	if (error == 0 && di->ed_error != 0)
		error = EIO;

	/*
	 * How do we let the caller know how much has been written?
	 * Adjust the uio_resid stuff!?
	 */

	uio->uio_resid -= len;
	uio->uio_offset += len;

	/* Unwire the user pages locked above. */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		iovp = &uio->uio_iov[i];
		uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len);
	}

	esh_free_dmainfo(sc, di);

fpwrite_done:
#ifdef ESH_PRINTF
	printf("esh_fpwrite:  error %d\n", error);
#endif
	splx(s);
	return error;
}
   1283 
   1284 void
   1285 esh_fpstrategy(struct buf *bp)
   1286 {
   1287 	struct esh_softc *sc;
   1288 	int ulp = ESHULP(bp->b_dev);
   1289 	int error = 0;
   1290 	int s;
   1291 
   1292 #ifdef ESH_PRINTF
   1293         printf("esh_fpstrategy:  starting, bcount %ld, flags %lx, dev %x\n"
   1294 	       "\tunit %x, ulp %d\n",
   1295 		bp->b_bcount, bp->b_flags, bp->b_dev, unit, ulp);
   1296 #endif
   1297 
   1298 	sc = device_lookup_private(&esh_cd, ESHUNIT(bp->b_dev));
   1299 
   1300 	s = splnet();
   1301 	if (sc == NULL || ulp == HIPPI_ULP_802) {
   1302 		bp->b_error = ENXIO;
   1303 		goto done;
   1304 	}
   1305 
   1306 	if (bp->b_bcount == 0)
   1307 		goto done;
   1308 
   1309 #define UP_FLAGS (ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
   1310 
   1311 	if ((sc->sc_flags & UP_FLAGS) != UP_FLAGS) {
   1312 		bp->b_error = EBUSY;
   1313 		goto done;
   1314 	}
   1315 #undef UP_FLAGS
   1316 
   1317 	if (bp->b_flags & B_READ) {
   1318 		/*
   1319 		 * Perform preliminary DMA mapping and throw the buffers
   1320 		 * onto the queue to be sent.
   1321 		 */
   1322 
   1323 		struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[ulp];
   1324 		struct esh_dmainfo *di = esh_new_dmainfo(sc);
   1325 
   1326 		if (di == NULL) {
   1327 			bp->b_error = ENOMEM;
   1328 			goto done;
   1329 		}
   1330 		di->ed_buf = bp;
   1331 		error = bus_dmamap_load(sc->sc_dmat, di->ed_dma,
   1332 					bp->b_data, bp->b_bcount,
   1333 					bp->b_proc,
   1334 					BUS_DMA_READ|BUS_DMA_WAITOK);
   1335 		if (error) {
   1336 			aprint_error_dev(sc->sc_dev, "esh_fpstrategy:  "
   1337 			       "bus_dmamap_load "
   1338 			       "failed\terror code %d\n",
   1339 			       error);
   1340 			bp->b_error = ENOBUFS;
   1341 			esh_free_dmainfo(sc, di);
   1342 			goto done;
   1343 		}
   1344 
   1345 		bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
   1346 				0, di->ed_dma->dm_mapsize,
   1347 				BUS_DMASYNC_PREREAD);
   1348 
   1349 #ifdef ESH_PRINTF
   1350 		printf("fpstrategy:  di %p\n", di);
   1351 #endif
   1352 
   1353 		TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
   1354 		esh_fill_fp_ring(sc, ring);
   1355 	} else {
   1356 		/*
   1357 		 * Queue up the buffer for future sending.  If the card
   1358 		 * isn't already transmitting, give it a kick.
   1359 		 */
   1360 
   1361 		struct esh_send_ring_ctl *ring = &sc->sc_send;
   1362 		bufq_put(ring->ec_buf_queue, bp);
   1363 #ifdef ESH_PRINTF
   1364 		printf("esh_fpstrategy:  ready to call eshstart to write!\n");
   1365 #endif
   1366 		eshstart(&sc->sc_if);
   1367 	}
   1368 	splx(s);
   1369 	return;
   1370 
   1371 done:
   1372 	splx(s);
   1373 #ifdef ESH_PRINTF
   1374 	printf("esh_fpstrategy:  failing, bp->b_error %d!\n",
   1375 	       bp->b_error);
   1376 #endif
   1377 	biodone(bp);
   1378 }
   1379 
   1380 /*
   1381  * Handle interrupts.  This is basicly event handling code;  version two
   1382  * firmware tries to speed things up by just telling us the location
   1383  * of the producer and consumer indices, rather than sending us an event.
   1384  */
   1385 
int
eshintr(void *arg)
{
	struct esh_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rc_offsets;
	u_int32_t misc_host_ctl;
	int rc_send_consumer = 0;	/* shut up compiler */
	int rc_snap_ring_consumer = 0;	/* ditto */
	u_int8_t fp_ring_consumer[RR_MAX_RECV_RING];
	int start_consumer;
	int ret = 0;

	/*
	 * Debug-only bookkeeping:  `okay' records that a banner was
	 * already printed, `blah' counts v2 consumer-index actions,
	 * and sbuf/t accumulate hex dumps of the consumer offsets
	 * (only printed under ESH_PRINTF).
	 */
	int okay = 0;
	int blah = 0;
	char sbuf[100];
	char t[100];


	/* Check to see if this is our interrupt. */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	if ((misc_host_ctl & RR_MH_INTERRUPT) == 0)
		return 0;

	/* If we can't do anything with the interrupt, just drop it */

	if (sc->sc_flags == 0)
		return 1;

	rc_offsets = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_event_producer = rc_offsets & 0xff;
	if (sc->sc_version == 2) {
		int i;

		/*
		 * Version-2 RunCode packs the send and SNAP-ring
		 * consumer indices into the event-producer register
		 * and exposes the FP ring consumers in a register
		 * block, four 8-bit entries per 32-bit word.
		 */
		sbuf[0] = '\0';
		strlcat(sbuf, "rc:  ", sizeof(sbuf));
		rc_send_consumer = (rc_offsets >> 8) & 0xff;
		rc_snap_ring_consumer = (rc_offsets >> 16) & 0xff;
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			rc_offsets =
				bus_space_read_4(iot, ioh,
						 RR_RUNCODE_RECV_CONS + i);
			/* XXX:  should do this right! */
			NTOHL(rc_offsets);
			/*
			 * NOTE(review):  word-store into a u_int8_t
			 * array -- relies on fp_ring_consumer[] being
			 * 4-byte aligned; confirm this is safe on all
			 * supported platforms.
			 */
			*((u_int32_t *) &fp_ring_consumer[i]) = rc_offsets;
			snprintf(t, sizeof(t), "%.8x|", rc_offsets);
			strlcat(sbuf, t, sizeof(sbuf));
		}
	}
	start_consumer = sc->sc_event_consumer;

	/* Take care of synchronizing DMA with entries we read... */

	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Drain the event ring, consumer chasing producer. */
	while (sc->sc_event_consumer != sc->sc_event_producer) {
		struct rr_event *event =
			&sc->sc_event_ring[sc->sc_event_consumer];

#ifdef ESH_PRINTF
		if (event->re_code != RR_EC_WATCHDOG &&
		    event->re_code != RR_EC_STATS_UPDATE &&
		    event->re_code != RR_EC_SET_CMD_CONSUMER) {
			printf("%s:  event code %x, ring %d, index %d\n",
			       device_xname(sc->sc_dev), event->re_code,
			       event->re_ring, event->re_index);
			if (okay == 0)
				printf("%s\n", sbuf);
			okay = 1;
		}
#endif
		ret = 1;   /* some action was taken by card */

		switch(event->re_code) {
		case RR_EC_RUNCODE_UP:
			printf("%s:  firmware up\n", device_xname(sc->sc_dev));
			sc->sc_flags |= ESH_FL_RUNCODE_UP;
			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			if ((ifp->if_flags & IFF_UP) != 0)
				esh_init_snap_ring(sc);
			if (sc->sc_fp_rings > 0)
				esh_init_fp_rings(sc);

			/*
			 * XXX:   crank up FP rings that might be
			 *        in use after a reset!
			 */
			wakeup((void *) sc);
			break;

		case RR_EC_WATCHDOG:
			/*
			 * Record the watchdog event.
			 * This is checked by eshwatchdog
			 */

			sc->sc_watchdog = 1;
			break;

		case RR_EC_SET_CMD_CONSUMER:
			sc->sc_cmd_consumer = event->re_index;
			break;

		case RR_EC_LINK_ON:
			printf("%s:  link up\n", device_xname(sc->sc_dev));
			sc->sc_flags |= ESH_FL_LINK_UP;

			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
			if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
				/*
				 * Interface is now `running', with no
				 * output active.
				 */
				ifp->if_flags |= IFF_RUNNING;
				ifp->if_flags &= ~IFF_OACTIVE;

				/* Attempt to start output, if any. */
			}
			eshstart(ifp);
			break;

		case RR_EC_LINK_OFF:
			sc->sc_flags &= ~ESH_FL_LINK_UP;
			printf("%s:  link down\n", device_xname(sc->sc_dev));
			break;

		/*
		 * These are all unexpected.  We need to handle all
		 * of them, though.
		 */

		case RR_EC_INVALID_CMD:
		case RR_EC_INTERNAL_ERROR:
		case RR2_EC_INTERNAL_ERROR:
		case RR_EC_BAD_SEND_RING:
		case RR_EC_BAD_SEND_BUF:
		case RR_EC_BAD_SEND_DESC:
		case RR_EC_RECV_RING_FLUSH:
		case RR_EC_RECV_ERROR_INFO:
		case RR_EC_BAD_RECV_BUF:
		case RR_EC_BAD_RECV_DESC:
		case RR_EC_BAD_RECV_RING:
		case RR_EC_UNIMPLEMENTED:
			aprint_error_dev(sc->sc_dev, "unexpected event %x;"
			       "shutting down interface\n",
			       event->re_code);
			ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
			sc->sc_flags = ESH_FL_CRASHED;
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			break;

/* Log-and-continue handler for events that only need a console note. */
#define CALLOUT(a) case a:						\
	printf("%s:  Event " #a " received -- "				\
	       "ring %d index %d timestamp %x\n",			\
	       device_xname(sc->sc_dev), event->re_ring, event->re_index,	\
	       event->re_timestamp);					\
	break;

		CALLOUT(RR_EC_NO_RING_FOR_ULP);
		CALLOUT(RR_EC_REJECTING);  /* dropping packets */
#undef CALLOUT

			/* Send events */

		case RR_EC_PACKET_SENT:   	/* not used in firmware 2.x */
			ifp->if_opackets++;
			/* FALLTHROUGH */

		case RR_EC_SET_SND_CONSUMER:
			assert(sc->sc_version == 1);
			/* FALLTHROUGH */

		case RR_EC_SEND_RING_LOW:
			eshstart_cleanup(sc, event->re_index, 0);
			break;


		case RR_EC_CONN_REJECT:
		case RR_EC_CAMPON_TIMEOUT:
		case RR_EC_CONN_TIMEOUT:
		case RR_EC_DISCONN_ERR:
		case RR_EC_INTERNAL_PARITY:
		case RR_EC_TX_IDLE:
		case RR_EC_SEND_LINK_OFF:
			eshstart_cleanup(sc, event->re_index, event->re_code);
			break;

			/* Receive events */

		case RR_EC_RING_ENABLED:
			if (event->re_ring == HIPPI_ULP_802) {
				rc_snap_ring_consumer = 0; /* prevent read */
				sc->sc_flags |= ESH_FL_SNAP_RING_UP;
				esh_fill_snap_ring(sc);

				if (sc->sc_flags & ESH_FL_LINK_UP) {
					/*
					 * Interface is now `running', with no
					 * output active.
					 */
					ifp->if_flags |= IFF_RUNNING;
					ifp->if_flags &= ~IFF_OACTIVE;

					/* Attempt to start output, if any. */

					eshstart(ifp);
				}
#ifdef ESH_PRINTF
				if (event->re_index != 0)
					printf("ENABLE snap ring -- index %d instead of 0!\n",
					       event->re_index);
#endif
			} else {
				/*
				 * NOTE(review):  assumes the firmware only
				 * reports ENABLED for a ULP we opened, so
				 * sc_fp_recv[re_ring] is non-NULL -- confirm.
				 */
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];

				sc->sc_flags |= ESH_FL_FP_RING_UP;
#ifdef ESH_PRINTF
				printf("eshintr:  FP ring %d up\n",
				       event->re_ring);
#endif

				/* Wakes the esh_fpopen() sleeper. */
				sc->sc_fp_recv_index[event->re_index] = ring;
				ring->ec_index = event->re_index;
				wakeup((void *) &ring->ec_ulp);
			}
			break;

		case RR_EC_RING_DISABLED:
#ifdef ESH_PRINTF
			printf("eshintr:  disabling ring %d\n",
			       event->re_ring);
#endif
			if (event->re_ring == HIPPI_ULP_802) {
				/* Free any mbufs still sitting on the SNAP ring. */
				struct rr_ring_ctl *ring =
					sc->sc_recv_ring_table + HIPPI_ULP_802;
				memset(ring, 0, sizeof(*ring));
				sc->sc_flags &= ~ESH_FL_CLOSING_SNAP;
				sc->sc_flags &= ~ESH_FL_SNAP_RING_UP;
				while (sc->sc_snap_recv.ec_consumer
				       != sc->sc_snap_recv.ec_producer) {
					u_int16_t offset = sc->sc_snap_recv.ec_consumer;

					bus_dmamap_unload(sc->sc_dmat,
							  sc->sc_snap_recv.ec_dma[offset]);
					m_free(sc->sc_snap_recv.ec_m[offset]);
					sc->sc_snap_recv.ec_m[offset] = NULL;
					sc->sc_snap_recv.ec_consumer =
						NEXT_RECV(sc->sc_snap_recv.ec_consumer);
				}
				sc->sc_snap_recv.ec_consumer =
					rc_snap_ring_consumer;
				sc->sc_snap_recv.ec_producer =
					rc_snap_ring_consumer;
				wakeup((void *) &sc->sc_snap_recv);
			} else {
				/* Wakes the esh_fpclose() sleeper. */
				struct esh_fp_ring_ctl *recv =
					sc->sc_fp_recv[event->re_ring];
				assert(recv != NULL);
				recv->ec_consumer = recv->ec_producer =
					fp_ring_consumer[recv->ec_index];
				recv->ec_index = -1;
				wakeup((void *) &recv->ec_index);
			}
			break;

		case RR_EC_RING_ENABLE_ERR:
			if (event->re_ring == HIPPI_ULP_802) {
				aprint_error_dev(sc->sc_dev, "unable to enable SNAP ring!?\n\t"
				       "shutting down interface\n");
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef ESH_PRINTF
				eshstatus(sc);
#endif
			} else {
				/*
				 * If we just leave the ring index as-is,
				 * the driver will figure out that
				 * we failed to open the ring.
				 */
				wakeup((void *) &(sc->sc_fp_recv[event->re_ring]->ec_ulp));
			}
			break;

		case RR_EC_PACKET_DISCARDED:
		        /*
			 * Determine the dmainfo for the current packet
			 * we just discarded and wake up the waiting
			 * process.
			 *
			 * This should never happen on the network ring!
			 */

			if (event->re_ring == HIPPI_ULP_802) {
				aprint_error_dev(sc->sc_dev, "discard on SNAP ring!?\n\t"
				       "shutting down interface\n");
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
				sc->sc_flags = ESH_FL_CRASHED;
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];
				struct esh_dmainfo *di =
					ring->ec_cur_dmainfo;

				if (di == NULL)
					di = ring->ec_dmainfo[ring->ec_producer];
				printf("eshintr:  DISCARD:  index %d,"
				       "ring prod %d, di %p, ring[index] %p\n",
				       event->re_index, ring->ec_producer, di,
				       ring->ec_dmainfo[event->re_index]);

				if (di == NULL)
					di = ring->ec_dmainfo[event->re_index];

				if (di == NULL) {
					printf("eshintr:  DISCARD:  NULL di, skipping...\n");
					break;
				}

				/* Wakes the esh_fpread() sleeper for this di. */
				di->ed_flags &=
					~(ESH_DI_READING | ESH_DI_BUSY);
				wakeup((void *) &di->ed_flags);
			}
			break;

		case RR_EC_OUT_OF_BUF:
		case RR_EC_RECV_RING_OUT:
		case RR_EC_RECV_RING_LOW:
			break;

		case RR_EC_SET_RECV_CONSUMER:
		case RR_EC_PACKET_RECVED:
			if (event->re_ring == HIPPI_ULP_802)
				esh_read_snap_ring(sc, event->re_index, 0);
			else if (sc->sc_fp_recv[event->re_ring] != NULL)
				esh_read_fp_ring(sc, event->re_index, 0,
						 event->re_ring);
			break;

		/* Receive-side error events:  pass the code downstream. */
		case RR_EC_RECV_IDLE:
		case RR_EC_PARITY_ERR:
		case RR_EC_LLRC_ERR:
		case RR_EC_PKT_LENGTH_ERR:
		case RR_EC_IP_HDR_CKSUM_ERR:
		case RR_EC_DATA_CKSUM_ERR:
		case RR_EC_SHORT_BURST_ERR:
		case RR_EC_RECV_LINK_OFF:
		case RR_EC_FLAG_SYNC_ERR:
		case RR_EC_FRAME_ERR:
		case RR_EC_STATE_TRANS_ERR:
		case RR_EC_NO_READY_PULSE:
			if (event->re_ring == HIPPI_ULP_802) {
				esh_read_snap_ring(sc, event->re_index,
						   event->re_code);
			} else {
				struct esh_fp_ring_ctl *r;

				r = sc->sc_fp_recv[event->re_ring];
				if (r)
					r->ec_error = event->re_code;
			}
			break;

		/*
		 * Statistics events can be ignored for now.  They might become
		 * necessary if we have to deliver stats on demand, rather than
		 * just returning the statistics block of memory.
		 */

		case RR_EC_STATS_UPDATE:
		case RR_EC_STATS_RETRIEVED:
		case RR_EC_TRACE:
			break;

		default:
			aprint_error_dev(sc->sc_dev, "Bogus event code %x, "
			       "ring %d, index %d, timestamp %x\n",
			       event->re_code,
			       event->re_ring, event->re_index,
			       event->re_timestamp);
			break;
		}

		sc->sc_event_consumer = NEXT_EVENT(sc->sc_event_consumer);
	}

	/* Do the receive and send ring processing for version 2 RunCode */

	if (sc->sc_version == 2) {
		int i;
		if (sc->sc_send.ec_consumer != rc_send_consumer) {
			eshstart_cleanup(sc, rc_send_consumer, 0);
			ret = 1;
			blah++;
		}
		if (sc->sc_snap_recv.ec_consumer != rc_snap_ring_consumer &&
		    (sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
			esh_read_snap_ring(sc, rc_snap_ring_consumer, 0);
			ret = 1;
			blah++;
		}
		for (i = 0; i < RR_MAX_RECV_RING; i++) {
			struct esh_fp_ring_ctl *r = sc->sc_fp_recv_index[i];

			if (r != NULL &&
			    r->ec_consumer != fp_ring_consumer[i]) {
#ifdef ESH_PRINTF
				printf("eshintr:  performed read on ring %d, index %d\n",
				       r->ec_ulp, i);
#endif
				blah++;
				esh_read_fp_ring(sc, fp_ring_consumer[i],
						 0, r->ec_ulp);
				fp_ring_consumer[i] = r->ec_consumer;
			}
		}
		if (blah != 0 && okay == 0) {
			okay = 1;
#ifdef ESH_PRINTF
			printf("%s\n", sbuf);
#endif
		}
		/* Pack the updated consumer indices for the write-back below. */
		rc_offsets = (sc->sc_snap_recv.ec_consumer << 16) |
			(sc->sc_send.ec_consumer << 8) | sc->sc_event_consumer;
	} else {
		rc_offsets = sc->sc_event_consumer;
	}

	/*
	 * NOTE(review):  this second sync uses POSTREAD|POSTWRITE like
	 * the first; a PREREAD|PREWRITE would normally be expected when
	 * handing the ring back to the device -- confirm intent.
	 */
	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Write out new values for the FP segments... */

	if (sc->sc_version == 2) {
		int i;
		u_int32_t u;

		sbuf[0] = '\0';
		strlcat(sbuf, "drv: ", sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			/* XXX:  should do this right! */
			u = *((u_int32_t *) &fp_ring_consumer[i]);
			snprintf(t, sizeof(t), "%.8x|", u);
			strlcat(sbuf, t, sizeof(sbuf));
			NTOHL(u);
			bus_space_write_4(iot, ioh,
					  RR_DRIVER_RECV_CONS + i, u);
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", sbuf);
#endif

		sbuf[0] = '\0';
		strlcat(sbuf, "rcn: ", sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			u = bus_space_read_4(iot, ioh,
					     RR_RUNCODE_RECV_CONS + i);
			/* XXX:  should do this right! */
			NTOHL(u);
			snprintf(t, sizeof(t), "%.8x|", u);
			strlcat(sbuf, t, sizeof(sbuf));
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", sbuf);
#endif
	}

	/* Clear interrupt */
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, rc_offsets);

	return (ret);
}
   1875 
   1876 
   1877 /*
   1878  * Start output on the interface.  Always called at splnet().
   1879  * Check to see if there are any mbufs that didn't get sent the
   1880  * last time this was called.  If there are none, get more mbufs
   1881  * and send 'em.
   1882  *
   1883  * For now, we only send one packet at a time.
   1884  */
   1885 
   1886 void
   1887 eshstart(struct ifnet *ifp)
   1888 {
   1889 	struct esh_softc *sc = ifp->if_softc;
   1890 	struct esh_send_ring_ctl *send = &sc->sc_send;
   1891 	struct mbuf *m = NULL;
   1892 	int error;
   1893 
   1894 	/* Don't transmit if interface is busy or not running */
   1895 
   1896 #ifdef ESH_PRINTF
   1897 	printf("eshstart:  ready to look;  flags %x\n", sc->sc_flags);
   1898 #endif
   1899 
   1900 #define LINK_UP_FLAGS (ESH_FL_LINK_UP | ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
   1901 	if ((sc->sc_flags & LINK_UP_FLAGS) != LINK_UP_FLAGS)
   1902 		return;
   1903 #undef LINK_UP_FLAGS
   1904 
   1905 #ifdef ESH_PRINTF
   1906 	if (esh_check(sc))
   1907 		return;
   1908 #endif
   1909 
   1910 	/* If we have sent the current packet, get another */
   1911 
   1912 	while ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0 &&
   1913 	       (m = send->ec_cur_mbuf) == NULL && send->ec_cur_buf == NULL &&
   1914 		send->ec_cur_dmainfo == NULL) {
   1915 		IFQ_DEQUEUE(&ifp->if_snd, m);
   1916 		if (m == 0)		/* not really needed */
   1917 			break;
   1918 
   1919 		if (ifp->if_bpf) {
   1920 			/*
   1921 			 * On output, the raw packet has a eight-byte CCI
   1922 			 * field prepended.  On input, there is no such field.
   1923 			 * The bpf expects the packet to look the same in both
   1924 			 * places, so we temporarily lop off the prepended CCI
   1925 			 * field here, then replace it.  Ugh.
   1926 			 *
   1927 			 * XXX:  Need to use standard mbuf manipulation
   1928 			 *       functions, first mbuf may be less than
   1929 			 *       8 bytes long.
   1930 			 */
   1931 
   1932 			m->m_len -= 8;
   1933 			m->m_data += 8;
   1934 			m->m_pkthdr.len -= 8;
   1935 			bpf_mtap(ifp, m);
   1936 			m->m_len += 8;
   1937 			m->m_data -= 8;
   1938 			m->m_pkthdr.len += 8;
   1939 		}
   1940 
   1941 		send->ec_len = m->m_pkthdr.len;
   1942 		m = send->ec_cur_mbuf = esh_adjust_mbufs(sc, m);
   1943 		if (m == NULL)
   1944 			continue;
   1945 
   1946 		error = bus_dmamap_load_mbuf(sc->sc_dmat, send->ec_dma,
   1947 					     m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1948 		if (error)
   1949 			panic("%s:  eshstart:  "
   1950 			      "bus_dmamap_load_mbuf failed err %d\n",
   1951 			      device_xname(sc->sc_dev), error);
   1952 		send->ec_offset = 0;
   1953 	}
   1954 
   1955 	/*
   1956 	 * If there are no network packets to send, see if there
   1957 	 * are any FP packets to send.
   1958 	 *
   1959 	 * XXX:  Some users may disagree with these priorities;
   1960 	 *       this reduces network latency by increasing FP latency...
   1961 	 *	 Note that it also means that FP packets can get
   1962 	 *	 locked out so that they *never* get sent, if the
   1963 	 *	 network constantly fills up the pipe.  Not good!
   1964 	 */
   1965 
   1966 	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
   1967 	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
   1968 	    send->ec_cur_dmainfo == NULL &&
   1969 	    bufq_peek(send->ec_buf_queue) != NULL) {
   1970 		struct buf *bp;
   1971 
   1972 #ifdef ESH_PRINTF
   1973 		printf("eshstart:  getting a buf from send->ec_queue %p\n",
   1974 		       send->ec_queue);
   1975 #endif
   1976 
   1977 		bp = send->ec_cur_buf = bufq_get(send->ec_buf_queue);
   1978 		send->ec_offset = 0;
   1979 		send->ec_len = bp->b_bcount;
   1980 
   1981 		/*
   1982 		 * Determine the DMA mapping for the buffer.
   1983 		 * If this is too large, what do we do!?
   1984 		 */
   1985 
   1986 		error = bus_dmamap_load(sc->sc_dmat, send->ec_dma,
   1987 					bp->b_data, bp->b_bcount,
   1988 					bp->b_proc,
   1989 					BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1990 
   1991 		if (error)
   1992 			panic("%s:  eshstart:  "
   1993 			      "bus_dmamap_load failed err %d\n",
   1994 			      device_xname(sc->sc_dev), error);
   1995 	}
   1996 
   1997 	/*
   1998 	 * If there are no packets from strategy to send, see if there
   1999 	 * are any FP packets to send from fpwrite.
   2000 	 */
   2001 
   2002 	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
   2003 	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
   2004 	    send->ec_cur_dmainfo == NULL) {
   2005 		struct esh_dmainfo *di;
   2006 
   2007 		di = TAILQ_FIRST(&send->ec_di_queue);
   2008 		if (di == NULL)
   2009 			return;
   2010 		TAILQ_REMOVE(&send->ec_di_queue, di, ed_list);
   2011 
   2012 #ifdef ESH_PRINTF
   2013 		printf("eshstart:  getting a di from send->ec_di_queue %p\n",
   2014 		       &send->ec_di_queue);
   2015 #endif
   2016 
   2017 		send->ec_cur_dmainfo = di;
   2018 		send->ec_offset = 0;
   2019 		send->ec_len = di->ed_dma->dm_mapsize;
   2020 	}
   2021 
   2022 	if (send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
   2023 	    send->ec_cur_dmainfo == NULL)
   2024 		return;
   2025 
   2026 	assert(send->ec_len);
   2027 	assert(send->ec_dma->dm_nsegs ||
   2028 	       send->ec_cur_dmainfo->ed_dma->dm_nsegs);
   2029 	assert(send->ec_cur_mbuf || send->ec_cur_buf || send->ec_cur_dmainfo);
   2030 
   2031 	esh_send(sc);
   2032 	return;
   2033 }
   2034 
   2035 
   2036 /*
   2037  * Put the buffers from the send dmamap into the descriptors and
   2038  * send 'em off...
   2039  */
   2040 
   2041 static void
   2042 esh_send(struct esh_softc *sc)
   2043 {
   2044 	struct esh_send_ring_ctl *send = &sc->sc_send;
   2045 	u_int start_producer = send->ec_producer;
   2046 	bus_dmamap_t dma;
   2047 
   2048 	if (send->ec_cur_dmainfo != NULL)
   2049 		dma = send->ec_cur_dmainfo->ed_dma;
   2050 	else
   2051 		dma = send->ec_dma;
   2052 
   2053 #ifdef ESH_PRINTF
   2054 	printf("esh_send:  producer %x  consumer %x  nsegs %d\n",
   2055 	       send->ec_producer, send->ec_consumer, dma->dm_nsegs);
   2056 #endif
   2057 
   2058 	esh_dma_sync(sc, send->ec_descr, send->ec_producer, send->ec_consumer,
   2059 		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
   2060 		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2061 
   2062 	while (NEXT_SEND(send->ec_producer) != send->ec_consumer &&
   2063 	       send->ec_offset < dma->dm_nsegs) {
   2064 		int offset = send->ec_producer;
   2065 
   2066 		send->ec_descr[offset].rd_buffer_addr =
   2067 			dma->dm_segs[send->ec_offset].ds_addr;
   2068 		send->ec_descr[offset].rd_length =
   2069 			dma->dm_segs[send->ec_offset].ds_len;
   2070 		send->ec_descr[offset].rd_control = 0;
   2071 
   2072 		if (send->ec_offset == 0) {
   2073 			/* Start of the dmamap... */
   2074 			send->ec_descr[offset].rd_control |=
   2075 				RR_CT_PACKET_START;
   2076 		}
   2077 
   2078 		if (send->ec_offset + 1 == dma->dm_nsegs) {
   2079 			send->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
   2080 		}
   2081 
   2082 		send->ec_offset++;
   2083 		send->ec_producer = NEXT_SEND(send->ec_producer);
   2084 	}
   2085 
   2086 	/*
   2087 	 * XXX:   we could optimize the dmamap_sync to just get what we've
   2088 	 *        just set up, rather than the whole buffer...
   2089 	 */
   2090 
   2091 	bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
   2092 			BUS_DMASYNC_PREWRITE);
   2093 	esh_dma_sync(sc, send->ec_descr,
   2094 		     start_producer, send->ec_consumer,
   2095 		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
   2096 		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2097 
   2098 #ifdef ESH_PRINTF
   2099 	if (send->ec_offset != dma->dm_nsegs)
   2100 		printf("eshstart:  couldn't fit packet in send ring!\n");
   2101 #endif
   2102 
   2103 	if (sc->sc_version == 1) {
   2104 		esh_send_cmd(sc, RR_CC_SET_SEND_PRODUCER,
   2105 			     0, send->ec_producer);
   2106 	} else {
   2107 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   2108 				  RR_SEND_PRODUCER, send->ec_producer);
   2109 	}
   2110 	return;
   2111 }
   2112 
   2113 
   2114 /*
   2115  * Cleanup for the send routine.  When the NIC sends us an event to
   2116  * let us know that it has consumed our buffers, we need to free the
   2117  * buffers, and possibly send another packet.
   2118  */
   2119 
   2120 static void
   2121 eshstart_cleanup(struct esh_softc *sc, u_int16_t consumer, int error)
   2122 {
   2123 	struct esh_send_ring_ctl *send = &sc->sc_send;
   2124 	int start_consumer = send->ec_consumer;
   2125 	bus_dmamap_t dma;
   2126 
   2127 	if (send->ec_cur_dmainfo != NULL)
   2128 		dma = send->ec_cur_dmainfo->ed_dma;
   2129 	else
   2130 		dma = send->ec_dma;
   2131 
   2132 #ifdef ESH_PRINTF
   2133 	printf("eshstart_cleanup:  consumer %x, send->consumer %x\n",
   2134 	       consumer, send->ec_consumer);
   2135 #endif
   2136 
   2137 	esh_dma_sync(sc, send->ec_descr,
   2138 		     send->ec_consumer, consumer,
   2139 		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
   2140 		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2141 
   2142 	while (send->ec_consumer != consumer) {
   2143 		assert(dma->dm_nsegs);
   2144 		assert(send->ec_cur_mbuf || send->ec_cur_buf ||
   2145 		       send->ec_cur_dmainfo);
   2146 
   2147 		if (send->ec_descr[send->ec_consumer].rd_control &
   2148 		    RR_CT_PACKET_END) {
   2149 #ifdef ESH_PRINT
   2150 			printf("eshstart_cleanup:  dmamap_sync mapsize %d\n",
   2151 			       send->ec_dma->dm_mapsize);
   2152 #endif
   2153 			bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
   2154 					BUS_DMASYNC_POSTWRITE);
   2155 			bus_dmamap_unload(sc->sc_dmat, dma);
   2156 			if (send->ec_cur_mbuf) {
   2157 				m_freem(send->ec_cur_mbuf);
   2158 				send->ec_cur_mbuf = NULL;
   2159 			} else if (send->ec_cur_dmainfo) {
   2160 				send->ec_cur_dmainfo->ed_flags &= ~ESH_DI_BUSY;
   2161 				send->ec_cur_dmainfo->ed_error =
   2162 					(send->ec_error ? send->ec_error : error);
   2163 				send->ec_error = 0;
   2164 				wakeup((void *) send->ec_cur_dmainfo);
   2165 				send->ec_cur_dmainfo = NULL;
   2166 			} else if (send->ec_cur_buf) {
   2167 				biodone(send->ec_cur_buf);
   2168 				send->ec_cur_buf = NULL;
   2169 			} else {
   2170 				panic("%s:  eshstart_cleanup:  "
   2171 				      "no current mbuf, buf, or dmainfo!\n",
   2172 				      device_xname(sc->sc_dev));
   2173 			}
   2174 
   2175 			/*
   2176 			 * Version 1 of the firmware sent an event each
   2177 			 * time it sent out a packet.  Later versions do not
   2178 			 * (which results in a considerable speedup), so we
   2179 			 * have to keep track here.
   2180 			 */
   2181 
   2182 			if (sc->sc_version != 1)
   2183 				sc->sc_if.if_opackets++;
   2184 		}
   2185 		if (error != 0)
   2186 			send->ec_error = error;
   2187 
   2188 		send->ec_consumer = NEXT_SEND(send->ec_consumer);
   2189 	}
   2190 
   2191 	esh_dma_sync(sc, send->ec_descr,
   2192 		     start_consumer, consumer,
   2193 		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
   2194 		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2195 
   2196 	eshstart(&sc->sc_if);
   2197 }
   2198 
   2199 
   2200 /*
   2201  * XXX:  Ouch:  The NIC can only send word-aligned buffers, and only
   2202  *       the last buffer in the packet can have a length that is not
   2203  *       a multiple of four!
   2204  *
   2205  * Here we traverse the packet, pick out the bogus mbufs, and fix 'em
   2206  * if possible.  The fix is amazingly expensive, so we sure hope that
   2207  * this is a rare occurance (it seems to be).
   2208  */
   2209 
   2210 static struct mbuf *
   2211 esh_adjust_mbufs(struct esh_softc *sc, struct mbuf *m)
   2212 {
   2213 	struct mbuf *m0, *n, *n0;
   2214 	u_int32_t write_len;
   2215 
   2216 	write_len = m->m_pkthdr.len;
   2217 #ifdef DIAGNOSTIC
   2218 	if (write_len > max_write_len)
   2219 		max_write_len = write_len;
   2220 #endif
   2221 
   2222 	for (n0 = n = m; n; n = n->m_next) {
   2223 		while (n && n->m_len == 0) {
   2224 			MFREE(n, m0);
   2225 			if (n == m)
   2226 				n = n0 = m = m0;
   2227 			else
   2228 				n = n0->m_next = m0;
   2229 		}
   2230 		if (n == NULL)
   2231 			break;
   2232 
   2233 		if (mtod(n, long) & 3 || (n->m_next && n->m_len & 3)) {
   2234 			/* Gotta clean it up */
   2235 			struct mbuf *o;
   2236 			u_int32_t len;
   2237 
   2238 			sc->sc_misaligned_bufs++;
   2239 			MGETHDR(o, M_DONTWAIT, MT_DATA);
   2240 			if (!o)
   2241 				goto bogosity;
   2242 
   2243 			MCLGET(o, M_DONTWAIT);
   2244 			if (!(o->m_flags & M_EXT)) {
   2245 				MFREE(o, m0);
   2246 				goto bogosity;
   2247 			}
   2248 
   2249 			/*
   2250 			 * XXX: Copy as much as we can into the
   2251 			 *      cluster.  For now we can't have more
   2252 			 *      than a cluster in there.  May change.
   2253 			 *      I'd prefer not to get this
   2254 			 *      down-n-dirty, but we have to be able
   2255 			 *      to do this kind of funky copy.
   2256 			 */
   2257 
   2258 			len = min(MCLBYTES, write_len);
   2259 #ifdef DIAGNOSTIC
   2260 			assert(n->m_len <= len);
   2261 			assert(len <= MCLBYTES);
   2262 #endif
   2263 
   2264 			m_copydata(n, 0, len, mtod(o, void *));
   2265 			o->m_pkthdr.len = len;
   2266 			m_adj(n, len);
   2267 			o->m_len = len;
   2268 			o->m_next = n;
   2269 
   2270 			if (n == m)
   2271 				m = o;
   2272 			else
   2273 				n0->m_next = o;
   2274 			n = o;
   2275 		}
   2276 		n0 = n;
   2277 		write_len -= n->m_len;
   2278 	}
   2279 	return m;
   2280 
   2281 bogosity:
   2282 	aprint_error_dev(sc->sc_dev, "esh_adjust_mbuf:  unable to allocate cluster for "
   2283 	       "mbuf %p, len %x\n",
   2284 	       mtod(m, void *), m->m_len);
   2285 	m_freem(m);
   2286 	return NULL;
   2287 }
   2288 
   2289 
   2290 /*
   2291  * Read in the current valid entries from the ring and forward
   2292  * them to the upper layer protocols.  It is possible that we
   2293  * haven't received the whole packet yet, in which case we just
   2294  * add each of the buffers into the packet until we have the whole
   2295  * thing.
   2296  */
   2297 
   2298 static void
   2299 esh_read_snap_ring(struct esh_softc *sc, u_int16_t consumer, int error)
   2300 {
   2301 	struct ifnet *ifp = &sc->sc_if;
   2302 	struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
   2303 	int start_consumer = recv->ec_consumer;
   2304 	u_int16_t control;
   2305 
   2306 	if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
   2307 		return;
   2308 
   2309 	if (error)
   2310 		recv->ec_error = error;
   2311 
   2312 	esh_dma_sync(sc, recv->ec_descr,
   2313 		     start_consumer, consumer,
   2314 		     RR_SNAP_RECV_RING_SIZE,
   2315 		     sizeof(struct rr_descr), 0,
   2316 		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2317 
   2318 	while (recv->ec_consumer != consumer) {
   2319 		u_int16_t offset = recv->ec_consumer;
   2320 		struct mbuf *m;
   2321 
   2322 		m = recv->ec_m[offset];
   2323 		m->m_len = recv->ec_descr[offset].rd_length;
   2324 		control = recv->ec_descr[offset].rd_control;
   2325 		bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, m->m_len,
   2326 				BUS_DMASYNC_POSTREAD);
   2327 		bus_dmamap_unload(sc->sc_dmat, recv->ec_dma[offset]);
   2328 
   2329 #ifdef ESH_PRINTF
   2330 		printf("esh_read_snap_ring: offset %x addr %p len %x flags %x\n",
   2331 		       offset, mtod(m, void *), m->m_len, control);
   2332 #endif
   2333 		if (control & RR_CT_PACKET_START || !recv->ec_cur_mbuf) {
   2334 			if (recv->ec_cur_pkt) {
   2335 				m_freem(recv->ec_cur_pkt);
   2336 				recv->ec_cur_pkt = NULL;
   2337 				printf("%s:  possible skipped packet!\n",
   2338 				       device_xname(sc->sc_dev));
   2339 			}
   2340 			recv->ec_cur_pkt = recv->ec_cur_mbuf = m;
   2341 			/* allocated buffers all have pkthdrs... */
   2342 			m->m_pkthdr.rcvif = ifp;
   2343 			m->m_pkthdr.len = m->m_len;
   2344 		} else {
   2345 			if (!recv->ec_cur_pkt)
   2346 				panic("esh_read_snap_ring:  no cur_pkt");
   2347 
   2348 			recv->ec_cur_mbuf->m_next = m;
   2349 			recv->ec_cur_mbuf = m;
   2350 			recv->ec_cur_pkt->m_pkthdr.len += m->m_len;
   2351 		}
   2352 
   2353 		recv->ec_m[offset] = NULL;
   2354 		recv->ec_descr[offset].rd_length = 0;
   2355 		recv->ec_descr[offset].rd_buffer_addr = 0;
   2356 
   2357 		/* Note that we can START and END on the same buffer */
   2358 
   2359 		if (control & RR_CT_PACKET_END) { /* XXX: RR2_ matches */
   2360 			m = recv->ec_cur_pkt;
   2361 			if (!error && !recv->ec_error) {
   2362 				/*
   2363 				 * We have a complete packet, send it up
   2364 				 * the stack...
   2365 				 */
   2366 				ifp->if_ipackets++;
   2367 
   2368 				/*
   2369 				 * Check if there's a BPF listener on this
   2370 				 * interface.  If so, hand off the raw packet
   2371 				 * to BPF.
   2372 				 */
   2373 				bpf_mtap(ifp, m);
   2374 				if ((ifp->if_flags & IFF_RUNNING) == 0) {
   2375 					m_freem(m);
   2376 				} else {
   2377 					m = m_pullup(m,
   2378 					    sizeof(struct hippi_header));
   2379 					(*ifp->if_input)(ifp, m);
   2380 				}
   2381 			} else {
   2382 				ifp->if_ierrors++;
   2383 				recv->ec_error = 0;
   2384 				m_freem(m);
   2385 			}
   2386 			recv->ec_cur_pkt = recv->ec_cur_mbuf = NULL;
   2387 		}
   2388 
   2389 		recv->ec_descr[offset].rd_control = 0;
   2390 		recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
   2391 	}
   2392 
   2393 	esh_dma_sync(sc, recv->ec_descr,
   2394 		     start_consumer, consumer,
   2395 		     RR_SNAP_RECV_RING_SIZE,
   2396 		     sizeof(struct rr_descr), 0,
   2397 		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2398 
   2399 	esh_fill_snap_ring(sc);
   2400 }
   2401 
   2402 
   2403 /*
   2404  * Add the SNAP (IEEE 802) receive ring to the NIC.  It is possible
   2405  * that we are doing this after resetting the card, in which case
   2406  * the structures have already been filled in and we may need to
   2407  * resume sending data.
   2408  */
   2409 
   2410 static void
   2411 esh_init_snap_ring(struct esh_softc *sc)
   2412 {
   2413 	struct rr_ring_ctl *ring = sc->sc_recv_ring_table + HIPPI_ULP_802;
   2414 
   2415 	if ((sc->sc_flags & ESH_FL_CLOSING_SNAP) != 0) {
   2416 		aprint_error_dev(sc->sc_dev, "can't reopen SNAP ring until ring disable is completed\n");
   2417 		return;
   2418 	}
   2419 
   2420 	if (ring->rr_entry_size == 0) {
   2421 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
   2422 				(char *) ring - (char *) sc->sc_dma_addr,
   2423 				sizeof(*ring),
   2424 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2425 
   2426 		ring->rr_ring_addr = sc->sc_snap_recv_ring_dma;
   2427 		ring->rr_free_bufs = RR_SNAP_RECV_RING_SIZE / 4;
   2428 		ring->rr_entries = RR_SNAP_RECV_RING_SIZE;
   2429 		ring->rr_entry_size = sizeof(struct rr_descr);
   2430 		ring->rr_prod_index = 0;
   2431 		sc->sc_snap_recv.ec_producer = 0;
   2432 		sc->sc_snap_recv.ec_consumer = 0;
   2433 		ring->rr_mode = RR_RR_IP;
   2434 
   2435 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
   2436 				(char *) ring - (char *) sc->sc_dma_addr,
   2437 				sizeof(ring),
   2438 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2439 		esh_send_cmd(sc, RR_CC_ENABLE_RING, HIPPI_ULP_802,
   2440 			     sc->sc_snap_recv.ec_producer);
   2441 	} else {
   2442 		printf("%s:  snap receive ring already initialized!\n",
   2443 		       device_xname(sc->sc_dev));
   2444 	}
   2445 }
   2446 
   2447 static void
   2448 esh_close_snap_ring(struct esh_softc *sc)
   2449 {
   2450 #ifdef ESH_PRINTF
   2451 	printf("esh_close_snap_ring:  starting\n");
   2452 #endif
   2453 
   2454 	if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
   2455 		return;
   2456 
   2457 	sc->sc_flags |= ESH_FL_CLOSING_SNAP;
   2458 	esh_send_cmd(sc, RR_CC_DISABLE_RING, HIPPI_ULP_802, 0);
   2459 
   2460 	/* Disable event will trigger the rest of the cleanup. */
   2461 }
   2462 
   2463 /*
   2464  * Fill in the snap ring with more mbuf buffers so that we can
   2465  * receive traffic.
   2466  */
   2467 
   2468 static void
   2469 esh_fill_snap_ring(struct esh_softc *sc)
   2470 {
   2471 	struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
   2472 	int start_producer = recv->ec_producer;
   2473 	int error;
   2474 
   2475 	esh_dma_sync(sc, recv->ec_descr,
   2476 		     recv->ec_producer, recv->ec_consumer,
   2477 		     RR_SNAP_RECV_RING_SIZE,
   2478 		     sizeof(struct rr_descr), 1,
   2479 		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2480 
   2481 	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
   2482 		int offset = recv->ec_producer;
   2483 		struct mbuf *m;
   2484 
   2485 		MGETHDR(m, M_DONTWAIT, MT_DATA);
   2486 		if (!m)
   2487 			break;
   2488 		MCLGET(m, M_DONTWAIT);
   2489 		if ((m->m_flags & M_EXT) == 0) {
   2490 			m_free(m);
   2491 			break;
   2492 		}
   2493 
   2494 		error = bus_dmamap_load(sc->sc_dmat, recv->ec_dma[offset],
   2495 					mtod(m, void *), MCLBYTES,
   2496 					NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
   2497 		if (error) {
   2498 			printf("%s:  esh_fill_recv_ring:  bus_dmamap_load "
   2499 			       "failed\toffset %x, error code %d\n",
   2500 			       device_xname(sc->sc_dev), offset, error);
   2501 			m_free(m);
   2502 			break;
   2503 		}
   2504 
   2505 		/*
   2506 		 * In this implementation, we should only see one segment
   2507 		 * per DMA.
   2508 		 */
   2509 
   2510 		assert(recv->ec_dma[offset]->dm_nsegs == 1);
   2511 
   2512 		/*
   2513 		 * Load into the descriptors.
   2514 		 */
   2515 
   2516 		recv->ec_descr[offset].rd_ring =
   2517 			(sc->sc_version == 1) ? HIPPI_ULP_802 : 0;
   2518 		recv->ec_descr[offset].rd_buffer_addr =
   2519 			recv->ec_dma[offset]->dm_segs->ds_addr;
   2520 		recv->ec_descr[offset].rd_length =
   2521 			recv->ec_dma[offset]->dm_segs->ds_len;
   2522 		recv->ec_descr[offset].rd_control = 0;
   2523 
   2524 		bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, MCLBYTES,
   2525 				BUS_DMASYNC_PREREAD);
   2526 
   2527 		recv->ec_m[offset] = m;
   2528 
   2529 		recv->ec_producer = NEXT_RECV(recv->ec_producer);
   2530 	}
   2531 
   2532 	esh_dma_sync(sc, recv->ec_descr,
   2533 		     start_producer, recv->ec_consumer,
   2534 		     RR_SNAP_RECV_RING_SIZE,
   2535 		     sizeof(struct rr_descr), 1,
   2536 		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2537 
   2538 	if (sc->sc_version == 1)
   2539 		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, HIPPI_ULP_802,
   2540 			     recv->ec_producer);
   2541 	else
   2542 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   2543 				  RR_SNAP_RECV_PRODUCER, recv->ec_producer);
   2544 }
   2545 
   2546 static void
   2547 esh_init_fp_rings(struct esh_softc *sc)
   2548 {
   2549 	struct esh_fp_ring_ctl *recv;
   2550 	struct rr_ring_ctl *ring_ctl;
   2551 	int ulp;
   2552 
   2553 	for (ulp = 0; ulp < RR_ULP_COUNT; ulp++) {
   2554 		ring_ctl = &sc->sc_recv_ring_table[ulp];
   2555 		recv = sc->sc_fp_recv[ulp];
   2556 
   2557 		if (recv == NULL)
   2558 			continue;
   2559 
   2560 		ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
   2561 		ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
   2562 		ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
   2563 		ring_ctl->rr_entry_size = sizeof(struct rr_descr);
   2564 		ring_ctl->rr_prod_index = 0;
   2565 		ring_ctl->rr_mode = RR_RR_CHARACTER;
   2566 		recv->ec_producer = 0;
   2567 		recv->ec_consumer = 0;
   2568 		recv->ec_index = -1;
   2569 
   2570 		esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
   2571 	}
   2572 }
   2573 
   2574 static void
   2575 esh_read_fp_ring(struct esh_softc *sc, u_int16_t consumer, int error, int ulp)
   2576 {
   2577 	struct esh_fp_ring_ctl *recv = sc->sc_fp_recv[ulp];
   2578 	int start_consumer = recv->ec_consumer;
   2579 	u_int16_t control;
   2580 
   2581 #ifdef ESH_PRINTF
   2582 	printf("esh_read_fp_ring:  ulp %d, consumer %d, producer %d, old consumer %d\n",
   2583 	       recv->ec_ulp, consumer, recv->ec_producer, recv->ec_consumer);
   2584 #endif
   2585 	if ((sc->sc_flags & ESH_FL_FP_RING_UP) == 0)
   2586 		return;
   2587 
   2588 	if (error != 0)
   2589 		recv->ec_error = error;
   2590 
   2591 	esh_dma_sync(sc, recv->ec_descr,
   2592 		     start_consumer, consumer,
   2593 		     RR_FP_RECV_RING_SIZE,
   2594 		     sizeof(struct rr_descr), 0,
   2595 		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2596 
   2597 	while (recv->ec_consumer != consumer) {
   2598 		u_int16_t offset = recv->ec_consumer;
   2599 
   2600 		control = recv->ec_descr[offset].rd_control;
   2601 
   2602 		if (control & RR_CT_PACKET_START) {
   2603 			if (recv->ec_read_len) {
   2604 				recv->ec_error = 0;
   2605 				printf("%s:  ulp %d: possible skipped FP packet!\n",
   2606 				       device_xname(sc->sc_dev), recv->ec_ulp);
   2607 			}
   2608 			recv->ec_seen_end = 0;
   2609 			recv->ec_read_len = 0;
   2610 		}
   2611 		if (recv->ec_seen_end == 0)
   2612 			recv->ec_read_len += recv->ec_descr[offset].rd_length;
   2613 
   2614 #if NOT_LAME
   2615 		recv->ec_descr[offset].rd_length = 0;
   2616 		recv->ec_descr[offset].rd_buffer_addr = 0;
   2617 #endif
   2618 
   2619 #ifdef ESH_PRINTF
   2620 		printf("esh_read_fp_ring: offset %d addr %d len %d flags %x, total %d\n",
   2621 		       offset, recv->ec_descr[offset].rd_buffer_addr,
   2622 		       recv->ec_descr[offset].rd_length, control, recv->ec_read_len);
   2623 #endif
   2624 		/* Note that we can START and END on the same buffer */
   2625 
   2626 		if ((control & RR_CT_PACKET_END) == RR_CT_PACKET_END) {
   2627 			if (recv->ec_dmainfo[offset] != NULL) {
   2628 				struct esh_dmainfo *di =
   2629 				    recv->ec_dmainfo[offset];
   2630 
   2631 				recv->ec_dmainfo[offset] = NULL;
   2632 				bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
   2633 						0, recv->ec_read_len,
   2634 						BUS_DMASYNC_POSTREAD);
   2635 				bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
   2636 
   2637 				if (!error && !recv->ec_error) {
   2638 				/*
   2639 				 * XXX:  we oughta do this right, with full
   2640 				 *  BPF support and the rest...
   2641 				 */
   2642 					if (di->ed_buf != NULL) {
   2643 						di->ed_buf->b_resid =
   2644 							di->ed_buf->b_bcount -
   2645 							recv->ec_read_len;
   2646 					} else {
   2647 						di->ed_read_len =
   2648 							recv->ec_read_len;
   2649 					}
   2650 				} else {
   2651 					if (di->ed_buf != NULL) {
   2652 						di->ed_buf->b_resid =
   2653 							di->ed_buf->b_bcount;
   2654 						di->ed_buf->b_error = EIO;
   2655 					} else {
   2656 						di->ed_error = EIO;
   2657 						recv->ec_error = 0;
   2658 					}
   2659 				}
   2660 
   2661 #ifdef ESH_PRINTF
   2662 				printf("esh_read_fp_ring:  ulp %d, read %d, resid %ld\n",
   2663 				       recv->ec_ulp, recv->ec_read_len, (di->ed_buf ? di->ed_buf->b_resid : di->ed_read_len));
   2664 #endif
   2665 				di->ed_flags &=
   2666 					~(ESH_DI_BUSY | ESH_DI_READING);
   2667 				if (di->ed_buf != NULL)
   2668 					biodone(di->ed_buf);
   2669 				else
   2670 					wakeup((void *) di);
   2671 				recv->ec_read_len = 0;
   2672 			} else {
   2673 #ifdef ESH_PRINTF
   2674 				printf("esh_read_fp_ring:  ulp %d, seen end at %d\n",
   2675 				       recv->ec_ulp, offset);
   2676 #endif
   2677 				recv->ec_seen_end = 1;
   2678 			}
   2679 		}
   2680 
   2681 #if NOT_LAME
   2682 		recv->ec_descr[offset].rd_control = 0;
   2683 #endif
   2684 		recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
   2685 	}
   2686 
   2687 	esh_dma_sync(sc, recv->ec_descr,
   2688 		     start_consumer, consumer,
   2689 		     RR_SNAP_RECV_RING_SIZE,
   2690 		     sizeof(struct rr_descr), 0,
   2691 		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2692 
   2693 	esh_fill_fp_ring(sc, recv);
   2694 }
   2695 
   2696 
static void
esh_fill_fp_ring(struct esh_softc *sc, struct esh_fp_ring_ctl *recv)
{
	struct esh_dmainfo *di = recv->ec_cur_dmainfo;
	int start_producer = recv->ec_producer;

#ifdef ESH_PRINTF
        printf("esh_fill_fp_ring:  ulp %d, di %p, producer %d\n",
		recv->ec_ulp, di, start_producer);
#endif

	/*
	 * NOTE(review):  the esh_dma_sync calls in this function pass
	 * RR_SNAP_RECV_RING_SIZE, but this is an FP ring, which
	 * esh_init_fp_rings() sets up with RR_FP_RECV_RING_SIZE
	 * entries.  Confirm the two constants are equal;  if not, these
	 * syncs (and the NEXT_RECV wraparound) cover the wrong range.
	 */
	esh_dma_sync(sc, recv->ec_descr,
		     recv->ec_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Post DMA segments of queued reads until the ring is full. */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;

		if (di == NULL) {
			/*
			 * Must allow only one reader at a time; see
			 * esh_flush_fp_ring().
			 */

			if (offset != start_producer)
				goto fp_fill_done;

			di = TAILQ_FIRST(&recv->ec_queue);
			if (di == NULL)
				goto fp_fill_done;
			TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = di;
			di->ed_flags |= ESH_DI_READING;
#ifdef ESH_PRINTF
			printf("\toffset %d nsegs %d\n",
			       recv->ec_offset, di->ed_dma->dm_nsegs);
#endif
		}

		/*
		 * Load into the descriptors.
		 */

		recv->ec_descr[offset].rd_ring = 0;
		recv->ec_descr[offset].rd_buffer_addr =
			di->ed_dma->dm_segs[recv->ec_offset].ds_addr;
		recv->ec_descr[offset].rd_length =
			di->ed_dma->dm_segs[recv->ec_offset].ds_len;
		recv->ec_descr[offset].rd_control = 0;
		recv->ec_dmainfo[offset] = NULL;

		if (recv->ec_offset == 0) {
			/* Start of the dmamap... */
			recv->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		assert(recv->ec_offset < di->ed_dma->dm_nsegs);

		recv->ec_offset++;
		if (recv->ec_offset == di->ed_dma->dm_nsegs) {
			/*
			 * Last segment:  mark the end and stash the
			 * dmainfo so esh_read_fp_ring() can complete it.
			 */
			recv->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
			recv->ec_dmainfo[offset] = di;
			di = NULL;
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = NULL;
		}

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

fp_fill_done:
	esh_dma_sync(sc, recv->ec_descr,
		     start_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);


	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, recv->ec_ulp,
			     recv->ec_producer);
	} else {
		/*
		 * Version 2 packs four ring producer indices into each
		 * 32-bit register;  rebuild the word covering this ring
		 * from our software state and write it back.
		 */
		union {
			u_int32_t producer;
			u_int8_t indices[4];
		} v;
		int which;
		int i;
		struct esh_fp_ring_ctl *r;

		which = (recv->ec_index / 4) * 4;
#if BAD_PRODUCER
		v.producer = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
					      RR_RECVS_PRODUCER + which);
		NTOHL(v.producer);
#endif
		for (i = 0; i < 4; i++) {
			r = sc->sc_fp_recv_index[i + which];
			if (r != NULL)
				v.indices[i] = r->ec_producer;
			else
				v.indices[i] = 0;
		}
#ifdef ESH_PRINTF
		printf("esh_fill_fp_ring:  ulp %d, updating producer %d:  %.8x\n",
			recv->ec_ulp, which, v.producer);
#endif
		HTONL(v.producer);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_RECVS_PRODUCER + which, v.producer);
	}
#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring:  ulp %d, final producer %d\n",
		recv->ec_ulp, recv->ec_producer);
#endif
}
   2817 
   2818 /*
   2819  * When a read is interrupted, we need to flush the buffers out of
   2820  * the ring;  otherwise, a driver error could lock a process up,
   2821  * with no way to exit.
   2822  */
   2823 
   2824 static void
   2825 esh_flush_fp_ring(struct esh_softc *sc, struct esh_fp_ring_ctl *recv, struct esh_dmainfo *di)
   2826 {
   2827 	int error = 0;
   2828 
   2829 	/*
   2830 	 * If the read request hasn't yet made it to the top of the queue,
   2831 	 * just remove it from the queue, and return.
   2832 	 */
   2833 
   2834 	if ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING) {
   2835 		TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
   2836 		return;
   2837 	}
   2838 
   2839 #ifdef ESH_PRINTF
   2840 	printf("esh_flush_fp_ring:  di->ed_flags %x, ulp %d, producer %x\n",
   2841 	       di->ed_flags, recv->ec_ulp, recv->ec_producer);
   2842 #endif
   2843 
   2844 	/* Now we gotta get tough.  Issue a discard packet command */
   2845 
   2846 	esh_send_cmd(sc, RR_CC_DISCARD_PKT, recv->ec_ulp,
   2847 		     recv->ec_producer - 1);
   2848 
   2849 	/* Wait for it to finish */
   2850 
   2851 	while ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING &&
   2852 	       error == 0) {
   2853 		error = tsleep((void *) &di->ed_flags, PRIBIO,
   2854 			       "esh_flush_fp_ring", hz);
   2855 		printf("esh_flush_fp_ring:  di->ed_flags %x, error %d\n",
   2856 		       di->ed_flags, error);
   2857 		/*
   2858 		 * What do I do if this times out or gets interrupted?
   2859 		 * Reset the card?  I could get an interrupt before
   2860 		 * giving it a chance to check.  Perhaps I oughta wait
   2861 		 * awhile?  What about not giving the user a chance
   2862 		 * to interrupt, and just expecting a quick answer?
   2863 		 * That way I could reset the card if it doesn't
   2864 		 * come back right away!
   2865 		 */
   2866 		if (error != 0) {
   2867 			eshreset(sc);
   2868 			break;
   2869 		}
   2870 	}
   2871 
   2872 	/* XXX:  Do we need to clear out the dmainfo pointers */
   2873 }
   2874 
   2875 
/*
 * Standard interface ioctl entry point.  Serializes against in-progress
 * EEPROM writes, brings the interface (and its SNAP receive ring) up or
 * down on address/flag changes, and dispatches driver-specific requests
 * to esh_generic_ioctl().  Returns 0 or an errno.
 */
int
eshioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int error = 0;
	struct esh_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifdrv *ifd = (struct ifdrv *) data;
	u_long len;
	int s;

	s = splnet();

	/*
	 * Don't touch the card while an EEPROM write is in progress;
	 * esh_generic_ioctl() wakes us when it clears ESH_FL_EEPROM_BUSY.
	 * PCATCH lets a signal interrupt the wait.
	 */
	while (sc->sc_flags & ESH_FL_EEPROM_BUSY) {
		error = tsleep(&sc->sc_flags, PCATCH | PRIBIO,
		    "esheeprom", 0);
		if (error != 0)
			goto ioctl_done;
	}

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		/* Initialize the hardware on first use; EIO if that fails. */
		if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
			eshinit(sc);
			if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
				error = EIO;
				goto ioctl_done;
			}
		}

		/*
		 * Runcode is up but the SNAP ring isn't:  bring up the ring,
		 * first waiting out any close that is still in progress.
		 */
		if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP))
		    == ESH_FL_RUNCODE_UP) {
			while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
				error = tsleep((void *) &sc->sc_snap_recv,
					       PRIBIO, "esh_closing_fp_ring",
					       hz);
				if (error != 0)
					goto ioctl_done;
			}
			esh_init_snap_ring(sc);
		}

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			/* The driver doesn't really care about IP addresses */
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */

			ifp->if_flags &= ~IFF_RUNNING;
			esh_close_snap_ring(sc);
			/* Wait for the ring close to complete. */
			while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
				error = tsleep((void *) &sc->sc_snap_recv,
					       PRIBIO, "esh_closing_fp_ring",
					       hz);
				if (error != 0)
					goto ioctl_done;
			}

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {

			/* Marked up but not running:  bring it up. */
			if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
				eshinit(sc);
				if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
					error = EIO;
					goto ioctl_done;
				}
			}

			if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP)) == ESH_FL_RUNCODE_UP) {
				while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
					error = tsleep((void *) &sc->sc_snap_recv, PRIBIO, "esh_closing_fp_ring", hz);
					if (error != 0)
						goto ioctl_done;
				}
				esh_init_snap_ring(sc);
			}
		}
		break;

	case SIOCSDRVSPEC: /* Driver-specific configuration calls */
	        cmd = ifd->ifd_cmd;
		len = ifd->ifd_len;
		data = ifd->ifd_data;

		/* NULL lwp:  privilege was already checked at ifioctl level */
		esh_generic_ioctl(sc, cmd, data, len, NULL);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

ioctl_done:
	splx(s);
	return (error);
}
   2989 
   2990 
/*
 * Driver-specific ioctl handler (tuning parameters, statistics, and
 * EEPROM read/write, plus a full reset).  When called with a non-NULL
 * lwp, destructive operations require network-interface privilege.
 * EEPROM access halts the NIC processor for the duration.
 * Returns 0 or an errno.
 */
static int
esh_generic_ioctl(struct esh_softc *sc, u_long cmd, void *data,
		  u_long len, struct lwp *l)
{
	struct ifnet *ifp = &sc->sc_if;
	struct rr_eeprom rr_eeprom;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t address;
	u_int32_t value;
	u_int32_t offset;
	u_int32_t length;
	int error = 0;
	int i;

	/*
	 * If we have a LWP pointer, check to make sure that the
	 * user is privileged before performing any destruction operations.
	 */

	if (l != NULL) {
		switch (cmd) {
		case EIOCGTUNE:
		case EIOCGEEPROM:
		case EIOCGSTATS:
			/* Read-only requests need no special privilege. */
			break;

		default:
			error = kauth_authorize_network(l->l_cred,
			    KAUTH_NETWORK_INTERFACE,
			    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV,
			    ifp, KAUTH_ARG(cmd), NULL);
			if (error)
				return (error);
		}
	}

	switch (cmd) {
	case EIOCGTUNE:
		/* Copy out the current tuning parameters. */
		if (len != sizeof(struct rr_tuning))
			error = EMSGSIZE;
		else {
			error = copyout((void *) &sc->sc_tune, data,
					sizeof(struct rr_tuning));
		}
		break;

	case EIOCSTUNE:
		/* Tuning may only change while the interface is down. */
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (len != sizeof(struct rr_tuning)) {
				error = EMSGSIZE;
			} else {
				error = copyin(data, (void *) &sc->sc_tune,
					       sizeof(struct rr_tuning));
			}
		} else {
			error = EBUSY;
		}
		break;

	case EIOCGSTATS:
		/* Copy out the NIC-maintained statistics block. */
		if (len != sizeof(struct rr_stats))
			error = EMSGSIZE;
		else
			error = copyout((void *) &sc->sc_gen_info->ri_stats,
					data, sizeof(struct rr_stats));
		break;

	case EIOCGEEPROM:
	case EIOCSEEPROM:
		/* EEPROM access requires the interface to be down. */
		if ((ifp->if_flags & IFF_UP) != 0) {
			error = EBUSY;
			break;
		}

		if (len != sizeof(struct rr_eeprom)) {
			error = EMSGSIZE;
			break;
		}

		error = copyin(data, (void *) &rr_eeprom, sizeof(rr_eeprom));
		if (error != 0)
			break;

		offset = rr_eeprom.ifr_offset;
		length = rr_eeprom.ifr_length;

		/* Bounds-check the requested range against the EEPROM size. */
		if (length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFBIG;
			break;
		}

		if (offset + length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFAULT;
			break;
		}

		/* Transfers must be word-aligned in both offset and length. */
		if (offset % 4 || length % 4) {
			error = EIO;
			break;
		}

		/* Halt the processor (preserve NO_SWAP, if set) */

		misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
		bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
				  (misc_host_ctl & RR_MH_NO_SWAP) |
				  RR_MH_HALT_PROC);

		/* Make the EEPROM accessible */

		misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
		value = misc_local_ctl &
			~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM | RR_LC_PARITY_ON);
		if (cmd == EIOCSEEPROM)   /* make writable! */
			value |= RR_LC_WRITE_PROM;
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, value);

		if (cmd == EIOCSEEPROM) {
			printf("%s:  writing EEPROM\n", device_xname(sc->sc_dev));
			/* Block other ioctls (see eshioctl) until done. */
			sc->sc_flags |= ESH_FL_EEPROM_BUSY;
		}

		/* Do that EEPROM voodoo that you do so well... */

		address = offset * RR_EE_BYTE_LEN;
		for (i = 0; i < length; i += 4) {
			if (cmd == EIOCGEEPROM) {
				value = esh_read_eeprom(sc, address);
				address += RR_EE_WORD_LEN;
				if (copyout(&value,
					    (char *) rr_eeprom.ifr_buffer + i,
					    sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
			} else {
				if (copyin((char *) rr_eeprom.ifr_buffer + i,
					   &value, sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
				if (esh_write_eeprom(sc, address,
						     value) != 0) {
					error = EIO;
					break;
				}

				/*
				 * Have to give up control now and
				 * then, so sleep for a clock tick.
				 * Might be good to figure out how
				 * long a tick is, so that we could
				 * intelligently chose the frequency
				 * of these pauses.
				 */

				if (i % 40 == 0) {
					tsleep(&sc->sc_flags,
					       PRIBIO, "eshweeprom", 1);
				}

				address += RR_EE_WORD_LEN;
			}
		}

		/* Restore the saved local-control register value. */
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
		if (cmd == EIOCSEEPROM) {
			sc->sc_flags &= ~ESH_FL_EEPROM_BUSY;
			wakeup(&sc->sc_flags);
			printf("%s:  done writing EEPROM\n",
			       device_xname(sc->sc_dev));
		}
		break;

	case EIOCRESET:
		eshreset(sc);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
   3179 
   3180 
/*
 * Full reset:  quiesce the hardware, then bring it back up from
 * scratch, all with network interrupts blocked.
 */
void
eshreset(struct esh_softc *sc)
{
	int spl_save;

	spl_save = splnet();
	eshstop(sc);
	eshinit(sc);
	splx(spl_save);
}
   3191 
   3192 /*
   3193  * The NIC expects a watchdog command every 10 seconds.  If it doesn't
   3194  * get the watchdog, it figures the host is dead and stops.  When it does
   3195  * get the command, it'll generate a watchdog event to let the host know
   3196  * that it is still alive.  We watch for this.
   3197  */
   3198 
   3199 void
   3200 eshwatchdog(struct ifnet *ifp)
   3201 {
   3202 	struct esh_softc *sc = ifp->if_softc;
   3203 
   3204 	if (!sc->sc_watchdog) {
   3205 		printf("%s:  watchdog timer expired.  "
   3206 		       "Should reset interface!\n",
   3207 		       device_xname(sc->sc_dev));
   3208 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   3209 		eshstatus(sc);
   3210 #if 0
   3211  		eshstop(sc);  /*   DON'T DO THIS, it'll clear data we
   3212 				   could use to debug it! */
   3213 #endif
   3214 	} else {
   3215 		sc->sc_watchdog = 0;
   3216 
   3217 		esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
   3218 		ifp->if_timer = 5;
   3219 	}
   3220 }
   3221 
   3222 
   3223 /*
   3224  * Stop the NIC and throw away packets that have started to be sent,
   3225  * but didn't make it all the way.  Re-adjust the various queue
   3226  * pointers to account for this.
   3227  */
   3228 
void
eshstop(struct esh_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	int i;

	/* Nothing to tear down if the card was never brought up. */
	if (!(sc->sc_flags & ESH_FL_INITIALIZED))
		return;

	/* Just shut it all down.  This isn't pretty, but it works */

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the NIC processor, preserving the NO_SWAP setting. */
	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
			  (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
	sc->sc_flags = 0;
	ifp->if_timer = 0;  /* turn off watchdog timer */

	/* Unload and free every mbuf still sitting in the SNAP recv ring. */
	while (sc->sc_snap_recv.ec_consumer
               != sc->sc_snap_recv.ec_producer) {
		u_int16_t offset = sc->sc_snap_recv.ec_consumer;

		bus_dmamap_unload(sc->sc_dmat,
				  sc->sc_snap_recv.ec_dma[offset]);
		m_free(sc->sc_snap_recv.ec_m[offset]);
		sc->sc_snap_recv.ec_m[offset] = NULL;
		sc->sc_snap_recv.ec_consumer =
			NEXT_RECV(sc->sc_snap_recv.ec_consumer);
		wakeup((void *) &sc->sc_snap_recv);
	}

	/* Handle FP rings */

	for (i = 0; i < RR_ULP_COUNT; i++) {
		struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[i];
		struct esh_dmainfo *di = NULL;

		if (ring == NULL)
			continue;

		/* Get rid of outstanding buffers */

		esh_dma_sync(sc, ring->ec_descr,
			     ring->ec_consumer, ring->ec_producer,
			     RR_FP_RECV_RING_SIZE, sizeof(struct rr_descr), 0,
			     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Scan forward for the first descriptor with a dmainfo. */
		while (ring->ec_consumer != ring->ec_producer) {
			di = ring->ec_dmainfo[ring->ec_consumer];
			if (di != NULL)
				break;
			ring->ec_consumer = NEXT_RECV(ring->ec_consumer);
		}
		if (di == NULL)
			di = ring->ec_cur_dmainfo;

		/* Fail the outstanding transfer and wake its waiters. */
		if (di != NULL) {
			bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
			di->ed_error = EIO;
			di->ed_flags = 0;
			wakeup((void *) &di->ed_flags);	/* packet discard */
			wakeup((void *) di);		/* wait on read */
		}
		wakeup((void *) &ring->ec_ulp);		/* ring create */
		wakeup((void *) &ring->ec_index);	/* ring disable */
	}

	/* XXX:  doesn't clear bufs being sent */

	/* Abort whichever kind of send (mbuf, buf, or dmainfo) is current. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_send.ec_dma);
	if (sc->sc_send.ec_cur_mbuf) {
		m_freem(sc->sc_send.ec_cur_mbuf);
	} else if (sc->sc_send.ec_cur_buf) {
		struct buf *bp = sc->sc_send.ec_cur_buf;

		bp->b_resid = bp->b_bcount;
		bp->b_error = EIO;
		biodone(bp);
	} else if (sc->sc_send.ec_cur_dmainfo) {
		struct esh_dmainfo *di = sc->sc_send.ec_cur_dmainfo;

		di->ed_flags &= ~ESH_DI_BUSY;
		di->ed_error = EIO;
		wakeup((void *) di);
	}
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;
	sc->sc_send.ec_cur_dmainfo = NULL;

	/*
	 * Clear out the index values, since they'll be useless
	 * when we restart.
	 */

	memset(sc->sc_fp_recv_index, 0,
	      sizeof(struct esh_fp_ring_ctl *) * RR_MAX_RECV_RING);

	/* Be sure to wake up any other processes waiting on driver action. */

	wakeup(sc);		/* Wait on initialization */
	wakeup(&sc->sc_flags);	/* Wait on EEPROM write */

	/*
	 * XXX:  I have to come up with a way to avoid handling interrupts
	 *       received before this shuts down the card, but processed
	 *       afterwards!
	 */
}
   3342 
   3343 /*
   3344  * Read a value from the eeprom.  This expects that the NIC has already
   3345  * been tweaked to put it into the right state for reading from the
   3346  * EEPROM -- the HALT bit is set in the MISC_HOST_CTL register,
   3347  * and the FAST_PROM, ADD_SRAM, and PARITY flags have been cleared
   3348  * in the MISC_LOCAL_CTL register.
   3349  *
   3350  * The EEPROM layout is a little weird.  There is a valid byte every
   3351  * eight bytes.  Words are then smeared out over 32 bytes.
   3352  * All addresses listed here are the actual starting addresses.
   3353  */
   3354 
   3355 static u_int32_t
   3356 esh_read_eeprom(struct esh_softc *sc, u_int32_t addr)
   3357 {
   3358 	int i;
   3359 	u_int32_t tmp;
   3360 	u_int32_t value = 0;
   3361 
   3362 	/* If the offset hasn't been added, add it.  Otherwise pass through */
   3363 
   3364 	if (!(addr & RR_EE_OFFSET))
   3365 		addr += RR_EE_OFFSET;
   3366 
   3367 	for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
   3368 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   3369 				  RR_WINDOW_BASE, addr);
   3370 		tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   3371 				       RR_WINDOW_DATA);
   3372 		value = (value << 8) | ((tmp >> 24) & 0xff);
   3373 	}
   3374 	return value;
   3375 }
   3376 
   3377 
   3378 /*
   3379  * Write a value to the eeprom.  Just like esh_read_eeprom, this routine
   3380  * expects that the NIC has already been tweaked to put it into the right
   3381  * state for reading from the EEPROM.  Things are further complicated
   3382  * in that we need to read each byte after we write it to ensure that
   3383  * the new value has been successfully written.  It can take as long
   3384  * as 1ms (!) to write a byte.
   3385  */
   3386 
/*
 * Write one 32-bit value to the EEPROM (one byte per window write),
 * verifying each byte by polling it back.  Returns 0 on success,
 * -1 if a byte fails to read back within the polling budget.
 */
static int
esh_write_eeprom(struct esh_softc *sc, u_int32_t addr, u_int32_t value)
{
	int i, j;
	u_int32_t shifted_value, tmp = 0;

	/* If the offset hasn't been added, add it.  Otherwise pass through */

	if (!(addr & RR_EE_OFFSET))
		addr += RR_EE_OFFSET;

	for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_WINDOW_BASE, addr);

		/*
		 * Get the byte out of value, starting with the top, and
		 * put it into the top byte of the word to write.
		 */

		shifted_value = ((value >> ((3 - i) * 8)) & 0xff) << 24;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, RR_WINDOW_DATA,
				  shifted_value);
		/* Poll until the byte reads back, up to 50 tries. */
		for (j = 0; j < 50; j++) {
			tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
					       RR_WINDOW_DATA);
			if (tmp == shifted_value)
				break;
			delay(500);  /* 500us per poll, up to ~25ms total */
		}
		if (tmp != shifted_value)
			return -1;
	}

	return 0;
}
   3423 
   3424 
   3425 /*
   3426  * Send a command to the NIC.  If there is no room in the command ring,
   3427  * panic.
   3428  */
   3429 
static void
esh_send_cmd(struct esh_softc *sc, u_int8_t cmd, u_int8_t ring, u_int8_t index)
{
	union rr_cmd c;

/*
 * Advance the command producer.  Note this *decrements* the index
 * modulo the 16-entry ring ((i + 16 - 1) & 0xf); the command ring is
 * walked downward.  This macro stays visible to the rest of the file.
 */
#define NEXT_CMD(i) (((i) + 0x10 - 1) & 0x0f)

	/* Pack the command code, ring, and index into one 32-bit word. */
	c.l = 0;
	c.b.rc_code = cmd;
	c.b.rc_ring = ring;
	c.b.rc_index = index;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  RR_COMMAND_RING + sizeof(c) * sc->sc_cmd_producer,
			  c.l);

#ifdef ESH_PRINTF
	/* avoid annoying messages when possible */
	if (cmd != RR_CC_WATCHDOG)
		printf("esh_send_cmd:  cmd %x ring %d index %d slot %x\n",
		       cmd, ring, index, sc->sc_cmd_producer);
#endif

	sc->sc_cmd_producer = NEXT_CMD(sc->sc_cmd_producer);
}
   3455 
   3456 
   3457 /*
   3458  * Write an address to the device.
   3459  * XXX:  This belongs in bus-dependent land!
   3460  */
   3461 
static void
esh_write_addr(bus_space_tag_t iot, bus_space_handle_t ioh, bus_addr_t addr, bus_addr_t value)
{
	/*
	 * The device takes the address as two consecutive 32-bit
	 * registers:  the high word (always 0 here) first, then the
	 * low word.  The write order is fixed by the hardware layout.
	 */
	bus_space_write_4(iot, ioh, addr, 0);
	bus_space_write_4(iot, ioh, addr + sizeof(u_int32_t), value);
}
   3468 
   3469 
   3470 /* Copy the RunCode from EEPROM to SRAM.  Ughly. */
   3471 
static void
esh_reset_runcode(struct esh_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t value;
	u_int32_t len;
	u_int32_t i;
	u_int32_t segments;
	u_int32_t ee_addr;
	u_int32_t rc_addr;
	u_int32_t sram_addr;

	/* Zero the SRAM */

	for (i = 0; i < sc->sc_sram_size; i += 4) {
		bus_space_write_4(iot, ioh, RR_WINDOW_BASE, i);
		bus_space_write_4(iot, ioh, RR_WINDOW_DATA, 0);
	}

	/* Find the address of the segment description section */

	rc_addr = esh_read_eeprom(sc, RR_EE_RUNCODE_SEGMENTS);
	segments = esh_read_eeprom(sc, rc_addr);

	/*
	 * Each segment descriptor is three EEPROM words:  SRAM load
	 * address, word count, and EEPROM source address.
	 */
	for (i = 0; i < segments; i++) {
		rc_addr += RR_EE_WORD_LEN;
		sram_addr = esh_read_eeprom(sc, rc_addr);
		rc_addr += RR_EE_WORD_LEN;
		len = esh_read_eeprom(sc, rc_addr);
		rc_addr += RR_EE_WORD_LEN;
		ee_addr = esh_read_eeprom(sc, rc_addr);

		/* Copy the segment word-by-word through the window regs. */
		while (len--) {
			value = esh_read_eeprom(sc, ee_addr);
			bus_space_write_4(iot, ioh, RR_WINDOW_BASE, sram_addr);
			bus_space_write_4(iot, ioh, RR_WINDOW_DATA, value);

			ee_addr += RR_EE_WORD_LEN;
			sram_addr += 4;
		}
	}
}
   3515 
   3516 
   3517 /*
   3518  * Perform bus DMA syncing operations on various rings.
   3519  * We have to worry about our relative position in the ring,
   3520  * and whether the ring has wrapped.  All of this code should take
   3521  * care of those worries.
   3522  */
   3523 
   3524 static void
   3525 esh_dma_sync(struct esh_softc *sc, void *mem, int start, int end, int entries, int size, int do_equal, int ops)
   3526 {
   3527 	int offset = (char *)mem - (char *)sc->sc_dma_addr;
   3528 
   3529 	if (start < end) {
   3530 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
   3531 				offset + start * size,
   3532 				(end - start) * size, ops);
   3533 	} else if (do_equal || start != end) {
   3534 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
   3535 				offset,
   3536 				end * size, ops);
   3537 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
   3538 				offset + start * size,
   3539 				(entries - start) * size, ops);
   3540 	}
   3541 }
   3542 
   3543 
   3544 static struct esh_dmainfo *
   3545 esh_new_dmainfo(struct esh_softc *sc)
   3546 {
   3547 	struct esh_dmainfo *di;
   3548 	int s;
   3549 
   3550 	s = splnet();
   3551 
   3552 	di = TAILQ_FIRST(&sc->sc_dmainfo_freelist);
   3553 	if (di != NULL) {
   3554 		TAILQ_REMOVE(&sc->sc_dmainfo_freelist, di, ed_list);
   3555 		sc->sc_dmainfo_freelist_count--;
   3556 		splx(s);
   3557 		return di;
   3558 	}
   3559 
   3560 	/* None sitting around, so build one now... */
   3561 
   3562 	di = (struct esh_dmainfo *) malloc(sizeof(*di), M_DEVBUF,
   3563 	    M_WAITOK|M_ZERO);
   3564 	assert(di != NULL);
   3565 
   3566 	if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
   3567 			      ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
   3568 			      BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
   3569 			      &di->ed_dma)) {
   3570 		printf("%s:  failed dmainfo bus_dmamap_create\n",
   3571 		       device_xname(sc->sc_dev));
   3572 		free(di,  M_DEVBUF);
   3573 		di = NULL;
   3574 	}
   3575 
   3576 	splx(s);
   3577 	return di;
   3578 }
   3579 
   3580 static void
   3581 esh_free_dmainfo(struct esh_softc *sc, struct esh_dmainfo *di)
   3582 {
   3583 	int s = splnet();
   3584 
   3585 	assert(di != NULL);
   3586 	di->ed_buf = NULL;
   3587 	TAILQ_INSERT_TAIL(&sc->sc_dmainfo_freelist, di, ed_list);
   3588 	sc->sc_dmainfo_freelist_count++;
   3589 #ifdef ESH_PRINTF
   3590 	printf("esh_free_dmainfo:  freelist count %d\n", sc->sc_dmainfo_freelist_count);
   3591 #endif
   3592 
   3593 	splx(s);
   3594 }
   3595 
   3596 
   3597 /* ------------------------- debugging functions --------------------------- */
   3598 
   3599 /*
   3600  * Print out status information about the NIC and the driver.
   3601  */
   3602 
/*
 * Dump a snapshot of the NIC's status registers and any non-zero
 * statistics counters to the console.  Debug aid only; always returns 0.
 */
static int
eshstatus(struct esh_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/* XXX:   This looks pathetic, and should be improved! */

	printf("%s:  status -- fail1 %x fail2 %x\n",
	       device_xname(sc->sc_dev),
	       bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL1),
	       bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL2));
	printf("\tmisc host ctl %x  misc local ctl %x\n",
	       bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL),
	       bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL));
	printf("\toperating mode %x  event producer %x\n",
	       bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS),
	       bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER));
	printf("\tPC %x  max rings %x\n",
	       bus_space_read_4(iot, ioh, RR_PROC_PC),
	       bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS));
	printf("\tHIPPI tx state %x  rx state %x\n",
	       bus_space_read_4(iot, ioh, RR_TX_STATE),
	       bus_space_read_4(iot, ioh, RR_RX_STATE));
	printf("\tDMA write state %x  read state %x\n",
	       bus_space_read_4(iot, ioh, RR_DMA_WRITE_STATE),
	       bus_space_read_4(iot, ioh, RR_DMA_READ_STATE));
	/* 64-bit DMA addresses are two 32-bit registers each. */
	printf("\tDMA write addr %x%x  read addr %x%x\n",
	       bus_space_read_4(iot, ioh, RR_WRITE_HOST),
	       bus_space_read_4(iot, ioh, RR_WRITE_HOST + 4),
	       bus_space_read_4(iot, ioh, RR_READ_HOST),
	       bus_space_read_4(iot, ioh, RR_READ_HOST + 4));

	/* Print only the statistics slots that are non-zero. */
	for (i = 0; i < 64; i++)
		if (sc->sc_gen_info->ri_stats.rs_stats[i])
			printf("stat %x is %x\n", i * 4,
			       sc->sc_gen_info->ri_stats.rs_stats[i]);

	return 0;
}
   3644 
   3645 
   3646 #ifdef ESH_PRINTF
   3647 
   3648 /* Check to make sure that the NIC is still running */
   3649 
   3650 static int
   3651 esh_check(struct esh_softc *sc)
   3652 {
   3653 	bus_space_tag_t iot = sc->sc_iot;
   3654 	bus_space_handle_t ioh = sc->sc_ioh;
   3655 
   3656 	if (bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL) & RR_MH_HALT_PROC) {
   3657 		printf("esh_check:  NIC stopped\n");
   3658 		eshstatus(sc);
   3659 		return 1;
   3660 	} else {
   3661 		return 0;
   3662 	}
   3663 }
   3664 #endif
   3665 
   3666