Home | History | Annotate | Line # | Download | only in marvell
      1  1.13  riastrad /*	$NetBSD: if_gfevar.h,v 1.13 2015/04/14 20:32:36 riastradh Exp $	*/
      2   1.1      matt 
      3   1.1      matt /*
      4   1.1      matt  * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
      5   1.1      matt  * All rights reserved.
      6   1.1      matt  *
      7   1.1      matt  * Redistribution and use in source and binary forms, with or without
      8   1.1      matt  * modification, are permitted provided that the following conditions
      9   1.1      matt  * are met:
     10   1.1      matt  * 1. Redistributions of source code must retain the above copyright
     11   1.1      matt  *    notice, this list of conditions and the following disclaimer.
     12   1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     13   1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     14   1.1      matt  *    documentation and/or other materials provided with the distribution.
     15   1.1      matt  * 3. All advertising materials mentioning features or use of this software
     16   1.1      matt  *    must display the following acknowledgement:
     17   1.1      matt  *      This product includes software developed for the NetBSD Project by
     18   1.1      matt  *      Allegro Networks, Inc., and Wasabi Systems, Inc.
     19   1.1      matt  * 4. The name of Allegro Networks, Inc. may not be used to endorse
     20   1.1      matt  *    or promote products derived from this software without specific prior
     21   1.1      matt  *    written permission.
     22   1.1      matt  * 5. The name of Wasabi Systems, Inc. may not be used to endorse
     23   1.1      matt  *    or promote products derived from this software without specific prior
     24   1.1      matt  *    written permission.
     25   1.1      matt  *
     26   1.1      matt  * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
     27   1.1      matt  * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
     28   1.1      matt  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
     29   1.1      matt  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     30   1.1      matt  * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
     31   1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32   1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33   1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34   1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35   1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36   1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37   1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     38   1.1      matt  */
     39  1.10  kiyohara #ifndef _IF_GFEVAR_H_
     40  1.10  kiyohara #define _IF_GFEVAR_H_
     41   1.1      matt 
     42  1.13  riastrad #include <sys/rndsource.h>
     43  1.13  riastrad 
     44   1.4   thorpej #define	GE_RXDESC_MEMSIZE		(1 * PAGE_SIZE)
     45   1.1      matt #define	GE_RXDESC_MAX			64
     46   1.1      matt #define	GE_RXBUF_SIZE			2048
     47   1.1      matt #define	GE_RXBUF_MEMSIZE		(GE_RXDESC_MAX*GE_RXBUF_SIZE)
     48   1.4   thorpej #define	GE_RXBUF_NSEGS			((GE_RXBUF_MEMSIZE/PAGE_SIZE)+1)
     49   1.1      matt #define	GE_DMSEG_MAX			(GE_RXBUF_NSEGS)
     50   1.1      matt 
/*
 * gfe_dmamem: bookkeeping for one bus_dma(9)-allocated memory region.
 * Used for descriptor rings, packet buffers, and the address hash table;
 * holds the DMA map, the kernel mapping, and the backing segment list.
 */
struct gfe_dmamem {
	bus_dmamap_t gdm_map;		/* DMA map covering the region */
	void *gdm_kva;			/* kernel virtual address of the region */
	int gdm_nsegs;			/* # of segments actually in gdm_segs */
	int gdm_maxsegs;		/* maximum # of segments allowed */
	size_t gdm_size;		/* size of memory region in bytes */
	bus_dma_segment_t gdm_segs[GE_DMSEG_MAX]; /* backing DMA segments */
};
     59   1.1      matt 
     60   1.1      matt /* With a 4096 page size, we get 256 descriptors per page.
     61   1.1      matt  */
     62   1.5      matt #define	GE_TXDESC_MEMSIZE		(1 * PAGE_SIZE)
     63   1.5      matt #define	GE_TXDESC_MAX			(GE_TXDESC_MEMSIZE / 16)
     64   1.4   thorpej #define	GE_TXBUF_SIZE			(4 * PAGE_SIZE)
     65   1.1      matt 
/*
 * gfe_txqueue: per-priority transmit queue state.  One instance per
 * hardware transmit priority (see gfe_softc::sc_txq).  The descriptor
 * ring and the packet staging buffer each live in their own DMA region.
 */
struct gfe_txqueue {
	struct ifqueue txq_pendq;	/* these are ready to go to the GT */
	struct gfe_dmamem txq_desc_mem;	/* transmit descriptor memory */
	struct gfe_dmamem txq_buf_mem;	/* transmit buffer memory */
	unsigned int txq_lo;		/* next to be given to GT */
	unsigned int txq_fi; 		/* next to be returned to CPU */
	unsigned int txq_ei_gapcount;	/* counter until next EI */
	unsigned int txq_nactive;	/* number of active descriptors */
	unsigned int txq_outptr;	/* where to put next transmit packet */
	unsigned int txq_inptr;		/* start of 1st queued tx packet */
	uint32_t txq_intrbits;		/* bits to write to EIMR */
	uint32_t txq_esdcmrbits;	/* bits to write to ESDCMR */
	uint32_t txq_epsrbits;		/* bits to test with EPSR */
	volatile struct gt_eth_desc *txq_descs; /* ptr to tx descriptors */
	bus_addr_t txq_ectdp;		/* offset to cur. tx desc ptr reg */
	bus_addr_t txq_desc_busaddr;	/* bus addr of tx descriptors */
	bus_addr_t txq_buf_busaddr;	/* bus addr of tx buffers */
};
     84   1.1      matt 
/*
 * Receive descriptor sizing: with a 4096-byte page we get 256
 * descriptors per page.  We would like 1024 descriptors, which buys
 * roughly 8ms worth of 64-byte packets (about 2ms per priority queue).
 */
     89   1.1      matt 
/*
 * gfe_rxbuf: one fixed-size (GE_RXBUF_SIZE-byte) receive buffer;
 * rxq_bufs below points at an array of these inside rxq_buf_mem.
 */
struct gfe_rxbuf {
	uint8_t	rxb_data[GE_RXBUF_SIZE];
};
     93   1.1      matt 
/*
 * gfe_rxqueue: per-priority receive queue state.  One instance per
 * hardware receive priority (see gfe_softc::sc_rxq).
 */
struct gfe_rxqueue {
	struct gfe_dmamem rxq_desc_mem;	/* receive descriptor memory */
	struct gfe_dmamem rxq_buf_mem;	/* receive buffer memory */
	struct mbuf *rxq_curpkt;	/* mbuf for current packet */
	volatile struct gt_eth_desc *rxq_descs;	/* ptr to rx descriptors */
	struct gfe_rxbuf *rxq_bufs;	/* kva of the rx buffer array */
	unsigned int rxq_fi; 		/* next to be returned to CPU */
	unsigned int rxq_active;	/* # of descriptors given to GT */
	uint32_t rxq_intrbits;		/* bits to write to EIMR */
	bus_addr_t rxq_desc_busaddr;	/* bus addr of rx descriptors */
	uint32_t rxq_cmdsts;		/* save cmdsts from first descriptor */
	bus_size_t rxq_efrdp;		/* NOTE(review): presumably offset of
					 * the EFRDP (first rx desc ptr)
					 * register for this queue — confirm */
	bus_size_t rxq_ecrdp;		/* NOTE(review): presumably offset of
					 * the ECRDP (current rx desc ptr)
					 * register for this queue — confirm */
};
    108   1.1      matt 
/*
 * Transmit priority selector.  The numeric values index sc_txq[] and
 * must not change; GE_TXPRIO_NONE means "no transmit queue".
 */
enum gfe_txprio {
	GE_TXPRIO_LO	= 0,
	GE_TXPRIO_HI	= 1,
	GE_TXPRIO_NONE	= 2
};
/*
 * Receive priority selector.  The numeric values index sc_rxq[] and
 * must not change.
 */
enum gfe_rxprio {
	GE_RXPRIO_LO	= 0,
	GE_RXPRIO_MEDLO	= 1,
	GE_RXPRIO_MEDHI	= 2,
	GE_RXPRIO_HI	= 3
};
    120   1.1      matt 
/*
 * gfec_softc: softc for the ethernet controller parent device; holds
 * the bus-space mapping and a mutex shared by the per-MAC children.
 */
struct gfec_softc {
	device_t sc_dev;		/* must be first */

	bus_space_tag_t sc_iot;		/* bus space tag */
	bus_space_handle_t sc_ioh;	/* subregion for ethernet */

	kmutex_t sc_mtx;		/* NOTE(review): presumably serializes
					 * register access shared by child
					 * MACs — confirm against driver */
};
    129  1.10  kiyohara 
/*
 * gfe_softc: per-MAC softc for one GT ethernet interface.  Carries the
 * ethercom glue, MII state, register mapping, the DMA'ble hash table,
 * and the transmit/receive queue state for every hardware priority.
 */
struct gfe_softc {
	device_t sc_dev;		/* must be first */
	struct ethercom sc_ec;		/* common ethernet glue */
	struct callout sc_co;		/* resource recovery */
	mii_data_t sc_mii;		/* mii interface */

	bus_space_tag_t sc_memt;	/* bus space tag */
	bus_space_handle_t sc_memh;	/* subregion for ethernet */
	bus_dma_tag_t sc_dmat;		/* DMA tag for rings/buffers */
	int sc_macno;			/* which mac? 0, 1, or 2 */

	/* deferred-work flags, serviced from the callout tick */
	unsigned int sc_tickflags;
#define	GE_TICK_TX_IFSTART	0x0001	/* restart transmit from tick */
#define	GE_TICK_RX_RESTART	0x0002	/* restart receive from tick */
	/* general state flags */
	unsigned int sc_flags;
#define	GE_ALLMULTI	0x0001		/* receiving all multicast */
#define	GE_PHYSTSCHG	0x0002		/* PHY status change handling */
#define	GE_RXACTIVE	0x0004		/* receive side is running */
#define	GE_NOFREE	0x0008		/* Don't free on disable */
	uint32_t sc_pcr;		/* current EPCR value */
	uint32_t sc_pcxr;		/* current EPCXR value */
	uint32_t sc_intrmask;		/* current EIMR value */
	uint32_t sc_idlemask;		/* suspended EIMR bits */
	size_t sc_max_frame_length;	/* maximum frame length */

	/*
	 * Hash table related members
	 */
	struct gfe_dmamem sc_hash_mem;	/* dma'ble hash table */
	uint64_t *sc_hashtable;		/* kva of the hash table entries */
	unsigned int sc_hashmask;	/* 0x1ff or 0x1fff */

	/*
	 * Transmit related members
	 */
	struct gfe_txqueue sc_txq[2];	/* High & Low transmit queues */

	/*
	 * Receive related members
	 */
	struct gfe_rxqueue sc_rxq[4];	/* Hi/MedHi/MedLo/Lo receive queues */

	krndsource_t sc_rnd_source;	/* entropy source for rnd(9) */
};
    174  1.10  kiyohara #endif	/* _IF_GFEVAR_H_ */
    175