1 1.1.4.5 skrll /* $NetBSD: i82596.c,v 1.1.4.5 2004/09/21 13:27:57 skrll Exp $ */
2 1.1.4.2 skrll
3 1.1.4.2 skrll /*
4 1.1.4.2 skrll * Copyright (c) 2003 Jochen Kunz.
5 1.1.4.2 skrll * All rights reserved.
6 1.1.4.2 skrll *
7 1.1.4.2 skrll * Redistribution and use in source and binary forms, with or without
8 1.1.4.2 skrll * modification, are permitted provided that the following conditions
9 1.1.4.2 skrll * are met:
10 1.1.4.2 skrll * 1. Redistributions of source code must retain the above copyright
11 1.1.4.2 skrll * notice, this list of conditions and the following disclaimer.
12 1.1.4.2 skrll * 2. Redistributions in binary form must reproduce the above copyright
13 1.1.4.2 skrll * notice, this list of conditions and the following disclaimer in the
14 1.1.4.2 skrll * documentation and/or other materials provided with the distribution.
15 1.1.4.2 skrll * 3. The name of Jochen Kunz may not be used to endorse or promote
16 1.1.4.2 skrll * products derived from this software without specific prior
17 1.1.4.2 skrll * written permission.
18 1.1.4.2 skrll *
19 1.1.4.2 skrll * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
20 1.1.4.2 skrll * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1.4.2 skrll * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1.4.2 skrll * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JOCHEN KUNZ
23 1.1.4.2 skrll * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1.4.2 skrll * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1.4.2 skrll * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1.4.2 skrll * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1.4.2 skrll * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1.4.2 skrll * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1.4.2 skrll * POSSIBILITY OF SUCH DAMAGE.
30 1.1.4.2 skrll */
31 1.1.4.2 skrll
32 1.1.4.2 skrll /*
33 1.1.4.2 skrll * Driver for the Intel i82596 10MBit/s Ethernet chip.
34 1.1.4.2 skrll * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old i82586
35 1.1.4.2 skrll * ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596 to
36 1.1.4.2 skrll * i82586 compatibility mode.
37 1.1.4.2 skrll * Documentation about this chip can be found at http://www.openpa.net/,
38 1.1.4.2 skrll * file names 29021806.pdf and 29021906.pdf.
39 1.1.4.2 skrll */
40 1.1.4.2 skrll
41 1.1.4.2 skrll #include <sys/cdefs.h>
42 1.1.4.5 skrll __KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.1.4.5 2004/09/21 13:27:57 skrll Exp $");
43 1.1.4.2 skrll
44 1.1.4.2 skrll /* autoconfig and device stuff */
45 1.1.4.2 skrll #include <sys/param.h>
46 1.1.4.2 skrll #include <sys/device.h>
47 1.1.4.2 skrll #include <sys/conf.h>
48 1.1.4.2 skrll #include <machine/iomod.h>
49 1.1.4.2 skrll #include <machine/autoconf.h>
50 1.1.4.2 skrll #include "locators.h"
51 1.1.4.2 skrll #include "ioconf.h"
52 1.1.4.2 skrll
53 1.1.4.2 skrll /* bus_space / bus_dma etc. */
54 1.1.4.2 skrll #include <machine/bus.h>
55 1.1.4.2 skrll #include <machine/intr.h>
56 1.1.4.2 skrll
57 1.1.4.2 skrll /* general system data and functions */
58 1.1.4.2 skrll #include <sys/systm.h>
59 1.1.4.2 skrll #include <sys/ioctl.h>
60 1.1.4.2 skrll #include <sys/ioccom.h>
61 1.1.4.2 skrll #include <sys/types.h>
62 1.1.4.2 skrll
63 1.1.4.2 skrll /* tsleep / sleep / wakeup */
64 1.1.4.2 skrll #include <sys/proc.h>
65 1.1.4.2 skrll /* hz for above */
66 1.1.4.2 skrll #include <sys/kernel.h>
67 1.1.4.2 skrll
68 1.1.4.2 skrll /* network stuff */
69 1.1.4.2 skrll #include <net/if.h>
70 1.1.4.2 skrll #include <net/if_dl.h>
71 1.1.4.2 skrll #include <net/if_media.h>
72 1.1.4.2 skrll #include <net/if_ether.h>
73 1.1.4.2 skrll #include <sys/socket.h>
74 1.1.4.2 skrll #include <sys/mbuf.h>
75 1.1.4.2 skrll
76 1.1.4.2 skrll #include "bpfilter.h"
77 1.1.4.2 skrll #if NBPFILTER > 0
78 1.1.4.2 skrll #include <net/bpf.h>
79 1.1.4.2 skrll #endif
80 1.1.4.2 skrll
81 1.1.4.2 skrll #include <dev/ic/i82596reg.h>
82 1.1.4.2 skrll #include <dev/ic/i82596var.h>
83 1.1.4.2 skrll
84 1.1.4.2 skrll
85 1.1.4.2 skrll
86 1.1.4.2 skrll /* Supported chip variants */
87 1.1.4.2 skrll char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };
88 1.1.4.2 skrll
89 1.1.4.2 skrll
90 1.1.4.2 skrll
91 1.1.4.2 skrll /* media change and status callback */
92 1.1.4.2 skrll static int iee_mediachange(struct ifnet *);
93 1.1.4.2 skrll static void iee_mediastatus(struct ifnet *, struct ifmediareq *);
94 1.1.4.2 skrll
95 1.1.4.2 skrll /* interface routines to upper protocols */
96 1.1.4.2 skrll static void iee_start(struct ifnet *); /* initiate output */
97 1.1.4.2 skrll static int iee_ioctl(struct ifnet *, u_long, caddr_t); /* ioctl routine */
98 1.1.4.2 skrll static int iee_init(struct ifnet *); /* init routine */
99 1.1.4.2 skrll static void iee_stop(struct ifnet *, int); /* stop routine */
100 1.1.4.2 skrll static void iee_watchdog(struct ifnet *); /* timer routine */
101 1.1.4.2 skrll static void iee_drain(struct ifnet *); /* release resources */
102 1.1.4.2 skrll
103 1.1.4.2 skrll /* internal helper functions */
104 1.1.4.2 skrll static void iee_cb_setup(struct iee_softc *, u_int32_t);
105 1.1.4.2 skrll
106 1.1.4.2 skrll /*
107 1.1.4.2 skrll Things a MD frontend has to provide:
108 1.1.4.2 skrll
109 1.1.4.2 skrll The functions via function pointers in the softc:
110 1.1.4.2 skrll int (*sc_iee_cmd)(struct iee_softc *sc, u_int32_t cmd);
111 1.1.4.2 skrll int (*sc_iee_reset)(struct iee_softc *sc);
112 1.1.4.2 skrll void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
113 1.1.4.2 skrll int (*sc_mediachange)(struct ifnet *);
114 1.1.4.2 skrll
115 1.1.4.2 skrll sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
116 1.1.4.2 skrll to the SCB cmd word and issuing a Channel Attention.
117 1.1.4.2 skrll sc_iee_reset(): initiate a reset, supply the address of the SCP to the
118 1.1.4.2 skrll chip, wait for the chip to initialize and ACK interrupts that
119 1.1.4.2 skrll this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
120 1.1.4.2 skrll These functions must carefully bus_dmamap_sync() all data they have touched!
121 1.1.4.2 skrll
122 1.1.4.2 skrll sc_mediastatus() and sc_mediachange() are just MD hooks to the corresponding
123 1.1.4.2 skrll MI functions. The MD frontend may set these pointers to NULL when they
124 1.1.4.2 skrll are not needed.
125 1.1.4.2 skrll
126 1.1.4.2 skrll sc->sc_type has to be set to I82596_UNKNOWN or I82596_DX or I82596_CA.
127 1.1.4.2 skrll This is for printing out the correct chip type at attach time only. The
128 1.1.4.2 skrll MI backend doesn't distinguish different chip types when programming
129 1.1.4.2 skrll the chip.
130 1.1.4.2 skrll
131 1.1.4.2 skrll sc->sc_flags has to be set to 0 on little endian hardware and to
132 1.1.4.2 skrll IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
133 1.1.4.2 skrll done by the bus attachment. Usually you need to set IEE_NEED_SWAP
134 1.1.4.2 skrll when IEE_SYSBUS_BE is set in the sysbus byte.
135 1.1.4.2 skrll
136 1.1.4.2 skrll sc->sc_cl_align must be set to 1 or to the cache line size. When set to
137 1.1.4.2 skrll 1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
138 1.1.4.2 skrll it forces alignment of the data structures in the shared memory to a multiple
139 1.1.4.2 skrll of sc->sc_cl_align. This is needed on archs like hp700 that have non DMA
140 1.1.4.2 skrll I/O coherent caches and are unable to map the shared memory uncacheable.
141 1.1.4.2 skrll (At least pre PA7100LC CPUs are unable to map memory uncacheable.)
142 1.1.4.2 skrll
143 1.1.4.2 skrll sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
144 1.1.4.2 skrll SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)
145 1.1.4.2 skrll
146 1.1.4.2 skrll The MD frontend has to allocate a piece of DMA memory of at least
147 1.1.4.2 skrll IEE_SHMEM_MAX bytes in size. All communication with the chip is done via
148 1.1.4.2 skrll this shared memory. If possible map this memory non-cacheable on
149 1.1.4.2 skrll archs with non DMA I/O coherent caches. The base of the memory needs
150 1.1.4.2 skrll to be aligned to an even address if sc->sc_cl_align == 1 and aligned
151 1.1.4.2 skrll to a cache line if sc->sc_cl_align != 1.
152 1.1.4.2 skrll
153 1.1.4.2 skrll An interrupt with iee_intr() as handler must be established.
154 1.1.4.2 skrll
155 1.1.4.2 skrll Call void iee_attach(struct iee_softc *sc, u_int8_t *ether_address,
156 1.1.4.2 skrll int *media, int nmedia, int defmedia); when everything is set up. First
157 1.1.4.2 skrll parameter is a pointer to the MI softc, ether_address is an array that
158 1.1.4.2 skrll contains the ethernet address. media is an array of the media types
159 1.1.4.2 skrll provided by the hardware. The members of this array are supplied to
160 1.1.4.2 skrll ifmedia_add() in sequence. nmedia is the count of elements in media.
161 1.1.4.2 skrll defmedia is the default media that is set via ifmedia_set().
162 1.1.4.2 skrll nmedia and defmedia are ignored when media == NULL.
163 1.1.4.2 skrll
164 1.1.4.2 skrll The MD frontend may call iee_detach() to detach the device.
165 1.1.4.2 skrll
166 1.1.4.2 skrll See sys/arch/hp700/gsc/if_iee.c for an example.
167 1.1.4.2 skrll */
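/*
A minimal sketch of the MD frontend glue described above, assuming a
hypothetical bus attachment. Every name starting with "xx" and the chosen
values for sc_type, sc_flags and sc_cl_align are illustrative assumptions
only, not part of this driver; see sys/arch/hp700/gsc/if_iee.c for a real
frontend. xx_channel_attention() stands for whatever register access
asserts CA on the hardware and xx_iee_reset() for the board reset routine.

	static int
	xx_iee_cmd(struct iee_softc *sc, u_int32_t cmd)
	{
		SC_SCB->scb_cmd = cmd;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0,
		    IEE_SHMEM_MAX, BUS_DMASYNC_PREWRITE);
		xx_channel_attention(sc);
		return (0);
	}

	void
	xx_attach_hook(struct iee_softc *sc, u_int8_t *eth_addr)
	{
		sc->sc_type = I82596_CA;
		sc->sc_flags = IEE_NEED_SWAP;
		sc->sc_cl_align = 32;
		sc->sc_iee_cmd = xx_iee_cmd;
		sc->sc_iee_reset = xx_iee_reset;
		sc->sc_mediastatus = NULL;
		sc->sc_mediachange = NULL;
		iee_attach(sc, eth_addr, NULL, 0, 0);
	}

xx_attach_hook() assumes an 82596CA on a big endian bus with 32 byte cache
lines, that the shared memory (at least IEE_SHMEM_MAX bytes of DMA safe
memory) has already been allocated and that iee_intr() has been established
as the interrupt handler. sc_cl_align must be set before any of the
SC_* / IEE_*_SZ / IEE_*_OFF macros are used, i.e. before calling iee_attach().
*/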
168 1.1.4.2 skrll
169 1.1.4.2 skrll
170 1.1.4.2 skrll /*
171 1.1.4.2 skrll How frame reception is done:
172 1.1.4.2 skrll Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
173 1.1.4.2 skrll Each RBD points to the data area of a mbuf cluster. The RFDs are linked
174 1.1.4.2 skrll together in a circular list. sc->sc_rx_done is the count of RFDs in the
175 1.1.4.2 skrll list already processed / the number of the RFD that has to be checked for
176 1.1.4.2 skrll a new frame first at the next RX interrupt. Upon successful reception of
177 1.1.4.2 skrll a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
178 1.1.4.2 skrll cluster is allocated and the RFD / RBD are reinitialized accordingly.
179 1.1.4.2 skrll
180 1.1.4.2 skrll When an RFD list overrun has occurred the whole RFD and RBD lists are reinitialized
181 1.1.4.2 skrll and frame reception is started again.
182 1.1.4.2 skrll */
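/*
A compressed reading aid for the receive loop in iee_intr() below; all
expressions are taken verbatim from that code, nothing new is introduced:

	rfd = SC_RFD(sc->sc_rx_done);            next RFD to check
	rbd = SC_RBD(sc->sc_rx_done);            its buffer descriptor
	SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
	    &= ~IEE_RBD_EL;                      the list end moves forward
	... hand the filled cluster to (*ifp->if_input)(), load a fresh
	    cluster into the same RBD and mark it with IEE_RBD_EL ...
	sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
*/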
183 1.1.4.2 skrll int
184 1.1.4.2 skrll iee_intr(void *intarg)
185 1.1.4.2 skrll {
186 1.1.4.2 skrll struct iee_softc *sc = intarg;
187 1.1.4.2 skrll struct ifnet *ifp = &sc->sc_ethercom.ec_if;
188 1.1.4.2 skrll struct iee_rfd *rfd;
189 1.1.4.2 skrll struct iee_rbd *rbd;
190 1.1.4.2 skrll bus_dmamap_t rx_map;
191 1.1.4.2 skrll struct mbuf *rx_mbuf;
192 1.1.4.2 skrll struct mbuf *new_mbuf;
193 1.1.4.2 skrll int scb_status;
194 1.1.4.2 skrll int scb_cmd;
195 1.1.4.2 skrll int n;
196 1.1.4.2 skrll
197 1.1.4.2 skrll if ((ifp->if_flags & IFF_RUNNING) == 0) {
198 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
199 1.1.4.2 skrll return(1);
200 1.1.4.2 skrll }
201 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
202 1.1.4.2 skrll BUS_DMASYNC_POSTREAD);
203 1.1.4.2 skrll scb_status = SC_SCB->scb_status;
204 1.1.4.2 skrll scb_cmd = SC_SCB->scb_cmd;
205 1.1.4.2 skrll rfd = SC_RFD(sc->sc_rx_done);
206 1.1.4.3 skrll while ((rfd->rfd_status & IEE_RFD_C) != 0) {
207 1.1.4.2 skrll /* At least one packet was received. */
208 1.1.4.2 skrll rbd = SC_RBD(sc->sc_rx_done);
209 1.1.4.2 skrll rx_map = sc->sc_rx_map[sc->sc_rx_done];
210 1.1.4.2 skrll rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
211 1.1.4.2 skrll SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
212 1.1.4.2 skrll &= ~IEE_RBD_EL;
213 1.1.4.2 skrll if ((rfd->rfd_status & IEE_RFD_OK) == 0
214 1.1.4.2 skrll || (rbd->rbd_count & IEE_RBD_EOF) == 0
215 1.1.4.2 skrll || (rbd->rbd_count & IEE_RBD_F) == 0){
216 1.1.4.2 skrll /* Receive error, skip frame and reuse buffer. */
217 1.1.4.2 skrll rfd->rfd_status = 0;
218 1.1.4.2 skrll rbd->rbd_count = 0;
219 1.1.4.2 skrll rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
220 1.1.4.2 skrll printf("%s: iee_intr: receive error %d, rfd_status="
221 1.1.4.2 skrll "0x%.4x, rfd_count=0x%.4x\n", sc->sc_dev.dv_xname,
222 1.1.4.2 skrll ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
223 1.1.4.2 skrll sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
224 1.1.4.2 skrll continue;
225 1.1.4.2 skrll }
226 1.1.4.2 skrll rfd->rfd_status = 0;
227 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
228 1.1.4.2 skrll BUS_DMASYNC_POSTREAD);
229 1.1.4.2 skrll rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
230 1.1.4.2 skrll rbd->rbd_count & IEE_RBD_COUNT;
231 1.1.4.2 skrll rx_mbuf->m_pkthdr.rcvif = ifp;
232 1.1.4.2 skrll MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
233 1.1.4.2 skrll if (new_mbuf == NULL) {
234 1.1.4.2 skrll printf("%s: iee_intr: can't allocate mbuf\n",
235 1.1.4.2 skrll sc->sc_dev.dv_xname);
236 1.1.4.2 skrll break;
237 1.1.4.2 skrll }
238 1.1.4.2 skrll MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
239 1.1.4.2 skrll MCLGET(new_mbuf, M_DONTWAIT);
240 1.1.4.2 skrll if ((new_mbuf->m_flags & M_EXT) == 0) {
241 1.1.4.2 skrll printf("%s: iee_intr: can't alloc mbuf cluster\n",
242 1.1.4.2 skrll sc->sc_dev.dv_xname);
243 1.1.4.2 skrll m_freem(new_mbuf);
244 1.1.4.2 skrll break;
245 1.1.4.2 skrll }
246 1.1.4.2 skrll bus_dmamap_unload(sc->sc_dmat, rx_map);
247 1.1.4.2 skrll if (bus_dmamap_load(sc->sc_dmat, rx_map,
248 1.1.4.2 skrll new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
249 1.1.4.2 skrll NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
250 1.1.4.2 skrll panic("%s: iee_intr: can't load RX DMA map\n",
251 1.1.4.2 skrll sc->sc_dev.dv_xname);
252 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
253 1.1.4.2 skrll new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
254 1.1.4.2 skrll #if NBPFILTER > 0
255 1.1.4.2 skrll if (ifp->if_bpf != 0)
256 1.1.4.2 skrll bpf_mtap(ifp->if_bpf, rx_mbuf);
257 1.1.4.2 skrll #endif /* NBPFILTER > 0 */
258 1.1.4.2 skrll (*ifp->if_input)(ifp, rx_mbuf);
259 1.1.4.2 skrll ifp->if_ipackets++;
260 1.1.4.2 skrll sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
261 1.1.4.2 skrll rbd->rbd_count = 0;
262 1.1.4.2 skrll rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
263 1.1.4.2 skrll rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
264 1.1.4.2 skrll sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
265 1.1.4.2 skrll rfd = SC_RFD(sc->sc_rx_done);
266 1.1.4.2 skrll }
267 1.1.4.2 skrll if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
268 1.1.4.2 skrll || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
269 1.1.4.2 skrll || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
270 1.1.4.2 skrll /* Receive Overrun, reinit receive ring buffer. */
271 1.1.4.2 skrll for (n = 0 ; n < IEE_NRFD ; n++) {
272 1.1.4.2 skrll SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
273 1.1.4.2 skrll SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
274 1.1.4.2 skrll + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
275 1.1.4.2 skrll SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
276 1.1.4.2 skrll + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
277 1.1.4.2 skrll SC_RBD(n)->rbd_size = IEE_RBD_EL |
278 1.1.4.2 skrll sc->sc_rx_map[n]->dm_segs[0].ds_len;
279 1.1.4.2 skrll SC_RBD(n)->rbd_rb_addr =
280 1.1.4.2 skrll sc->sc_rx_map[n]->dm_segs[0].ds_addr;
281 1.1.4.2 skrll }
282 1.1.4.2 skrll SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
283 1.1.4.2 skrll sc->sc_rx_done = 0;
284 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
285 1.1.4.2 skrll IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
286 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
287 1.1.4.2 skrll printf("%s: iee_intr: receive ring buffer overrun\n",
288 1.1.4.2 skrll sc->sc_dev.dv_xname);
289 1.1.4.3 skrll }
290 1.1.4.2 skrll
291 1.1.4.3 skrll if (sc->sc_next_cb != 0
292 1.1.4.3 skrll && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
293 1.1.4.2 skrll /* CMD list finished */
294 1.1.4.2 skrll ifp->if_timer = 0;
295 1.1.4.2 skrll if (sc->sc_next_tbd != 0) {
296 1.1.4.2 skrll /* A TX CMD list finished, clean up. */
297 1.1.4.2 skrll for (n = 0 ; n < sc->sc_next_cb ; n++) {
298 1.1.4.2 skrll m_freem(sc->sc_tx_mbuf[n]);
299 1.1.4.2 skrll sc->sc_tx_mbuf[n] = NULL;
300 1.1.4.2 skrll bus_dmamap_unload(sc->sc_dmat,sc->sc_tx_map[n]);
301 1.1.4.2 skrll if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
302 1.1.4.2 skrll (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
303 1.1.4.2 skrll sc->sc_tx_col += 16;
304 1.1.4.2 skrll else
305 1.1.4.2 skrll sc->sc_tx_col += SC_CB(n)->cb_status
306 1.1.4.2 skrll & IEE_CB_MAXCOL;
307 1.1.4.2 skrll }
308 1.1.4.2 skrll sc->sc_next_tbd = 0;
309 1.1.4.2 skrll ifp->if_flags &= ~IFF_OACTIVE;
310 1.1.4.2 skrll }
311 1.1.4.2 skrll for (n = 0 ; n < sc->sc_next_cb ; n++) {
312 1.1.4.2 skrll /* Check if a CMD failed, but ignore TX errors. */
313 1.1.4.2 skrll if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
314 1.1.4.3 skrll && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
315 1.1.4.2 skrll printf("%s: iee_intr: scb_status=0x%x "
316 1.1.4.2 skrll "scb_cmd=0x%x failed command %d: "
317 1.1.4.2 skrll "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
318 1.1.4.2 skrll sc->sc_dev.dv_xname, scb_status, scb_cmd,
319 1.1.4.2 skrll ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
320 1.1.4.2 skrll n, SC_CB(n)->cb_cmd);
321 1.1.4.2 skrll }
322 1.1.4.2 skrll sc->sc_next_cb = 0;
323 1.1.4.2 skrll if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
324 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
325 1.1.4.2 skrll | IEE_CB_I);
326 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
327 1.1.4.2 skrll } else
328 1.1.4.2 skrll /* Try to get deferred packets going. */
329 1.1.4.2 skrll iee_start(ifp);
330 1.1.4.2 skrll }
331 1.1.4.2 skrll if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
332 1.1.4.2 skrll sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
333 1.1.4.2 skrll printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
334 1.1.4.2 skrll sc->sc_crc_err);
335 1.1.4.2 skrll }
336 1.1.4.2 skrll if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
337 1.1.4.2 skrll sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
338 1.1.4.2 skrll printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
339 1.1.4.2 skrll sc->sc_align_err);
340 1.1.4.2 skrll }
341 1.1.4.2 skrll if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
342 1.1.4.2 skrll sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
343 1.1.4.2 skrll printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
344 1.1.4.2 skrll sc->sc_resource_err);
345 1.1.4.2 skrll }
346 1.1.4.2 skrll if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
347 1.1.4.2 skrll sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
348 1.1.4.2 skrll printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
349 1.1.4.2 skrll sc->sc_overrun_err);
350 1.1.4.2 skrll }
351 1.1.4.2 skrll if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
352 1.1.4.2 skrll sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
353 1.1.4.2 skrll printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
354 1.1.4.2 skrll sc->sc_rcvcdt_err);
355 1.1.4.2 skrll }
356 1.1.4.2 skrll if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
357 1.1.4.2 skrll sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
358 1.1.4.2 skrll printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
359 1.1.4.2 skrll sc->sc_short_fr_err);
360 1.1.4.2 skrll }
361 1.1.4.3 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
362 1.1.4.3 skrll BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
363 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
364 1.1.4.2 skrll return(1);
365 1.1.4.2 skrll }
366 1.1.4.2 skrll
367 1.1.4.2 skrll
368 1.1.4.2 skrll
369 1.1.4.2 skrll /*
370 1.1.4.2 skrll How Command Block List Processing is done.
371 1.1.4.2 skrll
372 1.1.4.2 skrll A running CBL is never manipulated. If there is a CBL already running,
373 1.1.4.2 skrll further CMDs are deferred until the current list is done. A new list is
374 1.1.4.2 skrll set up when the old one has finished.
375 1.1.4.2 skrll This eases programming. To manipulate a running CBL it is necessary to
376 1.1.4.2 skrll suspend the Command Unit to avoid race conditions. After a suspend
377 1.1.4.2 skrll is sent we have to wait for an interrupt that ACKs the suspend. Then
378 1.1.4.2 skrll we can manipulate the CBL and resume operation. I am not sure that this
379 1.1.4.2 skrll is more effective than the current, much simpler approach. => KISS
380 1.1.4.2 skrll See i82596CA data sheet page 26.
381 1.1.4.2 skrll
382 1.1.4.2 skrll A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
383 1.1.4.2 skrll
384 1.1.4.2 skrll A CBL may consist of TX CMDs, and _only_ TX CMDs.
385 1.1.4.2 skrll A TX CBL is running or on the way to be set up when
386 1.1.4.2 skrll ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
387 1.1.4.2 skrll
388 1.1.4.2 skrll A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
389 1.1.4.2 skrll non-TX CMDs.
390 1.1.4.2 skrll
391 1.1.4.2 skrll This follows mostly from the way an Ethernet driver works and
392 1.1.4.2 skrll because running CBLs are not manipulated while they are in flight. If
393 1.1.4.2 skrll if_start() is called there will be TX CMDs enqueued, so we have a running
394 1.1.4.2 skrll CBL, and other CMDs from e.g. if_ioctl() will be deferred, and vice versa.
395 1.1.4.2 skrll
396 1.1.4.2 skrll The Multicast Setup Command is special. An MCS needs more space than
397 1.1.4.2 skrll a single CB has. The actual space requirement depends on the length of the
398 1.1.4.2 skrll multicast list. So we always defer the MCS until other CBLs are finished,
399 1.1.4.2 skrll then we set up a CONF CMD in the first CB. The CONF CMD is needed to
400 1.1.4.2 skrll turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
401 1.1.4.2 skrll use all the remaining space in the CBL and the Transmit Buffer Descriptor
402 1.1.4.2 skrll List. (Therefore the CBL and TBDL must be contiguous in physical and virtual
403 1.1.4.2 skrll memory. This is guaranteed through the definitions of the list offsets
404 1.1.4.2 skrll in i82596reg.h and because only a single DMA segment is used for all
405 1.1.4.2 skrll lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
406 1.1.4.2 skrll a multicast list length of 0, thus disabling the multicast filter.
407 1.1.4.2 skrll A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0).
408 1.1.4.2 skrll */
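/*
A short sketch of how a single non-TX command is issued under this scheme.
This is the pattern used by iee_ioctl() and iee_init() below, shown here
with the Multicast Setup Command as the example:

	iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL | IEE_CB_I);
	if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
		(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);

If IEE_WANT_MCAST is still set the MCS was deferred because a CBL is
running; iee_intr() will set it up and start the CU when that CBL finishes.
*/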
409 1.1.4.2 skrll void
410 1.1.4.2 skrll iee_cb_setup(struct iee_softc *sc, u_int32_t cmd)
411 1.1.4.2 skrll {
412 1.1.4.2 skrll struct iee_cb *cb = SC_CB(sc->sc_next_cb);
413 1.1.4.2 skrll struct ifnet *ifp = &sc->sc_ethercom.ec_if;
414 1.1.4.2 skrll struct ether_multistep step;
415 1.1.4.2 skrll struct ether_multi *enm;
416 1.1.4.2 skrll
417 1.1.4.2 skrll memset(cb, 0, IEE_CB_SZ);
418 1.1.4.2 skrll cb->cb_cmd = cmd;
419 1.1.4.2 skrll switch(cmd & IEE_CB_CMD) {
420 1.1.4.2 skrll case IEE_CB_CMD_NOP: /* NOP CMD */
421 1.1.4.2 skrll break;
422 1.1.4.2 skrll case IEE_CB_CMD_IAS: /* Individual Address Setup */
423 1.1.4.2 skrll memcpy((void*)cb->cb_ind_addr, LLADDR(ifp->if_sadl),
424 1.1.4.2 skrll ETHER_ADDR_LEN);
425 1.1.4.2 skrll break;
426 1.1.4.2 skrll case IEE_CB_CMD_CONF: /* Configure */
427 1.1.4.2 skrll memcpy((void*)cb->cb_cf, sc->sc_cf, sc->sc_cf[0]
428 1.1.4.2 skrll & IEE_CF_0_CNT_M);
429 1.1.4.2 skrll break;
430 1.1.4.2 skrll case IEE_CB_CMD_MCS: /* Multicast Setup */
431 1.1.4.2 skrll if (sc->sc_next_cb != 0) {
432 1.1.4.2 skrll sc->sc_flags |= IEE_WANT_MCAST;
433 1.1.4.2 skrll return;
434 1.1.4.2 skrll }
435 1.1.4.2 skrll sc->sc_flags &= ~IEE_WANT_MCAST;
436 1.1.4.2 skrll if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
437 1.1.4.2 skrll /* Need no multicast filter in promisc mode. */
438 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
439 1.1.4.2 skrll | IEE_CB_I);
440 1.1.4.2 skrll return;
441 1.1.4.2 skrll }
442 1.1.4.2 skrll /* Leave room for a CONF CMD to enable/disable ALLMULTI mode. */
443 1.1.4.2 skrll cb = SC_CB(sc->sc_next_cb + 1);
444 1.1.4.2 skrll cb->cb_cmd = cmd;
445 1.1.4.2 skrll cb->cb_mcast.mc_size = 0;
446 1.1.4.2 skrll ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
447 1.1.4.2 skrll while (enm != NULL) {
448 1.1.4.2 skrll if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
449 1.1.4.2 skrll ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
450 1.1.4.2 skrll * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
451 1.1.4.2 skrll > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
452 1.1.4.2 skrll cb->cb_mcast.mc_size = 0;
453 1.1.4.2 skrll break;
454 1.1.4.2 skrll }
455 1.1.4.2 skrll memcpy((void*) &cb->cb_mcast.mc_addrs[
456 1.1.4.2 skrll cb->cb_mcast.mc_size * ETHER_ADDR_LEN],
457 1.1.4.2 skrll enm->enm_addrlo, ETHER_ADDR_LEN);
458 1.1.4.2 skrll ETHER_NEXT_MULTI(step, enm);
459 1.1.4.2 skrll cb->cb_mcast.mc_size++;
460 1.1.4.2 skrll }
461 1.1.4.2 skrll if (cb->cb_mcast.mc_size == 0) {
462 1.1.4.2 skrll /* Can't do exact mcast filtering, do ALLMULTI mode. */
463 1.1.4.2 skrll ifp->if_flags |= IFF_ALLMULTI;
464 1.1.4.2 skrll sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
465 1.1.4.2 skrll } else {
466 1.1.4.2 skrll /* disable ALLMULTI and load mcast list */
467 1.1.4.2 skrll ifp->if_flags &= ~IFF_ALLMULTI;
468 1.1.4.2 skrll sc->sc_cf[11] |= IEE_CF_11_MCALL;
469 1.1.4.2 skrll /* Mcast setup may need more than IEE_CB_SZ bytes. */
470 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
471 1.1.4.2 skrll IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
472 1.1.4.2 skrll BUS_DMASYNC_PREWRITE);
473 1.1.4.2 skrll }
474 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_CONF);
475 1.1.4.2 skrll break;
476 1.1.4.2 skrll case IEE_CB_CMD_TR: /* Transmit */
477 1.1.4.2 skrll cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
478 1.1.4.2 skrll + IEE_TBD_SZ * sc->sc_next_tbd);
479 1.1.4.2 skrll cb->cb_cmd |= IEE_CB_SF; /* Always use Flexible Mode. */
480 1.1.4.2 skrll break;
481 1.1.4.2 skrll case IEE_CB_CMD_TDR: /* Time Domain Reflectometry */
482 1.1.4.2 skrll break;
483 1.1.4.2 skrll case IEE_CB_CMD_DUMP: /* Dump */
484 1.1.4.2 skrll break;
485 1.1.4.2 skrll case IEE_CB_CMD_DIAG: /* Diagnose */
486 1.1.4.2 skrll break;
487 1.1.4.2 skrll default:
488 1.1.4.2 skrll /* can't happen */
489 1.1.4.2 skrll break;
490 1.1.4.2 skrll }
491 1.1.4.2 skrll cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
492 1.1.4.2 skrll (sc->sc_next_cb + 1));
493 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
494 1.1.4.2 skrll + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
495 1.1.4.2 skrll sc->sc_next_cb++;
496 1.1.4.2 skrll ifp->if_timer = 5;
497 1.1.4.2 skrll return;
498 1.1.4.2 skrll }
499 1.1.4.2 skrll
500 1.1.4.2 skrll
501 1.1.4.2 skrll
502 1.1.4.2 skrll void
503 1.1.4.2 skrll iee_attach(struct iee_softc *sc, u_int8_t *eth_addr, int *media, int nmedia,
504 1.1.4.2 skrll int defmedia)
505 1.1.4.2 skrll {
506 1.1.4.2 skrll struct ifnet *ifp = &sc->sc_ethercom.ec_if;
507 1.1.4.2 skrll int n;
508 1.1.4.2 skrll
509 1.1.4.2 skrll /* Set pointer to Intermediate System Configuration Pointer. */
510 1.1.4.2 skrll /* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
511 1.1.4.2 skrll SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
512 1.1.4.2 skrll /* Set pointer to System Control Block. */
513 1.1.4.2 skrll /* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
514 1.1.4.2 skrll SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
515 1.1.4.2 skrll /* Set pointer to Receive Frame Area. (physical address) */
516 1.1.4.2 skrll SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
517 1.1.4.2 skrll /* Set pointer to Command Block. (physical address) */
518 1.1.4.2 skrll SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);
519 1.1.4.2 skrll
520 1.1.4.2 skrll ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
521 1.1.4.2 skrll if (media != NULL) {
522 1.1.4.2 skrll for (n = 0 ; n < nmedia ; n++)
523 1.1.4.2 skrll ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
524 1.1.4.2 skrll ifmedia_set(&sc->sc_ifmedia, defmedia);
525 1.1.4.2 skrll } else {
526 1.1.4.2 skrll ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
527 1.1.4.2 skrll ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
528 1.1.4.2 skrll }
529 1.1.4.2 skrll
530 1.1.4.2 skrll ifp->if_softc = sc;
531 1.1.4.2 skrll strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
532 1.1.4.2 skrll ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
533 1.1.4.2 skrll ifp->if_start = iee_start; /* initiate output routine */
534 1.1.4.2 skrll ifp->if_ioctl = iee_ioctl; /* ioctl routine */
535 1.1.4.2 skrll ifp->if_init = iee_init; /* init routine */
536 1.1.4.2 skrll ifp->if_stop = iee_stop; /* stop routine */
537 1.1.4.2 skrll ifp->if_watchdog = iee_watchdog; /* timer routine */
538 1.1.4.2 skrll ifp->if_drain = iee_drain; /* routine to release resources */
539 1.1.4.2 skrll IFQ_SET_READY(&ifp->if_snd);
540 1.1.4.2 skrll /* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
541 1.1.4.2 skrll sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
542 1.1.4.2 skrll
543 1.1.4.2 skrll if_attach(ifp);
544 1.1.4.2 skrll ether_ifattach(ifp, eth_addr);
545 1.1.4.2 skrll
546 1.1.4.2 skrll aprint_normal(": Intel 82596%s address %s\n",
547 1.1.4.2 skrll i82596_typenames[ sc->sc_type], ether_sprintf(eth_addr));
548 1.1.4.2 skrll
549 1.1.4.2 skrll for (n = 0 ; n < IEE_NCB ; n++)
550 1.1.4.2 skrll sc->sc_tx_map[n] = NULL;
551 1.1.4.2 skrll for (n = 0 ; n < IEE_NRFD ; n++) {
552 1.1.4.2 skrll sc->sc_rx_mbuf[n] = NULL;
553 1.1.4.2 skrll sc->sc_rx_map[n] = NULL;
554 1.1.4.2 skrll }
555 1.1.4.2 skrll sc->sc_tx_timeout = 0;
556 1.1.4.2 skrll sc->sc_setup_timeout = 0;
557 1.1.4.2 skrll (sc->sc_iee_reset)(sc);
558 1.1.4.2 skrll return;
559 1.1.4.2 skrll }
560 1.1.4.2 skrll
561 1.1.4.2 skrll
562 1.1.4.2 skrll
563 1.1.4.2 skrll void
564 1.1.4.2 skrll iee_detach(struct iee_softc *sc, int flags)
565 1.1.4.2 skrll {
566 1.1.4.2 skrll struct ifnet *ifp = &sc->sc_ethercom.ec_if;
567 1.1.4.2 skrll
568 1.1.4.2 skrll if ((ifp->if_flags & IFF_RUNNING) != 0)
569 1.1.4.2 skrll iee_stop(ifp, 1);
570 1.1.4.2 skrll ether_ifdetach(ifp);
571 1.1.4.2 skrll if_detach(ifp);
572 1.1.4.2 skrll return;
573 1.1.4.2 skrll }
574 1.1.4.2 skrll
575 1.1.4.2 skrll
576 1.1.4.2 skrll
577 1.1.4.2 skrll /* media change and status callback */
578 1.1.4.2 skrll int
579 1.1.4.2 skrll iee_mediachange(struct ifnet *ifp)
580 1.1.4.2 skrll {
581 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
582 1.1.4.2 skrll
583 1.1.4.2 skrll if (sc->sc_mediachange != NULL)
584 1.1.4.2 skrll return ((sc->sc_mediachange)(ifp));
585 1.1.4.2 skrll return(0);
586 1.1.4.2 skrll }
587 1.1.4.2 skrll
588 1.1.4.2 skrll
589 1.1.4.2 skrll
590 1.1.4.2 skrll void
591 1.1.4.2 skrll iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
592 1.1.4.2 skrll {
593 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
594 1.1.4.2 skrll
595 1.1.4.2 skrll if (sc->sc_mediastatus != NULL)
596 1.1.4.2 skrll return ((sc->sc_mediastatus)(ifp, ifmreq));
597 1.1.4.2 skrll return;
598 1.1.4.2 skrll }
599 1.1.4.2 skrll
600 1.1.4.2 skrll
601 1.1.4.2 skrll
602 1.1.4.2 skrll /* initiate output routine */
603 1.1.4.2 skrll void
604 1.1.4.2 skrll iee_start(struct ifnet *ifp)
605 1.1.4.2 skrll {
606 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
607 1.1.4.2 skrll struct mbuf *m = NULL;
608 1.1.4.2 skrll int t;
609 1.1.4.2 skrll int n;
610 1.1.4.2 skrll
611 1.1.4.2 skrll if (sc->sc_next_cb != 0)
612 1.1.4.2 skrll /* There is already a CMD running. Defer packet enqueueing. */
613 1.1.4.2 skrll return;
614 1.1.4.2 skrll for (t = 0 ; t < IEE_NCB ; t++) {
615 1.1.4.2 skrll IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
616 1.1.4.2 skrll if (sc->sc_tx_mbuf[t] == NULL)
617 1.1.4.2 skrll break;
618 1.1.4.2 skrll if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
619 1.1.4.2 skrll sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
620 1.1.4.2 skrll /*
621 1.1.4.2 skrll * The packet needs more TBDs than we support.
622 1.1.4.2 skrll * Copy the packet into a mbuf cluster to get it out.
623 1.1.4.2 skrll */
624 1.1.4.2 skrll printf("%s: iee_start: failed to load DMA map\n",
625 1.1.4.2 skrll sc->sc_dev.dv_xname);
626 1.1.4.2 skrll MGETHDR(m, M_DONTWAIT, MT_DATA);
627 1.1.4.2 skrll if (m == NULL) {
628 1.1.4.2 skrll printf("%s: iee_start: can't allocate mbuf\n",
629 1.1.4.2 skrll sc->sc_dev.dv_xname);
630 1.1.4.2 skrll m_freem(sc->sc_tx_mbuf[t]);
631 1.1.4.2 skrll t--;
632 1.1.4.2 skrll continue;
633 1.1.4.2 skrll }
634 1.1.4.2 skrll MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
635 1.1.4.2 skrll MCLGET(m, M_DONTWAIT);
636 1.1.4.2 skrll if ((m->m_flags & M_EXT) == 0) {
637 1.1.4.2 skrll printf("%s: iee_start: can't allocate mbuf "
638 1.1.4.2 skrll "cluster\n", sc->sc_dev.dv_xname);
639 1.1.4.2 skrll m_freem(sc->sc_tx_mbuf[t]);
640 1.1.4.2 skrll m_freem(m);
641 1.1.4.2 skrll t--;
642 1.1.4.2 skrll continue;
643 1.1.4.2 skrll }
644 1.1.4.2 skrll m_copydata(sc->sc_tx_mbuf[t], 0,
645 1.1.4.2 skrll sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
646 1.1.4.2 skrll m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
647 1.1.4.2 skrll m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
648 1.1.4.2 skrll m_freem(sc->sc_tx_mbuf[t]);
649 1.1.4.2 skrll sc->sc_tx_mbuf[t] = m;
650 1.1.4.2 skrll if(bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
651 1.1.4.2 skrll m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
652 1.1.4.2 skrll printf("%s: iee_start: can't load TX DMA map\n",
653 1.1.4.2 skrll sc->sc_dev.dv_xname);
654 1.1.4.2 skrll m_freem(sc->sc_tx_mbuf[t]);
655 1.1.4.2 skrll t--;
656 1.1.4.2 skrll continue;
657 1.1.4.2 skrll }
658 1.1.4.2 skrll }
659 1.1.4.2 skrll for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
660 1.1.4.2 skrll SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
661 1.1.4.2 skrll sc->sc_tx_map[t]->dm_segs[n].ds_addr;
662 1.1.4.2 skrll SC_TBD(sc->sc_next_tbd + n)->tbd_size =
663 1.1.4.2 skrll sc->sc_tx_map[t]->dm_segs[n].ds_len;
664 1.1.4.2 skrll SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
665 1.1.4.2 skrll IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
666 1.1.4.2 skrll * (sc->sc_next_tbd + n + 1));
667 1.1.4.2 skrll }
668 1.1.4.2 skrll SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
669 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
670 1.1.4.2 skrll sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
671 1.1.4.2 skrll IFQ_POLL(&ifp->if_snd, m);
672 1.1.4.2 skrll if (m == NULL)
673 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
674 1.1.4.2 skrll | IEE_CB_I);
675 1.1.4.2 skrll else
676 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_TR);
677 1.1.4.2 skrll sc->sc_next_tbd += n;
678 1.1.4.2 skrll #if NBPFILTER > 0
679 1.1.4.2 skrll /* Pass packet to bpf if someone listens. */
680 1.1.4.2 skrll if (ifp->if_bpf)
681 1.1.4.2 skrll bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
682 1.1.4.2 skrll #endif
683 1.1.4.2 skrll }
684 1.1.4.2 skrll if (t == 0)
685 1.1.4.2 skrll /* No packets got set up for TX. */
686 1.1.4.2 skrll return;
687 1.1.4.2 skrll if (t == IEE_NCB)
688 1.1.4.2 skrll ifp->if_flags |= IFF_OACTIVE;
689 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF,
690 1.1.4.2 skrll IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
691 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
692 1.1.4.2 skrll return;
693 1.1.4.2 skrll }
694 1.1.4.2 skrll
695 1.1.4.2 skrll
696 1.1.4.2 skrll
697 1.1.4.2 skrll /* ioctl routine */
698 1.1.4.2 skrll int
699 1.1.4.2 skrll iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
700 1.1.4.2 skrll {
701 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
702 1.1.4.2 skrll int s;
703 1.1.4.2 skrll int err;
704 1.1.4.2 skrll
705 1.1.4.2 skrll s = splnet();
706 1.1.4.2 skrll if (cmd == SIOCSIFMEDIA || cmd == SIOCGIFMEDIA)
707 1.1.4.2 skrll err = ifmedia_ioctl(ifp, (struct ifreq *) data,
708 1.1.4.2 skrll &sc->sc_ifmedia, cmd);
709 1.1.4.2 skrll else {
710 1.1.4.2 skrll err = ether_ioctl(ifp, cmd, data);
711 1.1.4.2 skrll if (err == ENETRESET ||
712 1.1.4.2 skrll ((ifp->if_flags & IFF_PROMISC) != 0
713 1.1.4.2 skrll && (sc->sc_cf[8] & IEE_CF_8_PRM) == 0)
714 1.1.4.2 skrll || ((ifp->if_flags & IFF_PROMISC) == 0
715 1.1.4.2 skrll && (sc->sc_cf[8] & IEE_CF_8_PRM) != 0)) {
716 1.1.4.2 skrll /* Do multicast setup / toggle promisc mode. */
717 1.1.4.2 skrll if ((ifp->if_flags & IFF_PROMISC) != 0)
718 1.1.4.2 skrll sc->sc_cf[8] |= IEE_CF_8_PRM;
719 1.1.4.2 skrll else
720 1.1.4.2 skrll sc->sc_cf[8] &= ~IEE_CF_8_PRM;
721 1.1.4.2 skrll /* Put new multicast list into the hardware filter. */
722 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
723 1.1.4.2 skrll | IEE_CB_I);
724 1.1.4.2 skrll if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
725 1.1.4.2 skrll /* Mcast setup is not deferred. */
726 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
727 1.1.4.2 skrll err = 0;
728 1.1.4.2 skrll }
729 1.1.4.2 skrll }
730 1.1.4.2 skrll splx(s);
731 1.1.4.2 skrll return(err);
732 1.1.4.2 skrll }
733 1.1.4.2 skrll
734 1.1.4.2 skrll
735 1.1.4.2 skrll
736 1.1.4.2 skrll /* init routine */
737 1.1.4.2 skrll int
738 1.1.4.2 skrll iee_init(struct ifnet *ifp)
739 1.1.4.2 skrll {
740 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
741 1.1.4.2 skrll int r;
742 1.1.4.2 skrll int t;
743 1.1.4.2 skrll int n;
744 1.1.4.2 skrll int err;
745 1.1.4.2 skrll
746 1.1.4.2 skrll sc->sc_next_cb = 0;
747 1.1.4.2 skrll sc->sc_next_tbd = 0;
748 1.1.4.2 skrll sc->sc_flags &= ~IEE_WANT_MCAST;
749 1.1.4.2 skrll sc->sc_rx_done = 0;
750 1.1.4.2 skrll SC_SCB->scb_crc_err = 0;
751 1.1.4.2 skrll SC_SCB->scb_align_err = 0;
752 1.1.4.2 skrll SC_SCB->scb_resource_err = 0;
753 1.1.4.2 skrll SC_SCB->scb_overrun_err = 0;
754 1.1.4.2 skrll SC_SCB->scb_rcvcdt_err = 0;
755 1.1.4.2 skrll SC_SCB->scb_short_fr_err = 0;
756 1.1.4.2 skrll sc->sc_crc_err = 0;
757 1.1.4.2 skrll sc->sc_align_err = 0;
758 1.1.4.2 skrll sc->sc_resource_err = 0;
759 1.1.4.2 skrll sc->sc_overrun_err = 0;
760 1.1.4.2 skrll sc->sc_rcvcdt_err = 0;
761 1.1.4.2 skrll sc->sc_short_fr_err = 0;
762 1.1.4.2 skrll sc->sc_tx_col = 0;
763 1.1.4.2 skrll sc->sc_rx_err = 0;
764 1.1.4.2 skrll sc->sc_cmd_err = 0;
765 1.1.4.2 skrll /* Create Transmit DMA maps. */
766 1.1.4.2 skrll for (t = 0 ; t < IEE_NCB ; t++) {
767 1.1.4.2 skrll if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
768 1.1.4.2 skrll MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
769 1.1.4.2 skrll &sc->sc_tx_map[t]) != 0) {
770 1.1.4.2 skrll printf("%s: iee_init: can't create TX DMA map\n",
771 1.1.4.2 skrll sc->sc_dev.dv_xname);
772 1.1.4.2 skrll for (n = 0 ; n < t ; n++)
773 1.1.4.2 skrll bus_dmamap_destroy(sc->sc_dmat,
774 1.1.4.2 skrll sc->sc_tx_map[n]);
775 1.1.4.2 skrll return(ENOBUFS);
776 1.1.4.2 skrll }
777 1.1.4.2 skrll }
778 1.1.4.2 skrll /* Initialize Receive Frame and Receive Buffer Descriptors */
779 1.1.4.2 skrll err = 0;
780 1.1.4.2 skrll memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
781 1.1.4.2 skrll memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
782 1.1.4.2 skrll for (r = 0 ; r < IEE_NRFD ; r++) {
783 1.1.4.2 skrll SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
784 1.1.4.2 skrll SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
785 1.1.4.2 skrll + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));
786 1.1.4.2 skrll
787 1.1.4.2 skrll SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
788 1.1.4.2 skrll + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
789 1.1.4.2 skrll if (sc->sc_rx_mbuf[r] == NULL) {
790 1.1.4.2 skrll MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
791 1.1.4.2 skrll if (sc->sc_rx_mbuf[r] == NULL) {
792 1.1.4.2 skrll printf("%s: iee_init: can't allocate mbuf\n",
793 1.1.4.2 skrll sc->sc_dev.dv_xname);
794 1.1.4.2 skrll err = 1;
795 1.1.4.2 skrll break;
796 1.1.4.2 skrll }
797 1.1.4.2 skrll MCLAIM(sc->sc_rx_mbuf[r],&sc->sc_ethercom.ec_rx_mowner);
798 1.1.4.2 skrll MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
799 1.1.4.2 skrll if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
800 1.1.4.2 skrll printf("%s: iee_init: can't allocate mbuf"
801 1.1.4.2 skrll " cluster\n", sc->sc_dev.dv_xname);
802 1.1.4.2 skrll m_freem(sc->sc_rx_mbuf[r]);
803 1.1.4.2 skrll err = 1;
804 1.1.4.2 skrll break;
805 1.1.4.2 skrll }
806 1.1.4.2 skrll }
807 1.1.4.2 skrll if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
808 1.1.4.2 skrll MCLBYTES, 1, MCLBYTES , 0, BUS_DMA_NOWAIT,
809 1.1.4.2 skrll &sc->sc_rx_map[r]) != 0) {
810 1.1.4.2 skrll printf("%s: iee_init: can't create RX "
811 1.1.4.2 skrll "DMA map\n", sc->sc_dev.dv_xname);
812 1.1.4.2 skrll m_freem(sc->sc_rx_mbuf[r]);
813 1.1.4.2 skrll err = 1;
814 1.1.4.2 skrll break;
815 1.1.4.2 skrll }
816 1.1.4.2 skrll if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
817 1.1.4.2 skrll sc->sc_rx_mbuf[r]->m_ext.ext_buf,
818 1.1.4.2 skrll sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
819 1.1.4.2 skrll BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
820 1.1.4.2 skrll printf("%s: iee_init: can't load RX DMA map\n",
821 1.1.4.2 skrll sc->sc_dev.dv_xname);
822 1.1.4.2 skrll bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
823 1.1.4.2 skrll m_freem(sc->sc_rx_mbuf[r]);
824 1.1.4.2 skrll err = 1;
825 1.1.4.2 skrll break;
826 1.1.4.2 skrll }
827 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
828 1.1.4.2 skrll sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
829 1.1.4.2 skrll SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
830 1.1.4.2 skrll SC_RBD(r)->rbd_rb_addr= sc->sc_rx_map[r]->dm_segs[0].ds_addr;
831 1.1.4.2 skrll }
832 1.1.4.2 skrll SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
833 1.1.4.2 skrll if (err != 0) {
834 1.1.4.2 skrll for (n = 0 ; n < r; n++) {
835 1.1.4.2 skrll m_freem(sc->sc_rx_mbuf[n]);
836 1.1.4.2 skrll sc->sc_rx_mbuf[n] = NULL;
837 1.1.4.2 skrll bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
838 1.1.4.2 skrll bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
839 1.1.4.2 skrll sc->sc_rx_map[n] = NULL;
840 1.1.4.2 skrll }
841 1.1.4.2 skrll for (n = 0 ; n < t ; n++) {
842 1.1.4.2 skrll bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
843 1.1.4.2 skrll sc->sc_tx_map[n] = NULL;
844 1.1.4.2 skrll }
845 1.1.4.2 skrll return(ENOBUFS);
846 1.1.4.2 skrll }
847 1.1.4.2 skrll
848 1.1.4.2 skrll (sc->sc_iee_reset)(sc);
849 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_IAS);
850 1.1.4.2 skrll sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
851 1.1.4.2 skrll sc->sc_cf[1] = IEE_CF_1_DEF;
852 1.1.4.2 skrll sc->sc_cf[2] = IEE_CF_2_DEF;
853 1.1.4.2 skrll sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
854 1.1.4.2 skrll | IEE_CF_3_PREAMLEN_DEF;
855 1.1.4.2 skrll sc->sc_cf[4] = IEE_CF_4_DEF;
856 1.1.4.2 skrll sc->sc_cf[5] = IEE_CF_5_DEF;
857 1.1.4.2 skrll sc->sc_cf[6] = IEE_CF_6_DEF;
858 1.1.4.2 skrll sc->sc_cf[7] = IEE_CF_7_DEF;
859 1.1.4.2 skrll sc->sc_cf[8] = IEE_CF_8_DEF;
860 1.1.4.2 skrll sc->sc_cf[9] = IEE_CF_9_DEF;
861 1.1.4.2 skrll sc->sc_cf[10] = IEE_CF_10_DEF;
862 1.1.4.2 skrll sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
863 1.1.4.2 skrll sc->sc_cf[12] = IEE_CF_12_DEF;
864 1.1.4.2 skrll sc->sc_cf[13] = IEE_CF_13_DEF;
865 1.1.4.2 skrll iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
866 1.1.4.2 skrll SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
867 1.1.4.2 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
868 1.1.4.2 skrll BUS_DMASYNC_PREWRITE);
869 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
870 1.1.4.2 skrll /* Issue a Channel Attention to ACK interrupts we may have caused. */
871 1.1.4.2 skrll (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
872 1.1.4.2 skrll
873 1.1.4.2 skrll /* Mark the interface as running and ready to RX/TX packets. */
874 1.1.4.2 skrll ifp->if_flags |= IFF_RUNNING;
875 1.1.4.2 skrll ifp->if_flags &= ~IFF_OACTIVE;
876 1.1.4.2 skrll return(0);
877 1.1.4.2 skrll }
878 1.1.4.2 skrll
879 1.1.4.2 skrll
880 1.1.4.2 skrll
881 1.1.4.2 skrll /* stop routine */
882 1.1.4.2 skrll void
883 1.1.4.2 skrll iee_stop(struct ifnet *ifp, int disable)
884 1.1.4.2 skrll {
885 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
886 1.1.4.2 skrll int n;
887 1.1.4.2 skrll
888 1.1.4.2 skrll ifp->if_flags &= ~IFF_RUNNING;
889 1.1.4.2 skrll ifp->if_flags |= IFF_OACTIVE;
890 1.1.4.2 skrll ifp->if_timer = 0;
891 1.1.4.2 skrll /* Reset the chip to get it quiet. */
892 1.1.4.2 skrll (sc->sc_iee_reset)(ifp->if_softc);
893 1.1.4.2 skrll /* Issue a Channel Attention to ACK interrupts we may have caused. */
894 1.1.4.2 skrll (sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
895 1.1.4.2 skrll /* Release any dynamically allocated resources. */
896 1.1.4.2 skrll for (n = 0 ; n < IEE_NCB ; n++) {
897 1.1.4.2 skrll if (sc->sc_tx_map[n] != NULL)
898 1.1.4.2 skrll bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
899 1.1.4.2 skrll sc->sc_tx_map[n] = NULL;
900 1.1.4.2 skrll }
901 1.1.4.2 skrll for (n = 0 ; n < IEE_NRFD ; n++) {
902 1.1.4.2 skrll if (sc->sc_rx_mbuf[n] != NULL)
903 1.1.4.2 skrll m_freem(sc->sc_rx_mbuf[n]);
904 1.1.4.2 skrll sc->sc_rx_mbuf[n] = NULL;
905 1.1.4.2 skrll if (sc->sc_rx_map[n] != NULL) {
906 1.1.4.2 skrll bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
907 1.1.4.2 skrll bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
908 1.1.4.2 skrll }
909 1.1.4.2 skrll sc->sc_rx_map[n] = NULL;
910 1.1.4.2 skrll }
911 1.1.4.2 skrll return;
912 1.1.4.2 skrll }
913 1.1.4.2 skrll
914 1.1.4.2 skrll
915 1.1.4.2 skrll
916 1.1.4.2 skrll /* timer routine */
917 1.1.4.2 skrll void
918 1.1.4.2 skrll iee_watchdog(struct ifnet *ifp)
919 1.1.4.2 skrll {
920 1.1.4.2 skrll struct iee_softc *sc = ifp->if_softc;
921 1.1.4.2 skrll
922 1.1.4.2 skrll (sc->sc_iee_reset)(sc);
923 1.1.4.2 skrll if (sc->sc_next_tbd != 0)
924 1.1.4.2 skrll printf("%s: iee_watchdog: transmit timeout %d\n",
925 1.1.4.2 skrll sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
926 1.1.4.2 skrll else
927 1.1.4.2 skrll printf("%s: iee_watchdog: setup timeout %d\n",
928 1.1.4.2 skrll sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
929 1.1.4.2 skrll iee_init(ifp);
930 1.1.4.2 skrll return;
931 1.1.4.2 skrll }
932 1.1.4.2 skrll
933 1.1.4.2 skrll
934 1.1.4.2 skrll
935 1.1.4.2 skrll /* routine to release resources */
936 1.1.4.2 skrll void
937 1.1.4.2 skrll iee_drain(struct ifnet *ifp)
938 1.1.4.2 skrll {
939 1.1.4.2 skrll iee_stop(ifp, 0);
940 1.1.4.2 skrll return;
941 1.1.4.2 skrll }
942 1.1.4.2 skrll
943 1.1.4.2 skrll
944 1.1.4.2 skrll