/* $NetBSD: i82596.c,v 1.1.4.8 2005/11/10 14:04:14 skrll Exp $ */

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596 10MBit/s Ethernet chip.
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old i82586
 * ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596 to
 * i82586 compatibility mode.
 * Documentation about this chip can be found at http://www.openpa.net/,
 * file names 29021806.pdf and 29021906.pdf.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.1.4.8 2005/11/10 14:04:14 skrll Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <machine/bus.h>
#include <machine/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>


/* Supported chip variants */
const char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };


/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);                  /* initiate output */
static int iee_ioctl(struct ifnet *, u_long, caddr_t);  /* ioctl routine */
static int iee_init(struct ifnet *);                    /* init routine */
static void iee_stop(struct ifnet *, int);              /* stop routine */
static void iee_watchdog(struct ifnet *);               /* timer routine */
static void iee_drain(struct ifnet *);                  /* release resources */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, uint32_t);
/*
Things an MD frontend has to provide:

The functions via function pointers in the softc:
        int (*sc_iee_cmd)(struct iee_softc *sc, uint32_t cmd);
        int (*sc_iee_reset)(struct iee_softc *sc);
        void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
        int (*sc_mediachange)(struct ifnet *);

sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
        to the SCB cmd word and issuing a Channel Attention.
sc_iee_reset(): initiate a reset, supply the address of the SCP to the
        chip, wait for the chip to initialize and ACK interrupts that
        this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
These functions must carefully bus_dmamap_sync() all data they have touched!

sc_mediastatus() and sc_mediachange() are just MD hooks to the corresponding
MI functions. The MD frontend may set these pointers to NULL when they
are not needed.

sc->sc_type has to be set to I82596_UNKNOWN, I82596_DX or I82596_CA.
This is used for printing the correct chip type at attach time only. The
MI backend doesn't distinguish different chip types when programming
the chip.

sc->sc_flags has to be set to 0 on little endian hardware and to
IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
done by the bus attachment. Usually you need to set IEE_NEED_SWAP
when IEE_SYSBUS_BE is set in the sysbus byte.

sc->sc_cl_align must be set to 1 or to the cache line size. When set to
1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
it forces alignment of the data structures in the shared memory to a multiple
of sc->sc_cl_align. This is needed on archs like hp700 that have caches
which are not coherent with DMA I/O and are unable to map the shared memory
uncacheable. (At least pre-PA7100LC CPUs are unable to map memory uncacheable.)

sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)

The MD frontend has to allocate a piece of DMA memory of at least
IEE_SHMEM_MAX bytes. All communication with the chip is done via
this shared memory. If possible map this memory non-cacheable on
archs whose caches are not coherent with DMA I/O. The base of the memory
needs to be aligned to an even address if sc->sc_cl_align == 1 and aligned
to a cache line if sc->sc_cl_align != 1.

An interrupt with iee_intr() as handler must be established.

Call void iee_attach(struct iee_softc *sc, uint8_t *ether_address,
int *media, int nmedia, int defmedia); when everything is set up. First
parameter is a pointer to the MI softc, ether_address is an array that
contains the Ethernet address. media is an array of the media types
provided by the hardware. The members of this array are supplied to
ifmedia_add() in sequence. nmedia is the count of elements in media.
defmedia is the default media that is set via ifmedia_set().
nmedia and defmedia are ignored when media == NULL.

The MD frontend may call iee_detach() to detach the device.

See sys/arch/hp700/gsc/if_iee.c for an example.
*/
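
/*
A minimal sketch of the MD glue described above. All "xee_*" names, the
bus_space tag/handle fields and the XEE_CA_REG offset are hypothetical
placeholders for whatever the real bus attachment provides; see
sys/arch/hp700/gsc/if_iee.c for a complete, working example.

        static int
        xee_cmd(struct iee_softc *sc, uint32_t cmd)
        {
                struct xee_softc *xsc = (struct xee_softc *)sc;

                SC_SCB->scb_cmd = cmd;
                bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_SCB_OFF,
                    IEE_SCB_SZ, BUS_DMASYNC_PREWRITE);
                // Issue a Channel Attention via the bus-specific CA register.
                bus_space_write_4(xsc->sc_iot, xsc->sc_ioh, XEE_CA_REG, 0);
                return (0);
        }

        void
        xee_attach(struct xee_softc *xsc, uint8_t *ether_address)
        {
                struct iee_softc *sc = &xsc->sc_iee;

                sc->sc_iee_cmd = xee_cmd;
                sc->sc_iee_reset = xee_reset;
                sc->sc_mediastatus = NULL;
                sc->sc_mediachange = NULL;
                sc->sc_type = I82596_CA;
                sc->sc_flags = IEE_NEED_SWAP;   // e.g. big endian bus
                sc->sc_cl_align = 32;           // e.g. 32 byte cache lines
                // ... allocate and map at least IEE_SHMEM_MAX bytes of DMA
                // memory, set up sc->sc_dmat / sc->sc_shmem_map, establish
                // iee_intr() as interrupt handler, then:
                iee_attach(sc, ether_address, NULL, 0, 0);
        }
*/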


/*
How frame reception is done:
Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
Each RBD points to the data area of an mbuf cluster. The RFDs are linked
together in a circular list. sc->sc_rx_done is the count of RFDs in the
list already processed / the number of the RFD that has to be checked for
a new frame first at the next RX interrupt. Upon successful reception of
a frame the mbuf cluster is handed to the upper protocol layers, a new mbuf
cluster is allocated and the RFD / RBD are reinitialized accordingly.

When an RFD list overrun occurs, the whole RFD and RBD lists are reinitialized
and frame reception is started again.
*/
int
iee_intr(void *intarg)
{
        struct iee_softc *sc = intarg;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct iee_rfd *rfd;
        struct iee_rbd *rbd;
        bus_dmamap_t rx_map;
        struct mbuf *rx_mbuf;
        struct mbuf *new_mbuf;
        int scb_status;
        int scb_cmd;
        int n, col;

        if ((ifp->if_flags & IFF_RUNNING) == 0) {
                (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
                return(1);
        }
        bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
            BUS_DMASYNC_POSTREAD);
        scb_status = SC_SCB->scb_status;
        scb_cmd = SC_SCB->scb_cmd;
        rfd = SC_RFD(sc->sc_rx_done);
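        /*
         * Walk the RFD ring, starting at the first descriptor that has not
         * been processed yet (sc->sc_rx_done, see the comment above).
         */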
        while ((rfd->rfd_status & IEE_RFD_C) != 0) {
                /* At least one packet was received. */
                rbd = SC_RBD(sc->sc_rx_done);
                rx_map = sc->sc_rx_map[sc->sc_rx_done];
                rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
                SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
                    &= ~IEE_RBD_EL;
                if ((rfd->rfd_status & IEE_RFD_OK) == 0
                    || (rbd->rbd_count & IEE_RBD_EOF) == 0
                    || (rbd->rbd_count & IEE_RBD_F) == 0) {
                        /* Receive error, skip frame and reuse buffer. */
                        rfd->rfd_status = 0;
                        rbd->rbd_count = 0;
                        rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
                        printf("%s: iee_intr: receive error %d, rfd_status="
                            "0x%.4x, rbd_count=0x%.4x\n", sc->sc_dev.dv_xname,
                            ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
                        sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
                        continue;
                }
                rfd->rfd_status = 0;
                bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
                    BUS_DMASYNC_POSTREAD);
                rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
                    rbd->rbd_count & IEE_RBD_COUNT;
                rx_mbuf->m_pkthdr.rcvif = ifp;
                MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
                if (new_mbuf == NULL) {
                        printf("%s: iee_intr: can't allocate mbuf\n",
                            sc->sc_dev.dv_xname);
                        break;
                }
                MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
                MCLGET(new_mbuf, M_DONTWAIT);
                if ((new_mbuf->m_flags & M_EXT) == 0) {
                        printf("%s: iee_intr: can't alloc mbuf cluster\n",
                            sc->sc_dev.dv_xname);
                        m_freem(new_mbuf);
                        break;
                }
                bus_dmamap_unload(sc->sc_dmat, rx_map);
                if (bus_dmamap_load(sc->sc_dmat, rx_map,
                    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
                    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
                        panic("%s: iee_intr: can't load RX DMA map\n",
                            sc->sc_dev.dv_xname);
                bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
                    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
                if (ifp->if_bpf != 0)
                        bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
                (*ifp->if_input)(ifp, rx_mbuf);
                ifp->if_ipackets++;
                sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
                rbd->rbd_count = 0;
                rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
                rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
                sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
                rfd = SC_RFD(sc->sc_rx_done);
        }
        if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
            || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
            || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
                /* Receive Overrun, reinit receive ring buffer. */
                for (n = 0 ; n < IEE_NRFD ; n++) {
                        SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
                        SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
                            + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
                        SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
                            + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
                        SC_RBD(n)->rbd_size = IEE_RBD_EL |
                            sc->sc_rx_map[n]->dm_segs[0].ds_len;
                        SC_RBD(n)->rbd_rb_addr =
                            sc->sc_rx_map[n]->dm_segs[0].ds_addr;
                }
                SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
                sc->sc_rx_done = 0;
                bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
                    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
                (sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
                printf("%s: iee_intr: receive ring buffer overrun\n",
                    sc->sc_dev.dv_xname);
        }

        if (sc->sc_next_cb != 0
            && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
                /* CMD list finished */
                ifp->if_timer = 0;
                if (sc->sc_next_tbd != 0) {
                        /* A TX CMD list finished, cleanup */
                        for (n = 0 ; n < sc->sc_next_cb ; n++) {
                                m_freem(sc->sc_tx_mbuf[n]);
                                sc->sc_tx_mbuf[n] = NULL;
                                bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_map[n]);
                                if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
                                    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
                                        col = 16;
                                else
                                        col = SC_CB(n)->cb_status
                                            & IEE_CB_MAXCOL;
                                sc->sc_tx_col += col;
                                if ((SC_CB(n)->cb_status & IEE_CB_OK) != 0) {
                                        ifp->if_opackets++;
                                        ifp->if_collisions += col;
                                }
                        }
                        sc->sc_next_tbd = 0;
                        ifp->if_flags &= ~IFF_OACTIVE;
                }
                for (n = 0 ; n < sc->sc_next_cb ; n++) {
                        /* Check if a CMD failed, but ignore TX errors. */
                        if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
                            && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
                                printf("%s: iee_intr: scb_status=0x%x "
                                    "scb_cmd=0x%x failed command %d: "
                                    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
                                    sc->sc_dev.dv_xname, scb_status, scb_cmd,
                                    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
                                    n, SC_CB(n)->cb_cmd);
                }
                sc->sc_next_cb = 0;
                if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
                        iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
                            | IEE_CB_I);
                        (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
                } else
                        /* Try to get deferred packets going. */
                        iee_start(ifp);
        }
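        /*
         * Report any changes in the error counters the chip keeps in the SCB.
         */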
        if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
                sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
                printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
                    sc->sc_crc_err);
        }
        if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
                sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
                printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
                    sc->sc_align_err);
        }
        if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
                sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
                printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
                    sc->sc_resource_err);
        }
        if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
                sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
                printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
                    sc->sc_overrun_err);
        }
        if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
                sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
                printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
                    sc->sc_rcvcdt_err);
        }
        if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
                sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
                printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
                    sc->sc_short_fr_err);
        }
        bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
        return(1);
}


/*
How Command Block List Processing is done.

A running CBL is never manipulated. If there is a CBL already running,
further CMDs are deferred until the current list is done. A new list is
set up when the old one has finished.
This eases programming. To manipulate a running CBL it is necessary to
suspend the Command Unit to avoid race conditions. After a suspend
is sent we have to wait for an interrupt that ACKs the suspend. Then
we can manipulate the CBL and resume operation. I am not sure that this
is more effective than the current, much simpler approach. => KISS
See i82596CA data sheet page 26.

A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).

A CBL may consist of TX CMDs, and _only_ TX CMDs.
A TX CBL is running or on the way to be set up when
((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).

A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
non-TX CMDs.

This follows mostly from the way an Ethernet driver works and
because running CBLs are not manipulated while they are on the way. If
if_start() is called there will be TX CMDs enqueued so we have a running
CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.

The Multicast Setup Command is special. An MCS needs more space than
a single CB has. The actual space requirement depends on the length of the
multicast list. So we always defer the MCS until other CBLs are finished,
then we set up a CONF CMD in the first CB. The CONF CMD is needed to
turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
use all the remaining space in the CBL and the Transmit Buffer Descriptor
List. (Therefore the CBL and TBDL must be contiguous in physical and virtual
memory. This is guaranteed through the definitions of the list offsets
in i82596reg.h and because only a single DMA segment is used for all
lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
a multicast list length of 0, thus disabling the multicast filter.
A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0).
*/
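/*
 * Usage sketch (this mirrors what iee_init() and iee_ioctl() already do, it
 * is not an additional interface): queue one or more CBs with iee_cb_setup()
 * while no CBL is running, mark the last one with IEE_CB_S | IEE_CB_EL, then
 * kick the Command Unit once:
 *
 *      iee_cb_setup(sc, IEE_CB_CMD_IAS);
 *      iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
 *      (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
 */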
void
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
        struct iee_cb *cb = SC_CB(sc->sc_next_cb);
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct ether_multistep step;
        struct ether_multi *enm;

        memset(cb, 0, IEE_CB_SZ);
        cb->cb_cmd = cmd;
        switch (cmd & IEE_CB_CMD) {
        case IEE_CB_CMD_NOP:    /* NOP CMD */
                break;
        case IEE_CB_CMD_IAS:    /* Individual Address Setup */
                memcpy(__UNVOLATILE(cb->cb_ind_addr), LLADDR(ifp->if_sadl),
                    ETHER_ADDR_LEN);
                break;
        case IEE_CB_CMD_CONF:   /* Configure */
                memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
                    & IEE_CF_0_CNT_M);
                break;
        case IEE_CB_CMD_MCS:    /* Multicast Setup */
                if (sc->sc_next_cb != 0) {
                        sc->sc_flags |= IEE_WANT_MCAST;
                        return;
                }
                sc->sc_flags &= ~IEE_WANT_MCAST;
                if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
                        /* Need no multicast filter in promisc mode. */
                        iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
                            | IEE_CB_I);
                        return;
                }
                /* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
                cb = SC_CB(sc->sc_next_cb + 1);
                cb->cb_cmd = cmd;
                cb->cb_mcast.mc_size = 0;
                ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
                while (enm != NULL) {
                        if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                            ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
                            * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
                            > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
                                cb->cb_mcast.mc_size = 0;
                                break;
                        }
                        memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
                            cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
                            enm->enm_addrlo, ETHER_ADDR_LEN);
                        ETHER_NEXT_MULTI(step, enm);
                        cb->cb_mcast.mc_size++;
                }
                if (cb->cb_mcast.mc_size == 0) {
                        /* Can't do exact mcast filtering, do ALLMULTI mode. */
                        ifp->if_flags |= IFF_ALLMULTI;
                        sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
                } else {
                        /* disable ALLMULTI and load mcast list */
                        ifp->if_flags &= ~IFF_ALLMULTI;
                        sc->sc_cf[11] |= IEE_CF_11_MCALL;
                        /* Mcast setup may need more than IEE_CB_SZ bytes. */
                        bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
                            IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
                            BUS_DMASYNC_PREWRITE);
                }
                iee_cb_setup(sc, IEE_CB_CMD_CONF);
                break;
        case IEE_CB_CMD_TR:     /* Transmit */
                cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
                    + IEE_TBD_SZ * sc->sc_next_tbd);
                cb->cb_cmd |= IEE_CB_SF;        /* Always use Flexible Mode. */
                break;
        case IEE_CB_CMD_TDR:    /* Time Domain Reflectometry */
                break;
        case IEE_CB_CMD_DUMP:   /* Dump */
                break;
        case IEE_CB_CMD_DIAG:   /* Diagnose */
                break;
        default:
                /* can't happen */
                break;
        }
        cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
            (sc->sc_next_cb + 1));
        bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
            + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
        sc->sc_next_cb++;
        ifp->if_timer = 5;
        return;
}


void
iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int n;

        /* Set pointer to Intermediate System Configuration Pointer. */
        /* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
        SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
        /* Set pointer to System Control Block. */
        /* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
        SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
        /* Set pointer to Receive Frame Area. (physical address) */
        SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
        /* Set pointer to Command Block. (physical address) */
        SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);

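        /*
         * Set up the media interface.  If the MD frontend supplied no media
         * list, register a single IFM_ETHER | IFM_NONE fallback entry.
         */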
        ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
        if (media != NULL) {
                for (n = 0 ; n < nmedia ; n++)
                        ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
                ifmedia_set(&sc->sc_ifmedia, defmedia);
        } else {
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
                ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
        }

        ifp->if_softc = sc;
        strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = iee_start;      /* initiate output routine */
        ifp->if_ioctl = iee_ioctl;      /* ioctl routine */
        ifp->if_init = iee_init;        /* init routine */
        ifp->if_stop = iee_stop;        /* stop routine */
        ifp->if_watchdog = iee_watchdog;        /* timer routine */
        ifp->if_drain = iee_drain;      /* routine to release resources */
        IFQ_SET_READY(&ifp->if_snd);
        /* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
        sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

        if_attach(ifp);
        ether_ifattach(ifp, eth_addr);

        aprint_normal(": Intel 82596%s address %s\n",
            i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

        for (n = 0 ; n < IEE_NCB ; n++)
                sc->sc_tx_map[n] = NULL;
        for (n = 0 ; n < IEE_NRFD ; n++) {
                sc->sc_rx_mbuf[n] = NULL;
                sc->sc_rx_map[n] = NULL;
        }
        sc->sc_tx_timeout = 0;
        sc->sc_setup_timeout = 0;
        (sc->sc_iee_reset)(sc);
        return;
}


void
iee_detach(struct iee_softc *sc, int flags)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        if ((ifp->if_flags & IFF_RUNNING) != 0)
                iee_stop(ifp, 1);
        ether_ifdetach(ifp);
        if_detach(ifp);
        return;
}


/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
        struct iee_softc *sc = ifp->if_softc;

        if (sc->sc_mediachange != NULL)
                return ((sc->sc_mediachange)(ifp));
        return(0);
}


void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
        struct iee_softc *sc = ifp->if_softc;

        if (sc->sc_mediastatus != NULL)
                return ((sc->sc_mediastatus)(ifp, ifmreq));
        return;
}


/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
        struct iee_softc *sc = ifp->if_softc;
        struct mbuf *m = NULL;
        int t;
        int n;

        if (sc->sc_next_cb != 0)
                /* There is already a CMD running. Defer packet enqueueing. */
                return;
        for (t = 0 ; t < IEE_NCB ; t++) {
                IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
                if (sc->sc_tx_mbuf[t] == NULL)
                        break;
                if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
                    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
                        /*
                         * The packet needs more TBDs than we support.
                         * Copy the packet into an mbuf cluster to get it out.
                         */
                        printf("%s: iee_start: failed to load DMA map\n",
                            sc->sc_dev.dv_xname);
                        MGETHDR(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                printf("%s: iee_start: can't allocate mbuf\n",
                                    sc->sc_dev.dv_xname);
                                m_freem(sc->sc_tx_mbuf[t]);
                                t--;
                                continue;
                        }
                        MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
                        MCLGET(m, M_DONTWAIT);
                        if ((m->m_flags & M_EXT) == 0) {
                                printf("%s: iee_start: can't allocate mbuf "
                                    "cluster\n", sc->sc_dev.dv_xname);
                                m_freem(sc->sc_tx_mbuf[t]);
                                m_freem(m);
                                t--;
                                continue;
                        }
                        m_copydata(sc->sc_tx_mbuf[t], 0,
                            sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
                        m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
                        m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
                        m_freem(sc->sc_tx_mbuf[t]);
                        sc->sc_tx_mbuf[t] = m;
                        if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
                            m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
                                printf("%s: iee_start: can't load TX DMA map\n",
                                    sc->sc_dev.dv_xname);
                                m_freem(sc->sc_tx_mbuf[t]);
                                t--;
                                continue;
                        }
                }
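                /* Fill one TBD for each DMA segment of this packet. */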
                for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
                        SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
                            sc->sc_tx_map[t]->dm_segs[n].ds_addr;
                        SC_TBD(sc->sc_next_tbd + n)->tbd_size =
                            sc->sc_tx_map[t]->dm_segs[n].ds_len;
                        SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
                            IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
                            * (sc->sc_next_tbd + n + 1));
                }
                SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
                bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
                    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
                IFQ_POLL(&ifp->if_snd, m);
                if (m == NULL)
                        iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
                            | IEE_CB_I);
                else
                        iee_cb_setup(sc, IEE_CB_CMD_TR);
                sc->sc_next_tbd += n;
#if NBPFILTER > 0
                /* Pass packet to bpf if someone listens. */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
        }
        if (t == 0)
                /* No packets got set up for TX. */
                return;
        if (t == IEE_NCB)
                ifp->if_flags |= IFF_OACTIVE;
        bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_SZ,
            IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
        (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
        return;
}


/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct iee_softc *sc = ifp->if_softc;
        int s;
        int err;

        s = splnet();
        switch (cmd) {
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                err = ifmedia_ioctl(ifp, (struct ifreq *)data,
                    &sc->sc_ifmedia, cmd);
                break;

        default:
                err = ether_ioctl(ifp, cmd, data);
                if (err == ENETRESET) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        if (ifp->if_flags & IFF_RUNNING) {
                                iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
                                    IEE_CB_EL | IEE_CB_I);
                                if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
                                        (*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
                        }
                        err = 0;
                }
                break;
        }
        splx(s);
        return(err);
}


/* init routine */
int
iee_init(struct ifnet *ifp)
{
        struct iee_softc *sc = ifp->if_softc;
        int r;
        int t;
        int n;
        int err;

        sc->sc_next_cb = 0;
        sc->sc_next_tbd = 0;
        sc->sc_flags &= ~IEE_WANT_MCAST;
        sc->sc_rx_done = 0;
        SC_SCB->scb_crc_err = 0;
        SC_SCB->scb_align_err = 0;
        SC_SCB->scb_resource_err = 0;
        SC_SCB->scb_overrun_err = 0;
        SC_SCB->scb_rcvcdt_err = 0;
        SC_SCB->scb_short_fr_err = 0;
        sc->sc_crc_err = 0;
        sc->sc_align_err = 0;
        sc->sc_resource_err = 0;
        sc->sc_overrun_err = 0;
        sc->sc_rcvcdt_err = 0;
        sc->sc_short_fr_err = 0;
        sc->sc_tx_col = 0;
        sc->sc_rx_err = 0;
        sc->sc_cmd_err = 0;
        /* Create Transmit DMA maps. */
        for (t = 0 ; t < IEE_NCB ; t++) {
                if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
                    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->sc_tx_map[t]) != 0) {
                        printf("%s: iee_init: can't create TX DMA map\n",
                            sc->sc_dev.dv_xname);
                        for (n = 0 ; n < t ; n++)
                                bus_dmamap_destroy(sc->sc_dmat,
                                    sc->sc_tx_map[n]);
                        return(ENOBUFS);
                }
        }
        /* Initialize Receive Frame and Receive Buffer Descriptors */
        err = 0;
        memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
        memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
        for (r = 0 ; r < IEE_NRFD ; r++) {
                SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
                SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
                    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

                SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
                    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
                if (sc->sc_rx_mbuf[r] == NULL) {
                        MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
                        if (sc->sc_rx_mbuf[r] == NULL) {
                                printf("%s: iee_init: can't allocate mbuf\n",
                                    sc->sc_dev.dv_xname);
                                err = 1;
                                break;
                        }
                        MCLAIM(sc->sc_rx_mbuf[r], &sc->sc_ethercom.ec_rx_mowner);
                        MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
                        if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
                                printf("%s: iee_init: can't allocate mbuf"
                                    " cluster\n", sc->sc_dev.dv_xname);
                                m_freem(sc->sc_rx_mbuf[r]);
                                err = 1;
                                break;
                        }
                }
                if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
                    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->sc_rx_map[r]) != 0) {
                        printf("%s: iee_init: can't create RX "
                            "DMA map\n", sc->sc_dev.dv_xname);
                        m_freem(sc->sc_rx_mbuf[r]);
                        err = 1;
                        break;
                }
                if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
                    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
                    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
                    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
                        printf("%s: iee_init: can't load RX DMA map\n",
                            sc->sc_dev.dv_xname);
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
                        m_freem(sc->sc_rx_mbuf[r]);
                        err = 1;
                        break;
                }
                bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
                    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
                SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
                SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
        }
        SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
        if (err != 0) {
                for (n = 0 ; n < r; n++) {
                        m_freem(sc->sc_rx_mbuf[n]);
                        sc->sc_rx_mbuf[n] = NULL;
                        bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
                        sc->sc_rx_map[n] = NULL;
                }
                for (n = 0 ; n < t ; n++) {
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
                        sc->sc_tx_map[n] = NULL;
                }
                return(ENOBUFS);
        }

        (sc->sc_iee_reset)(sc);
        iee_cb_setup(sc, IEE_CB_CMD_IAS);
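        /* Set the default chip configuration and queue a CONF command. */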
        sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
        sc->sc_cf[1] = IEE_CF_1_DEF;
        sc->sc_cf[2] = IEE_CF_2_DEF;
        sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
            | IEE_CF_3_PREAMLEN_DEF;
        sc->sc_cf[4] = IEE_CF_4_DEF;
        sc->sc_cf[5] = IEE_CF_5_DEF;
        sc->sc_cf[6] = IEE_CF_6_DEF;
        sc->sc_cf[7] = IEE_CF_7_DEF;
        sc->sc_cf[8] = IEE_CF_8_DEF;
        sc->sc_cf[9] = IEE_CF_9_DEF;
        sc->sc_cf[10] = IEE_CF_10_DEF;
        sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
        sc->sc_cf[12] = IEE_CF_12_DEF;
        sc->sc_cf[13] = IEE_CF_13_DEF;
        iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
        SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
        bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
            BUS_DMASYNC_PREWRITE);
        (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
        /* Issue a Channel Attention to ACK interrupts we may have caused. */
        (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

        /* Mark the interface as running and ready to RX/TX packets. */
        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        return(0);
}


/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
        struct iee_softc *sc = ifp->if_softc;
        int n;

        ifp->if_flags &= ~IFF_RUNNING;
        ifp->if_flags |= IFF_OACTIVE;
        ifp->if_timer = 0;
        /* Reset the chip to get it quiet. */
        (sc->sc_iee_reset)(ifp->if_softc);
        /* Issue a Channel Attention to ACK interrupts we may have caused. */
        (sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
        /* Release any dynamically allocated resources. */
        for (n = 0 ; n < IEE_NCB ; n++) {
                if (sc->sc_tx_map[n] != NULL)
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
                sc->sc_tx_map[n] = NULL;
        }
        for (n = 0 ; n < IEE_NRFD ; n++) {
                if (sc->sc_rx_mbuf[n] != NULL)
                        m_freem(sc->sc_rx_mbuf[n]);
                sc->sc_rx_mbuf[n] = NULL;
                if (sc->sc_rx_map[n] != NULL) {
                        bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
                }
                sc->sc_rx_map[n] = NULL;
        }
        return;
}


/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
        struct iee_softc *sc = ifp->if_softc;

        (sc->sc_iee_reset)(sc);
        if (sc->sc_next_tbd != 0)
                printf("%s: iee_watchdog: transmit timeout %d\n",
                    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
        else
                printf("%s: iee_watchdog: setup timeout %d\n",
                    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
        iee_init(ifp);
        return;
}


/* routine to release resources */
void
iee_drain(struct ifnet *ifp)
{
        iee_stop(ifp, 0);
        return;
}