/*	$NetBSD: i82596.c,v 1.5 2005/02/17 11:23:36 tsutsui Exp $	*/

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596 10 Mbit/s Ethernet chip.
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old
 * i82586 ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596
 * to i82586 compatibility mode.
 * Documentation about this chip can be found at http://www.openpa.net/,
 * file names 29021806.pdf and 29021906.pdf.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.5 2005/02/17 11:23:36 tsutsui Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <machine/bus.h>
#include <machine/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>



/* Supported chip variants */
char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };



/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, caddr_t);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */
static void iee_drain(struct ifnet *);			/* release resources */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, u_int32_t);

/*
Things a MD frontend has to provide:

The functions via function pointers in the softc:
	int (*sc_iee_cmd)(struct iee_softc *sc, u_int32_t cmd);
	int (*sc_iee_reset)(struct iee_softc *sc);
	void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
	int (*sc_mediachange)(struct ifnet *);

sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
	to the SCB cmd word and issuing a Channel Attention.
sc_iee_reset(): initiate a reset, supply the address of the SCP to the
	chip, wait for the chip to initialize and ACK interrupts that
	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
These functions must carefully bus_dmamap_sync() all data they have touched!

sc_mediastatus() and sc_mediachange() are just MD hooks for the
corresponding MI functions. The MD frontend may set these pointers to
NULL when they are not needed.

sc->sc_type has to be set to I82596_UNKNOWN, I82596_DX or I82596_CA.
This is for printing out the correct chip type at attach time only. The
MI backend doesn't distinguish different chip types when programming
the chip.

sc->sc_flags has to be set to 0 on little endian hardware and to
IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
done by the bus attachment. Usually you need to set IEE_NEED_SWAP
when IEE_SYSBUS_BE is set in the sysbus byte.

sc->sc_cl_align must be set to 1 or to the cache line size. When set to
1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
it forces alignment of the data structures in the shared memory to a
multiple of sc->sc_cl_align. This is needed on archs like hp700 whose
caches are not DMA I/O coherent and that are unable to map the shared
memory uncacheable. (At least pre-PA7100LC CPUs are unable to map memory
uncacheable.)

sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)

The MD frontend has to allocate a piece of DMA memory at least
IEE_SHMEM_MAX bytes in size. All communication with the chip is done via
this shared memory. If possible map this memory non-cacheable on archs
with caches that are not DMA I/O coherent. The base of the memory needs
to be aligned to an even address if sc->sc_cl_align == 1 and aligned to
a cache line if sc->sc_cl_align != 1.

An interrupt with iee_intr() as handler must be established.

Call void iee_attach(struct iee_softc *sc, u_int8_t *ether_address,
int *media, int nmedia, int defmedia); when everything is set up. The
first parameter is a pointer to the MI softc, ether_address is an array
that contains the Ethernet address. media is an array of the media types
provided by the hardware. The members of this array are supplied to
ifmedia_add() in sequence. nmedia is the count of elements in media.
defmedia is the default media that is set via ifmedia_set().
nmedia and defmedia are ignored when media == NULL.

The MD frontend may call iee_detach() to detach the device.

See sys/arch/hp700/gsc/if_iee.c for an example; a minimal sketch of the
sc_iee_cmd() hook follows below.
*/
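
/*
 * A minimal sketch of an MD sc_iee_cmd() hook, assuming a hypothetical
 * md_channel_attention() that stands for whatever bus access asserts
 * Channel Attention on the particular hardware (that name is illustrative,
 * not part of the MI API). The hook writes the command word, syncs the
 * SCB and polls until the chip has accepted the command:
 *
 *	int
 *	xx_iee_cmd(struct iee_softc *sc, u_int32_t cmd)
 *	{
 *		int n;
 *
 *		SC_SCB->scb_cmd = cmd;
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_SCB_OFF,
 *		    IEE_SCB_SZ, BUS_DMASYNC_PREWRITE);
 *		md_channel_attention(sc);
 *		for (n = 0 ; n < 100000 ; n++) {
 *			delay(1);
 *			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
 *			    IEE_SCB_OFF, IEE_SCB_SZ, BUS_DMASYNC_POSTREAD);
 *			if (SC_SCB->scb_cmd == 0)
 *				return (0);
 *		}
 *		return (-1);
 *	}
 */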


/*
How frame reception is done:
Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
Each RBD points to the data area of a mbuf cluster. The RFDs are linked
together in a circular list. sc->sc_rx_done is the count of RFDs in the
list already processed / the number of the RFD that has to be checked for
a new frame first at the next RX interrupt. Upon successful reception of
a frame the mbuf cluster is handed to the upper protocol layers, a new
mbuf cluster is allocated and the RFD / RBD are reinitialized accordingly.

When an RFD list overrun has occurred, the whole RFD and RBD lists are
reinitialized and frame reception is started again.
*/
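/*
 * A condensed sketch of the per-frame ring bookkeeping done below: the
 * freshly recycled RBD is marked as end of list and the mark on its
 * predecessor is removed, so the EL bit always trails one slot behind
 * the next descriptor the driver will look at and the chip can not
 * overwrite a buffer that has not been processed yet:
 *
 *	SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
 *	    &= ~IEE_RBD_EL;
 *	rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
 *	sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
 */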
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return(1);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((rfd->rfd_status & IEE_RFD_C) != 0) {
		/* At least one packet was received. */
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rbd_count=0x%.4x\n", sc->sc_dev.dv_xname,
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    sc->sc_rx_map[n]->dm_segs[0].ds_addr;
		}
		SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    sc->sc_dev.dv_xname);
	}

	if (sc->sc_next_cb != 0
	    && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, cleanup */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					sc->sc_tx_col += 16;
				else
					sc->sc_tx_col += SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    sc->sc_dev.dv_xname, scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_crc_err);
	}
	if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_align_err);
	}
	if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_resource_err);
	}
	if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_overrun_err);
	}
	if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_short_fr_err);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(1);
}



/*
How Command Block List Processing is done.

A running CBL is never manipulated. If there is a CBL already running,
further CMDs are deferred until the current list is done. A new list is
set up when the old one has finished.
This eases programming. To manipulate a running CBL it is necessary to
suspend the Command Unit to avoid race conditions. After a suspend
is sent we have to wait for an interrupt that ACKs the suspend. Then
we can manipulate the CBL and resume operation. I am not sure that this
is more effective than the current, much simpler approach. => KISS
See i82596CA data sheet page 26.

A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).

A CBL may consist of TX CMDs, and _only_ TX CMDs.
A TX CBL is running or on the way to be set up when
((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).

A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
non-TX CMDs.

This follows mostly from the way an Ethernet driver works and from the
fact that running CBLs are not manipulated while they are on the way. If
if_start() is called there will be TX CMDs enqueued so we have a running
CBL, and other CMDs from e.g. if_ioctl() will be deferred and vice versa.

The Multicast Setup Command is special. A MCS needs more space than a
single CB has. The actual space requirement depends on the length of the
multicast list. So we always defer MCS until other CBLs are finished,
then we set up a CONF CMD in the first CB. The CONF CMD is needed to
turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
use all the remaining space in the CBL and the Transmit Buffer Descriptor
List. (Therefore CBL and TBDL must be contiguous in physical and virtual
memory. This is guaranteed through the definitions of the list offsets
in i82596reg.h and because only a single DMA segment is used for all
lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
a multicast list length of 0, thus disabling the multicast filter.
A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0).
The resulting layout is sketched below.
*/
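
/*
 * Shared memory layout of such a deferred multicast setup, as it follows
 * from the list offsets in i82596reg.h (a sketch, sizes not to scale):
 *
 *	CB 0: CONF	turns IEE_CF_11_MCALL on or off
 *	CB 1: MCS	mc_addrs[] may grow past the remaining CBs into
 *			the adjacent TBD list, which is idle at that time
 *
 * This is why the size check in iee_cb_setup() compares
 * mc_size * ETHER_ADDR_LEN + 2 * IEE_CB_SZ against
 * IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ: the two leading command blocks plus
 * the multicast addresses must fit into the combined CBL / TBDL space.
 */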
void
iee_cb_setup(struct iee_softc *sc, u_int32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy((void *)cb->cb_ind_addr, LLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy((void *)cb->cb_cf, sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promiscuous mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy((void *)&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN],
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd);
		cb->cb_cmd |= IEE_CB_SF;	/* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}



void
iee_attach(struct iee_softc *sc, u_int8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);
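
	/*
	 * The resulting chain of system data structures, all inside the
	 * single shared memory segment (a sketch; the address of the SCP
	 * itself is handed to the chip by the MD sc_iee_reset() hook):
	 *
	 *	SCP -> ISCP -> SCB -+-> RFA (RFDs + RBDs)
	 *	                    +-> CBL (CBs + TBDs)
	 */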

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;		/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;		/* ioctl routine */
	ifp->if_init = iee_init;		/* init routine */
	ifp->if_stop = iee_stop;		/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	ifp->if_drain = iee_drain;		/* routine to release resources */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
	return;
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	return;
}



/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return ((sc->sc_mediachange)(ifp));
	return(0);
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		return ((sc->sc_mediastatus)(ifp, ifmreq));
	return;
}



/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueueing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into a mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    sc->sc_dev.dv_xname);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    sc->sc_tx_map[t]->dm_segs[n].ds_addr;
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1));
		}
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
	return;
}



/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, (struct ifreq *)data,
		    &sc->sc_ifmedia, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET) {
			/*
			 * The multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
				    IEE_CB_EL | IEE_CB_I);
				if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
					(*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			}
			err = 0;
		}
		break;
	}
	splx(s);
	return(err);
}



/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    sc->sc_dev.dv_xname);
			for (n = 0 ; n < t ; n++) {
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
				sc->sc_tx_map[n] = NULL;
			}
			return(ENOBUFS);
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

		SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r], &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_rx_mbuf[r]);
				sc->sc_rx_mbuf[r] = NULL;
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", sc->sc_dev.dv_xname);
			m_freem(sc->sc_rx_mbuf[r]);
			sc->sc_rx_mbuf[r] = NULL;
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			sc->sc_rx_map[r] = NULL;
			m_freem(sc->sc_rx_mbuf[r]);
			sc->sc_rx_mbuf[r] = NULL;
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
	}
	SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
	if (err != 0) {
		for (n = 0 ; n < r ; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return(ENOBUFS);
	}

	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return(0);
}



/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(sc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
	return;
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
	iee_init(ifp);
	return;
}



/* routine to release resources */
void
iee_drain(struct ifnet *ifp)
{
	iee_stop(ifp, 0);
	return;
}