/* $NetBSD: i82596.c,v 1.2 2004/08/26 16:56:07 jkunz Exp $ */

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596 10MBit/s Ethernet chip.
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old
 * i82586 ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the
 * i82596 to i82586 compatibility mode.
 * Documentation about this chip can be found at http://www.openpa.net/,
 * file names 29021806.pdf and 29021906.pdf.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.2 2004/08/26 16:56:07 jkunz Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <machine/bus.h>
#include <machine/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/ioccom.h>
#include <sys/types.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>



/* Supported chip variants */
char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };



/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, caddr_t);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */
static void iee_drain(struct ifnet *);			/* release resources */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, u_int32_t);

/*
Things an MD frontend has to provide:

The functions via function pointers in the softc:
	int (*sc_iee_cmd)(struct iee_softc *sc, u_int32_t cmd);
	int (*sc_iee_reset)(struct iee_softc *sc);
	void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
	int (*sc_mediachange)(struct ifnet *);

sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
	to the SCB cmd word and issuing a Channel Attention.
sc_iee_reset(): initiate a reset, supply the address of the SCP to the
	chip, wait for the chip to initialize and ACK interrupts that
	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
These functions must carefully bus_dmamap_sync() all data they have touched!

sc_mediastatus() and sc_mediachange() are just MD hooks to the according
MI functions. The MD frontend may set these pointers to NULL when they
are not needed.

sc->sc_type has to be set to I82596_UNKNOWN or I82596_DX or I82596_CA.
This is for printing out the correct chip type at attach time only. The
MI backend doesn't distinguish different chip types when programming
the chip.

sc->sc_flags has to be set to 0 on little endian hardware and to
IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
done by the bus attachment. Usually you need to set IEE_NEED_SWAP
when IEE_SYSBUS_BE is set in the sysbus byte.

sc->sc_cl_align must be set to 1 or to the cache line size. When set to
1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
it forces alignment of the data structures in the shared memory to a multiple
of sc->sc_cl_align. This is needed on archs like hp700 that have non DMA
I/O coherent caches and are unable to map the shared memory uncacheable.
(At least pre PA7100LC CPUs are unable to map memory uncacheable.)

sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)

The MD frontend has to allocate a piece of DMA memory at least of
IEE_SHMEM_MAX bytes size. All communication with the chip is done via
this shared memory. If possible map this memory non-cacheable on
archs with non DMA I/O coherent caches. The base of the memory needs
to be aligned to an even address if sc->sc_cl_align == 1 and aligned
to a cache line if sc->sc_cl_align != 1.

An interrupt with iee_intr() as handler must be established.

Call void iee_attach(struct iee_softc *sc, u_int8_t *ether_address,
int *media, int nmedia, int defmedia); when everything is set up. First
parameter is a pointer to the MI softc, ether_address is an array that
contains the Ethernet address. media is an array of the media types
provided by the hardware. The members of this array are supplied to
ifmedia_add() in sequence. nmedia is the count of elements in media.
defmedia is the default media that is set via ifmedia_set().
nmedia and defmedia are ignored when media == NULL.

The MD frontend may call iee_detach() to detach the device.

See sys/arch/hp700/gsc/if_iee.c for an example.
*/
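
/*
 * A condensed, hypothetical sketch of the MD frontend duties listed
 * above. The names mydev_attach(), mydev_cmd() and mydev_reset(), the
 * chip register access and the interrupt establishment are invented
 * for illustration and machine dependent, and sc_shmem_addr is assumed
 * from i82596var.h. See sys/arch/hp700/gsc/if_iee.c for a real frontend.
 * This block is compiled out on purpose.
 */
#if 0
static int
mydev_cmd(struct iee_softc *sc, u_int32_t cmd)
{

	/* Post the command to the SCB and issue a Channel Attention. */
	SC_SCB->scb_cmd = cmd;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_SCB_OFF,
	    IEE_SCB_SZ, BUS_DMASYNC_PREWRITE);
	/* MD part: write the chip's Channel Attention register here. */
	return(0);
}

static void
mydev_attach(struct device *parent, struct device *self, void *aux)
{
	struct iee_softc *sc = (struct iee_softc *)self;
	bus_dma_segment_t seg;
	int rsegs;
	u_int8_t eth_addr[ETHER_ADDR_LEN];	/* filled from MD PROM */

	sc->sc_iee_cmd = mydev_cmd;
	sc->sc_iee_reset = mydev_reset;	/* MD reset routine, not shown */
	sc->sc_mediastatus = NULL;	/* no MD media hooks needed */
	sc->sc_mediachange = NULL;
	sc->sc_type = I82596_CA;
	sc->sc_flags = 0;		/* or IEE_NEED_SWAP on BE hardware */
	sc->sc_cl_align = 1;		/* must be set before IEE_SHMEM_MAX */

	/* Allocate, map and load the shared memory. */
	if (bus_dmamem_alloc(sc->sc_dmat, IEE_SHMEM_MAX, PAGE_SIZE, 0,
	    &seg, 1, &rsegs, BUS_DMA_NOWAIT) != 0)
		return;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rsegs, IEE_SHMEM_MAX,
	    (caddr_t *)&sc->sc_shmem_addr, BUS_DMA_NOWAIT) != 0)
		return;
	if (bus_dmamap_create(sc->sc_dmat, IEE_SHMEM_MAX, 1, IEE_SHMEM_MAX,
	    0, BUS_DMA_NOWAIT, &sc->sc_shmem_map) != 0)
		return;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map,
	    sc->sc_shmem_addr, IEE_SHMEM_MAX, NULL, BUS_DMA_NOWAIT) != 0)
		return;
	memset(sc->sc_shmem_addr, 0, IEE_SHMEM_MAX);

	/* MD part: establish the interrupt with iee_intr() as handler. */

	iee_attach(sc, eth_addr, NULL, 0, 0);
}
#endif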


/*
How frame reception is done:
Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
Each RBD points to the data area of a mbuf cluster. The RFDs are linked
together in a circular list. sc->sc_rx_done is the count of RFDs in the
list already processed / the number of the RFD that has to be checked for
a new frame first at the next RX interrupt. Upon successful reception of
a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
cluster is allocated and the RFD / RBD are reinitialized accordingly.

When an RFD list overrun occurred the whole RFD and RBD lists are
reinitialized and frame reception is started again.
*/
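
/*
 * Sketch of the receive ring (indices taken modulo IEE_NRFD):
 *
 *   RFD[0] -> RFD[1] -> ... -> RFD[IEE_NRFD - 1] -> RFD[0]
 *
 * RBD[n] supplies the mbuf cluster data area for RFD[n]. The RBD just
 * before sc->sc_rx_done carries the IEE_RBD_EL (end of list) bit, so
 * the chip stops before it can overwrite buffers not yet processed.
 */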
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return(1);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((rfd->rfd_status & IEE_RFD_C) != 0) {
		/* At least one packet was received. */
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
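		/*
		 * Clear the end of list bit on the predecessor RBD
		 * (the + IEE_NRFD - 1 modulo arithmetic is the ring
		 * equivalent of "minus one"), making it usable again.
		 */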
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rfd_count=0x%.4x\n", sc->sc_dev.dv_xname,
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    sc->sc_rx_map[n]->dm_segs[0].ds_addr;
		}
		SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    sc->sc_dev.dv_xname);
	}

	if (sc->sc_next_cb != 0
	    && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, cleanup */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					sc->sc_tx_col += 16;
				else
					sc->sc_tx_col += SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    sc->sc_dev.dv_xname, scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_crc_err);
	}
	if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_align_err);
	}
	if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_resource_err);
	}
	if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_overrun_err);
	}
	if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_short_fr_err);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(1);
}



/*
How Command Block List Processing is done.

A running CBL is never manipulated. If there is a CBL already running,
further CMDs are deferred until the current list is done. A new list is
set up when the old one has finished.
This eases programming. To manipulate a running CBL it is necessary to
suspend the Command Unit to avoid race conditions. After a suspend
is sent we have to wait for an interrupt that ACKs the suspend. Then
we can manipulate the CBL and resume operation. I am not sure that this
is more effective than the current, much simpler approach. => KISS
See i82596CA data sheet page 26.

A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).

A CBL may consist of TX CMDs, and _only_ TX CMDs.
A TX CBL is running or on the way to be set up when
((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).

A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
non-TX CMDs.

This mostly follows from the way an Ethernet driver works and
because running CBLs are not manipulated when they are on the way. If
if_start() is called there will be TX CMDs enqueued so we have a running
CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.

The Multicast Setup Command is special. An MCS needs more space than
a single CB has. Actual space requirement depends on the length of the
multicast list. So we always defer MCS until other CBLs are finished,
then we set up a CONF CMD in the first CB. The CONF CMD is needed to
turn ALLMULTI on or off in the hardware. The MCS is the 2nd CB and may
use all the remaining space in the CBL and the Transmit Buffer Descriptor
List. (Therefore CBL and TBDL must be contiguous in physical and virtual
memory. This is guaranteed through the definitions of the list offsets
in i82596reg.h and because only a single DMA segment is used for all
lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
a multicast list length of 0, thus disabling the multicast filter.
A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
*/
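
/*
 * Layout sketch (illustrative, not from the data sheet) of a deferred
 * MCS with its companion CONF CMD as set up by iee_cb_setup() below:
 *
 *   CB 0: CONF CMD, sets or clears IEE_CF_11_MCALL (ALLMULTI)
 *   CB 1: MCS CMD, mc_addrs[] may grow into the remaining CBL
 *         and TBDL space
 */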
void
iee_cb_setup(struct iee_softc *sc, u_int32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch(cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy((void*)cb->cb_ind_addr, LLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy((void*)cb->cb_cf, sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to enable/disable ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy((void*) &cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN],
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
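		/*
		 * Set up the CONF CMD in the first CB (sc->sc_next_cb is
		 * still 0 here), as described in the comment above.
		 */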
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd);
		cb->cb_cmd |= IEE_CB_SF;	/* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}



void
iee_attach(struct iee_softc *sc, u_int8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;	/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;	/* ioctl routine */
	ifp->if_init = iee_init;	/* init routine */
	ifp->if_stop = iee_stop;	/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	ifp->if_drain = iee_drain;	/* routine to release resources */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
	return;
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	return;
}



/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return ((sc->sc_mediachange)(ifp));
	return(0);
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		return ((sc->sc_mediastatus)(ifp, ifmreq));
	return;
}



/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueueing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into a mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    sc->sc_dev.dv_xname);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    sc->sc_tx_map[t]->dm_segs[n].ds_addr;
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1));
		}
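		/* Mark the last TBD of this frame as end of list. */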
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_SZ,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
	return;
}



/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	if (cmd == SIOCSIFMEDIA || cmd == SIOCGIFMEDIA)
		err = ifmedia_ioctl(ifp, (struct ifreq *) data,
		    &sc->sc_ifmedia, cmd);
	else {
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET ||
		    ((ifp->if_flags & IFF_PROMISC) != 0
		    && (sc->sc_cf[8] & IEE_CF_8_PRM) == 0)
		    || ((ifp->if_flags & IFF_PROMISC) == 0
		    && (sc->sc_cf[8] & IEE_CF_8_PRM) != 0)) {
			/* Do multicast setup / toggle promisc mode. */
			if ((ifp->if_flags & IFF_PROMISC) != 0)
				sc->sc_cf[8] |= IEE_CF_8_PRM;
			else
				sc->sc_cf[8] &= ~IEE_CF_8_PRM;
			/* Put new multicast list into the hardware filter. */
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
				/* Mcast setup is not deferred. */
				(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			err = 0;
		}
	}
	splx(s);
	return(err);
}



/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    sc->sc_dev.dv_xname);
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
			return(ENOBUFS);
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

		SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r], &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_rx_mbuf[r]);
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", sc->sc_dev.dv_xname);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
	}
	SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
	if (err != 0) {
		for (n = 0 ; n < r ; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return(ENOBUFS);
	}

	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return(0);
}



/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
	return;
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
	iee_init(ifp);
	return;
}



/* routine to release resources */
void
iee_drain(struct ifnet *ifp)
{
	iee_stop(ifp, 0);
	return;
}