/*	$NetBSD: hme.c,v 1.26 2001/11/25 22:12:01 tron Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.26 2001/11/25 22:12:01 tron Exp $");

#undef HMEDEBUG

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

#define HME_RX_OFFSET 2
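
/*
 * Received frames are written HME_RX_OFFSET bytes into their buffers
 * (see the ERX configuration in hme_init() and hme_newbuf()).  With a
 * 14-byte Ethernet header, the 2-byte pad leaves the IP header 32-bit
 * aligned for the protocol stack.
 */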

void		hme_start __P((struct ifnet *));
void		hme_stop __P((struct hme_softc *));
int		hme_ioctl __P((struct ifnet *, u_long, caddr_t));
void		hme_tick __P((void *));
void		hme_watchdog __P((struct ifnet *));
void		hme_shutdown __P((void *));
void		hme_init __P((struct hme_softc *));
void		hme_meminit __P((struct hme_softc *));
void		hme_mifinit __P((struct hme_softc *));
void		hme_reset __P((struct hme_softc *));
void		hme_setladrf __P((struct hme_softc *));
int		hme_newbuf __P((struct hme_softc *, struct hme_sxd *, int));
int		hme_encap __P((struct hme_softc *, struct mbuf *, int *));

/* MII methods & callbacks */
static int	hme_mii_readreg __P((struct device *, int, int));
static void	hme_mii_writereg __P((struct device *, int, int, int));
static void	hme_mii_statchg __P((struct device *));

int		hme_mediachange __P((struct ifnet *));
void		hme_mediastatus __P((struct ifnet *, struct ifmediareq *));

int		hme_eint __P((struct hme_softc *, u_int));
int		hme_rint __P((struct hme_softc *));
int		hme_tint __P((struct hme_softc *));

void
hme_config(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * the local Ethernet address:
	 *	sc_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}
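
	/*
	 * sc_rxmap_spare supports a swap scheme for receive buffers: a
	 * fresh cluster is loaded into the spare map first, and only on
	 * success is it exchanged with the descriptor's own map (see
	 * hme_newbuf()), so a failed allocation never leaves a receive
	 * descriptor without a mapped buffer.
	 */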

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size.  Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of the number of descriptors.
	 */
	size =	(HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
		(HME_XD_SIZE * HME_TX_RING_MAX);	/* TX descriptors */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
288 " at phy %d, instance %d\n",
289 sc->sc_dev.dv_xname,
290 child->mii_dev.dv_xname,
291 child->mii_phy, child->mii_inst);
292 continue;
293 }
294
295 sc->sc_phys[child->mii_inst] = child->mii_phy;
296 }
297
298 /*
299 * XXX - we can really do the following ONLY if the
300 * phy indeed has the auto negotiation capability!!
301 */
302 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
303 }
304
305 /* Attach the interface. */
306 if_attach(ifp);
307 ether_ifattach(ifp, sc->sc_enaddr);
308
309 sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
310 if (sc->sc_sh == NULL)
311 panic("hme_config: can't establish shutdownhook");
312
313 #if NRND > 0
314 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
315 RND_TYPE_NET, 0);
316 #endif
317
318 callout_init(&sc->sc_tick_ch);
319 return;
320
321 fail:
322 if (sc->sc_rxmap_spare != NULL)
323 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
324 for (i = 0; i < HME_TX_RING_SIZE; i++)
325 if (sc->sc_txd[i].sd_map != NULL)
326 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
327 for (i = 0; i < HME_RX_RING_SIZE; i++)
328 if (sc->sc_rxd[i].sd_map != NULL)
329 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
330 }
331
332 void
333 hme_tick(arg)
334 void *arg;
335 {
336 struct hme_softc *sc = arg;
337 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
338 bus_space_tag_t t = sc->sc_bustag;
339 bus_space_handle_t mac = sc->sc_mac;
340 int s;
341
342 s = splnet();
343 /*
344 * Unload collision counters
345 */
346 ifp->if_collisions +=
347 bus_space_read_4(t, mac, HME_MACI_NCCNT) +
348 bus_space_read_4(t, mac, HME_MACI_FCCNT) +
349 bus_space_read_4(t, mac, HME_MACI_EXCNT) +
350 bus_space_read_4(t, mac, HME_MACI_LTCNT);
351
352 /*
353 * then clear the hardware counters.
354 */
355 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
356 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
357 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
358 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
359
360 mii_tick(&sc->sc_mii);
361 splx(s);
362
363 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
364 }
365
366 void
367 hme_reset(sc)
368 struct hme_softc *sc;
369 {
370 int s;
371
372 s = splnet();
373 hme_init(sc);
374 splx(s);
375 }
376
377 void
378 hme_stop(sc)
379 struct hme_softc *sc;
380 {
381 bus_space_tag_t t = sc->sc_bustag;
382 bus_space_handle_t seb = sc->sc_seb;
383 int n;
384
385 callout_stop(&sc->sc_tick_ch);
386 mii_down(&sc->sc_mii);
387
388 /* Reset transmitter and receiver */
389 bus_space_write_4(t, seb, HME_SEBI_RESET,
390 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
391
	for (n = 0; n < 20; n++) {
		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			break;
		DELAY(20);
	}
	if (n >= 20)
		printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);

	/*
	 * Release any transmit buffers still held.  This must happen even
	 * when the reset completes, or the loaded DMA maps and mbufs
	 * would leak on every stop.
	 */
	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_loaded) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			sc->sc_txd[n].sd_loaded = 0;
		}
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
}

void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte
	   boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (hme_newbuf(sc, &sc->sc_rxd[i], 1)) {
			printf("%s: rx allocation failed\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i,
		    sc->sc_rxd[i].sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));
	}

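	/*
	 * Reset the ring indices: sc_tx_prod is where hme_start() queues,
	 * sc_tx_cons is where hme_tint() reclaims, and sc_last_rd is the
	 * next receive descriptor hme_rint() will examine.
	 */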
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;
	sc->sc_last_rd = 0;
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	bus_space_handle_t mif = sc->sc_mif;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE, HME_MTU);

	/* Load station MAC address */
	ea = sc->sc_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
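	/*
	 * Each MACADDR register takes 16 bits of the 48-bit station
	 * address, most significant octet in the high byte, hence the
	 * three writes above.
	 */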

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE, HME_MTU);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));
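	/*
	 * A set bit in the mask register disables the corresponding
	 * interrupt source, so writing the complement enables exactly
	 * the TX/RX completion and error interrupts listed above.
	 */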

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
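	/*
	 * sc_burst is supplied by the bus front-end (see the field list
	 * at the top of hme_config()); any value other than 16, 32 or
	 * 64 falls back to the chip default (v = 0).
	 */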
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);
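	/*
	 * This write supersedes the ring size programmed in step 6 & 7
	 * above, using the encoding the ETX expects: the number of
	 * descriptors in increments of 16, minus one.
	 */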

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error "RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
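	/*
	 * The (HME_RX_OFFSET << 3) term apparently places the receive
	 * first-byte offset in its field starting at bit 3 of the ERX
	 * configuration register; it must agree with the offset applied
	 * to the mbuf data pointer in hme_newbuf().
	 */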

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((bus_space_read_4(t, mif, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
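	/*
	 * A transmit slot is free when its sd_mbuf is NULL: hme_encap()
	 * records the mbuf chain only on the final descriptor of each
	 * packet, and performs the real ring headroom check itself.
	 */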
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (hme_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.  Tap only after
		 * a successful hme_encap(); a packet left on the queue
		 * would otherwise be seen by BPF more than once.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
	}

	sc->sc_tx_prod = bix;
	ifp->if_timer = 5;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		if (sc->sc_tx_cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		if (txflags & HME_XD_EOP)
			ifp->if_opackets++;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
		sd->sd_loaded = 0;

		if (sd->sd_mbuf != NULL) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--sc->sc_tx_cnt;
	}

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_last_rd;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			ifp->if_ierrors++;
			goto again;
		}

		m = sd->sd_mbuf;
		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		if (hme_newbuf(sc, sd, 0)) {
			/*
			 * Allocation of new mbuf cluster failed, leave the
			 * old one in place and keep going.
			 */
			ifp->if_ierrors++;
			goto again;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Pass the packet up. */
		(*ifp->if_input)(ifp, m);

again:
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));
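
		/*
		 * The descriptor has now been re-armed and handed back to
		 * the chip; on the error paths above it re-uses the old
		 * buffer, so a failure costs only the current packet.
		 */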

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;
	}

	sc->sc_last_rd = ri;
	return (1);
}

int
hme_eint(sc, status)
	struct hme_softc *sc;
	u_int status;
{
	char bits[128];

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		return (1);
	}

	printf("%s: status=%s\n", sc->sc_dev.dv_xname,
	    bitmask_snprintf(status, HME_SEB_STAT_BITS, bits, sizeof(bits)));
	return (1);
}

int
hme_intr(v)
	void *v;
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}

void
hme_watchdog(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	u_int32_t v;

	/* Configure the MIF in frame mode */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
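
	/*
	 * The MIF shifts the frame out autonomously; completion is
	 * signalled by the turnaround LSB (HME_MIF_FO_TALSB), after
	 * which the low 16 bits of the register hold the data returned
	 * by the PHY.
	 */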

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

int
hme_mediachange(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			hme_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else {
				memcpy(LLADDR(ifp->if_sadl),
				    ina->x_host.c_host, sizeof(sc->sc_enaddr));
			}
			/* Set new address. */
			hme_init(sc);
			break;
		    }
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init(sc);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			hme_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

void
hme_shutdown(arg)
	void *arg;
{
	hme_stop((struct hme_softc *)arg);
}

/*
 * Set up the logical address filter.
 */
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ac = &sc->sc_ethercom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t v;
	int len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high
	 * order 6 bits as an index into the 64-bit logical address
	 * filter.  The high order two bits select the word, while the
	 * remaining four bits select the bit within the word.
	 */

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}

int
hme_encap(sc, mhead, bixp)
	struct hme_softc *sc;
	struct mbuf *mhead;
	int *bixp;
{
	struct hme_sxd *sd;
	struct mbuf *m;
	int frag, cur, cnt = 0;
	u_int32_t flags;
	struct hme_ring *hr = &sc->sc_rb;

	cur = frag = *bixp;
	sd = &sc->sc_txd[frag];

	for (m = mhead; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + cnt)) < 5)
			goto err;

		if (bus_dmamap_load(sc->sc_dmatag, sd->sd_map,
		    mtod(m, caddr_t), m->m_len, NULL, BUS_DMA_NOWAIT) != 0)
			goto err;

		sd->sd_loaded = 1;
		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		sd->sd_mbuf = NULL;

		flags = HME_XD_ENCODE_TSIZE(m->m_len);
		if (cnt == 0)
			flags |= HME_XD_SOP;
		else
			flags |= HME_XD_OWN;

		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

		cur = frag;
		cnt++;
		if (++frag == HME_TX_RING_SIZE) {
			frag = 0;
			sd = sc->sc_txd;
		} else
			sd++;
	}

	/* Set end of packet on last descriptor. */
	flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
	flags |= HME_XD_EOP;
	HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);
	sc->sc_txd[cur].sd_mbuf = mhead;

	/* Give first frame over to the hardware. */
	flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, (*bixp));
	flags |= HME_XD_OWN;
	HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, (*bixp), flags);
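
	/*
	 * HME_XD_OWN is set on the first descriptor only after the rest
	 * of the chain has been built, so the chip cannot start on a
	 * partially constructed packet.
	 */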

	sc->sc_tx_cnt += cnt;
	*bixp = frag;

	/*
	 * The descriptor area was mapped with BUS_DMA_COHERENT, so no
	 * explicit bus_dmamap_sync() of the descriptors is needed here.
	 */

	return (0);

err:
	/*
	 * Invalidate the stuff we may have already put into place.  We
	 * will be called again to queue it later.
	 */
	for (; cnt > 0; cnt--) {
		if (--frag == -1)
			frag = HME_TX_RING_SIZE - 1;
		sd = &sc->sc_txd[frag];
		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
		sd->sd_loaded = 0;
		sd->sd_mbuf = NULL;
	}
	return (ENOBUFS);
}

int
hme_newbuf(sc, d, freeit)
	struct hme_softc *sc;
	struct hme_sxd *d;
	int freeit;
{
	struct mbuf *m;
	bus_dmamap_t map;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.rcvif = &sc->sc_ethercom.ec_if;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/*
	 * Load the new cluster into the spare DMA map first.  The old
	 * map and mbuf are released only once this succeeds, so a
	 * failure here leaves the descriptor's previous buffer intact
	 * and we merely lose the current packet.
	 */
	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (d->sd_loaded) {
		bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0,
		    d->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, d->sd_map);
		d->sd_loaded = 0;
	}
	if ((d->sd_mbuf != NULL) && freeit) {
		m_freem(d->sd_mbuf);
		d->sd_mbuf = NULL;
	}

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	d->sd_loaded = 1;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}