/*	$NetBSD: if_xe.c,v 1.5.8.3 2002/09/17 21:16:24 nathanw Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) if (xe_debug) printf x;
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;

extern int turbo;

int xe_match __P((struct device *, struct cfdata *, void *));
void xe_attach __P((struct device *, struct device *, void *));
int xe_tint __P((void *));
int xe_rint __P((void *));

struct mbuf *xe_dma_rxmap_load __P((struct mb8795_softc *,
		bus_dmamap_t map));

bus_dmamap_t xe_dma_rx_continue __P((void *));
void xe_dma_rx_completed __P((bus_dmamap_t, void *));
bus_dmamap_t xe_dma_tx_continue __P((void *));
void xe_dma_tx_completed __P((bus_dmamap_t, void *));
void xe_dma_rx_shutdown __P((void *));
void xe_dma_tx_shutdown __P((void *));

static void findchannel_defer __P((struct device *));

struct cfattach xe_ca = {
	sizeof(struct xe_softc), xe_match, xe_attach
};

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char xe_read_reg __P((struct mb8795_softc *, int));
void xe_write_reg __P((struct mb8795_softc *, int, u_char));
void xe_dma_reset __P((struct mb8795_softc *));
void xe_dma_rx_setup __P((struct mb8795_softc *));
void xe_dma_rx_go __P((struct mb8795_softc *));
struct mbuf *xe_dma_rx_mbuf __P((struct mb8795_softc *));
void xe_dma_tx_setup __P((struct mb8795_softc *));
void xe_dma_tx_go __P((struct mb8795_softc *));
int xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
int xe_dma_tx_isactive __P((struct mb8795_softc *));
#if 0
int xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
		size_t *, int, size_t *));
void xe_dma_go __P((struct mb8795_softc *));
void xe_dma_stop __P((struct mb8795_softc *));
int xe_dma_isactive __P((struct mb8795_softc *));
#endif

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
#if 0
	xe_dma_setup,
	xe_dma_go,
	xe_dma_stop,
	xe_dma_isactive,
	NULL,		/* gl_clear_latched_intr */
#endif
};

int
xe_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

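/*
 * Finish attachment once the "enetx" and "enetr" DMA channels exist.
 * xe_attach() calls this directly if both channels have already attached,
 * and otherwise defers it via config_defer().
 */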
static void
findchannel_defer(self)
	struct device *self;
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel ("enetx");
		if (xsc->sc_txdma == NULL)
			panic ("%s: can't find enetx dma channel",
			    sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel ("enetr");
		if (xsc->sc_rxdma == NULL)
			panic ("%s: can't find enetr dma channel",
			    sc->sc_dev.dv_xname);
	}
	printf ("%s: using dma channels %s %s\n", sc->sc_dev.dv_xname,
	    xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf (xsc->sc_txdma, cb_arg, sc);

	/* Initialize the dma maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
	    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
	    &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
		    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d\n",
			    sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/* @@@ more next hacks
	 * the 2000 covers at least a 1500 mtu + headers
	 * + DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n", sc->sc_dev.dv_xname));

	{
		extern u_char rom_enetaddr[6]; /* kludge from machdep.c:next68k_bootargs() */
		int i;
		for (i = 0; i < 6; i++) {
			sc->sc_enaddr[i] = rom_enetaddr[i];
		}
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
	    XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers\n",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
	    BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers\n",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	xsc->sc_txdma = nextdma_findchannel ("enetx");
	xsc->sc_rxdma = nextdma_findchannel ("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer (self);
	} else {
		config_defer (self, findchannel_defer);
	}

	attached = 1;
}

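/*
 * Interrupt handlers.  These are autovectored, so each handler first
 * checks that its interrupt is actually pending before calling into the
 * MI mb8795 code.
 */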
int
xe_tint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return 0;
	mb8795_tint((struct mb8795_softc *)arg);
	return(1);
}

int
xe_rint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return(0);
	mb8795_rint((struct mb8795_softc *)arg);
	return(1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(sc, reg)
	struct mb8795_softc *sc;
	int reg;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return(bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(sc, reg, val)
	struct mb8795_softc *sc;
	int reg;
	u_char val;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe dma reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe dma rx setup\n"));

	for(i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
			xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe dma rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

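/*
 * Hand a completed receive packet to the MI code.  The receive side keeps
 * a small ring of dmamaps/mbufs: sc_rx_loaded_idx is the last map handed
 * to the DMA engine, sc_rx_completed_idx the last one the engine finished,
 * and sc_rx_handled_idx the last one passed up from here.
 */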
struct mbuf *
xe_dma_rx_mbuf (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for next packet */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
			xe_dma_rxmap_load(sc, map);

		/* Punt runt packets
		 * dma restarts create 0 length packets for example
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe dma tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe dma tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

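/*
 * Load an outgoing mbuf chain for DMA.  The chain is copied into the
 * contiguous, pre-aligned sc_txbuf (padded up to the minimum frame
 * length) and that buffer is loaded instead; the direct
 * bus_dmamap_load_mbuf() path is kept under "#if 0" below.
 */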
int
xe_dma_tx_mbuf (sc, m)
	struct mb8795_softc *sc;
	struct mbuf *m;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

	/* The following is a next specific hack that should
	 * probably be moved out of MI code.
	 * This macro assumes it can move forward as needed
	 * in the buffer.  Perhaps it should zero the extra buffer.
	 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
	  (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
			&~(DMA_ENDALIGNMENT-1)))-(s);}

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
	    xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		/* Fix runt packets, @@@ memory overrun */
		if (buflen < ETHERMIN+sizeof(struct ether_header)) {
			buflen = ETHERMIN+sizeof(struct ether_header);
		}

		{
			u_char *p = buf;
			for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0) continue;
				bcopy(mtod(m, u_char *), p, m->m_len);
				p += m->m_len;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
	    xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive (sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

void
xe_dma_tx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("%s: xe_dma_tx_completed()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded ", sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map", sc->sc_dev.dv_xname);
	}
#endif
}

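/*
 * Called when the transmit DMA channel shuts down: sync and unload the
 * single tx map, free the mbuf chain, and kick off the next queued
 * packet if the interface is still running.
 */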
void
xe_dma_tx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded ", sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
	    MB_READ_REG(sc, MB8795_TXMASK)
	    | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}


void
xe_dma_rx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#if (defined(DIAGNOSTIC))
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed\n",
			    sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
		    sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE, MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dma shutdown while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

/*
 * load a dmamap with a freshly allocated mbuf
 */
struct mbuf *
xe_dma_rxmap_load(sc, map)
	struct mb8795_softc *sc;
	bus_dmamap_t map;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/* @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet\n");
	}

	/* Align buffer, @@@ next specific.
	 * perhaps should be using M_ALIGN here instead?
	 * First we give us a little room to align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;
		buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
	    map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
		    m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
		    MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m);
		m = NULL;
	}

	return(m);
}

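/*
 * Called by the DMA engine to get the next receive map.  If the ring is
 * full, the oldest unhandled packet is dropped to make room so that the
 * engine is never left without a buffer.
 */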
bus_dmamap_t
xe_dma_rx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
			/* make space for one packet by dropping one */
			struct mbuf *m;
			m = xe_dma_rx_mbuf (sc);
			if (m)
				m_freem(m);
#if (defined(DIAGNOSTIC))
			DPRINTF(("%s: out of receive DMA buffers\n", sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx dma continue while if not running\n",
		    sc->sc_dev.dv_xname);
#endif

	return(map);
}

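/*
 * Called by the DMA engine to get the next transmit map.  Only one
 * transmit is in flight at a time, so hand out sc_tx_dmamap once and
 * return NULL until the shutdown callback clears sc_tx_loaded.
 */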
bus_dmamap_t
xe_dma_tx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	return(map);
}