/* $NetBSD: if_xe.c,v 1.5.8.4 2002/10/18 02:39:14 nathanw Exp $ */
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) if (xe_debug) printf x;
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x;

extern int turbo;

int xe_match __P((struct device *, struct cfdata *, void *));
void xe_attach __P((struct device *, struct device *, void *));
int xe_tint __P((void *));
int xe_rint __P((void *));

struct mbuf * xe_dma_rxmap_load __P((struct mb8795_softc *,
        bus_dmamap_t map));

bus_dmamap_t xe_dma_rx_continue __P((void *));
void xe_dma_rx_completed __P((bus_dmamap_t, void *));
bus_dmamap_t xe_dma_tx_continue __P((void *));
void xe_dma_tx_completed __P((bus_dmamap_t, void *));
void xe_dma_rx_shutdown __P((void *));
void xe_dma_tx_shutdown __P((void *));

static void findchannel_defer __P((struct device *));

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
        IFM_ETHER|IFM_AUTO,
        IFM_ETHER|IFM_10_T,
        IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char xe_read_reg __P((struct mb8795_softc *, int));
void xe_write_reg __P((struct mb8795_softc *, int, u_char));
void xe_dma_reset __P((struct mb8795_softc *));
void xe_dma_rx_setup __P((struct mb8795_softc *));
void xe_dma_rx_go __P((struct mb8795_softc *));
struct mbuf * xe_dma_rx_mbuf __P((struct mb8795_softc *));
void xe_dma_tx_setup __P((struct mb8795_softc *));
void xe_dma_tx_go __P((struct mb8795_softc *));
int xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
int xe_dma_tx_isactive __P((struct mb8795_softc *));
#if 0
int xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
        size_t *, int, size_t *));
void xe_dma_go __P((struct mb8795_softc *));
void xe_dma_stop __P((struct mb8795_softc *));
int xe_dma_isactive __P((struct mb8795_softc *));
#endif

struct mb8795_glue xe_glue = {
        xe_read_reg,
        xe_write_reg,
        xe_dma_reset,
        xe_dma_rx_setup,
        xe_dma_rx_go,
        xe_dma_rx_mbuf,
        xe_dma_tx_setup,
        xe_dma_tx_go,
        xe_dma_tx_mbuf,
        xe_dma_tx_isactive,
#if 0
        xe_dma_setup,
        xe_dma_go,
        xe_dma_stop,
        xe_dma_isactive,
        NULL,           /* gl_clear_latched_intr */
#endif
};

int
xe_match(parent, match, aux)
        struct device *parent;
        struct cfdata *match;
        void *aux;
{
        struct intio_attach_args *ia = (struct intio_attach_args *)aux;

        if (attached)
                return (0);

        ia->ia_addr = (void *)NEXT_P_ENET;

        return (1);
}

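/*
 * Finish attaching once the "enetx" and "enetr" DMA channels are
 * available: called directly from xe_attach() when both channels have
 * already attached, otherwise deferred via config_defer().
 */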
static void
findchannel_defer(self)
        struct device *self;
{
        struct xe_softc *xsc = (struct xe_softc *)self;
        struct mb8795_softc *sc = &xsc->sc_mb8795;
        int i, error;

        if (!xsc->sc_txdma) {
                xsc->sc_txdma = nextdma_findchannel("enetx");
                if (xsc->sc_txdma == NULL)
                        panic("%s: can't find enetx dma channel",
                            sc->sc_dev.dv_xname);
        }
        if (!xsc->sc_rxdma) {
                xsc->sc_rxdma = nextdma_findchannel("enetr");
                if (xsc->sc_rxdma == NULL)
                        panic("%s: can't find enetr dma channel",
                            sc->sc_dev.dv_xname);
        }
        printf("%s: using dma channels %s %s\n", sc->sc_dev.dv_xname,
            xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

        nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
        nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
        nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
        nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

        nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
        nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
        nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
        nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

        /* Initialize the dma maps */
        error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
            (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
            &xsc->sc_tx_dmamap);
        if (error) {
                panic("%s: can't create tx DMA map, error = %d",
                    sc->sc_dev.dv_xname, error);
        }

        for (i = 0; i < MB8795_NRXBUFS; i++) {
                error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
                    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
                    &xsc->sc_rx_dmamap[i]);
                if (error) {
                        panic("%s: can't create rx DMA map, error = %d",
                            sc->sc_dev.dv_xname, error);
                }
                xsc->sc_rx_mb_head[i] = NULL;
        }
        xsc->sc_rx_loaded_idx = 0;
        xsc->sc_rx_completed_idx = 0;
        xsc->sc_rx_handled_idx = 0;

        /* @@@ more next hacks
         * the 2000 covers at least a 1500 mtu + headers
         * + DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT
         */
        xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
        if (!xsc->sc_txbuf)
                panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

        xsc->sc_tx_mb_head = NULL;
        xsc->sc_tx_loaded = 0;

        mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

        isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
        INTR_ENABLE(NEXT_I_ENETX);
        isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
        INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct intio_attach_args *ia = (struct intio_attach_args *)aux;
        struct xe_softc *xsc = (struct xe_softc *)self;
        struct mb8795_softc *sc = &xsc->sc_mb8795;

        DPRINTF(("%s: xe_attach()\n", sc->sc_dev.dv_xname));

        {
                extern u_char rom_enetaddr[6];  /* kludge from machdep.c:next68k_bootargs() */
                int i;

                for (i = 0; i < 6; i++) {
                        sc->sc_enaddr[i] = rom_enetaddr[i];
                }
        }

        printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
            sc->sc_dev.dv_xname,
            sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
            sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

        xsc->sc_bst = ia->ia_bst;
        if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
            XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
                panic("\n%s: can't map mb8795 registers",
                    sc->sc_dev.dv_xname);
        }

        sc->sc_bmap_bst = ia->ia_bst;
        if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
            BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
                panic("\n%s: can't map bmap registers",
                    sc->sc_dev.dv_xname);
        }

        /*
         * Set up glue for MI code.
         */
        sc->sc_glue = &xe_glue;

        xsc->sc_txdma = nextdma_findchannel("enetx");
        xsc->sc_rxdma = nextdma_findchannel("enetr");
        if (xsc->sc_rxdma && xsc->sc_txdma) {
                findchannel_defer(self);
        } else {
                config_defer(self, findchannel_defer);
        }

        attached = 1;
}

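/*
 * Autovectored interrupt handlers.  Each returns 0 if the interrupt was
 * not for this device, otherwise hands off to the MI mb8795 code.
 */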
int
xe_tint(arg)
        void *arg;
{
        if (!INTR_OCCURRED(NEXT_I_ENETX))
                return (0);
        mb8795_tint((struct mb8795_softc *)arg);
        return (1);
}

int
xe_rint(arg)
        void *arg;
{
        if (!INTR_OCCURRED(NEXT_I_ENETR))
                return (0);
        mb8795_rint((struct mb8795_softc *)arg);
        return (1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(sc, reg)
        struct mb8795_softc *sc;
        int reg;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;

        return (bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(sc, reg, val)
        struct mb8795_softc *sc;
        int reg;
        u_char val;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;

        bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

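/*
 * Reset both DMA channels and release any transmit or receive buffers
 * that are still loaded.
 */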
void
xe_dma_reset(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;
        int i;

        DPRINTF(("xe dma reset\n"));

        nextdma_reset(xsc->sc_rxdma);
        nextdma_reset(xsc->sc_txdma);

        if (xsc->sc_tx_loaded) {
                bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
                    0, xsc->sc_tx_dmamap->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
                xsc->sc_tx_loaded = 0;
        }
        if (xsc->sc_tx_mb_head) {
                m_freem(xsc->sc_tx_mb_head);
                xsc->sc_tx_mb_head = NULL;
        }

        for (i = 0; i < MB8795_NRXBUFS; i++) {
                if (xsc->sc_rx_mb_head[i]) {
                        bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, xsc->sc_rx_dmamap[i]);
                        m_freem(xsc->sc_rx_mb_head[i]);
                        xsc->sc_rx_mb_head[i] = NULL;
                }
        }
}

void
xe_dma_rx_setup(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;
        int i;

        DPRINTF(("xe dma rx setup\n"));

        for (i = 0; i < MB8795_NRXBUFS; i++) {
                xsc->sc_rx_mb_head[i] =
                    xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
        }
        xsc->sc_rx_loaded_idx = 0;
        xsc->sc_rx_completed_idx = 0;
        xsc->sc_rx_handled_idx = 0;

        nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;

        DPRINTF(("xe dma rx go\n"));

        nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

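/*
 * Pull the next completed receive buffer out of the ring, install a
 * freshly loaded mbuf in its place, and return the received packet
 * (or NULL if nothing is pending or the packet was a runt).
 */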
struct mbuf *
xe_dma_rx_mbuf(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;
        bus_dmamap_t map;
        struct mbuf *m;

        m = NULL;
        if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
                xsc->sc_rx_handled_idx++;
                xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

                map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
                m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

                m->m_len = map->dm_xfer_len;

                bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
                    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

                bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

                /* Install a fresh mbuf for next packet */

                xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
                    xe_dma_rxmap_load(sc, map);

                /* Punt runt packets
                 * dma restarts create 0 length packets for example
                 */
                if (m->m_len < ETHER_MIN_LEN) {
                        m_freem(m);
                        m = NULL;
                }
        }
        return (m);
}

void
xe_dma_tx_setup(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;

        DPRINTF(("xe dma tx setup\n"));

        nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;

        DPRINTF(("xe dma tx go\n"));

        nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(sc, m)
        struct mb8795_softc *sc;
        struct mbuf *m;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;
        int error;

        xsc->sc_tx_mb_head = m;

/* The following is a next specific hack that should
 * probably be moved out of MI code.
 * This macro assumes it can move forward as needed
 * in the buffer.  Perhaps it should zero the extra buffer.
 */
#define REALIGN_DMABUF(s,l) \
        { (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
                        &~(DMA_BEGINALIGNMENT-1))); \
        (l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
                        &~(DMA_ENDALIGNMENT-1)))-(s);}
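/*
 * Worked example (illustrative values, not the actual alignment
 * constants): with DMA_BEGINALIGNMENT == 4 and DMA_ENDALIGNMENT == 32,
 * a buffer starting at 0x1002 with length 100 is moved up to start at
 * 0x1004, and the length is recomputed so that start + length lands on
 * the next 32-byte boundary (0x1080), giving a length of 124 (0x7c).
 */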

#if 0
        error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
            xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
        {
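                /*
                 * Copy the outgoing chain into the contiguous bounce
                 * buffer allocated in findchannel_defer() and load that
                 * instead of the chain itself; the direct
                 * bus_dmamap_load_mbuf() path above is disabled,
                 * apparently because of the NeXT DMA alignment hacks.
                 */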
                u_char *buf = xsc->sc_txbuf;
                int buflen = 0;

                buflen = m->m_pkthdr.len;

                /* Fix runt packets, @@@ memory overrun */
                if (buflen < ETHERMIN+sizeof(struct ether_header)) {
                        buflen = ETHERMIN+sizeof(struct ether_header);
                }

                {
                        u_char *p = buf;
                        for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
                                if (m->m_len == 0) continue;
                                bcopy(mtod(m, u_char *), p, m->m_len);
                                p += m->m_len;
                        }
                }

                error = bus_dmamap_load(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
                    buf, buflen, NULL, BUS_DMA_NOWAIT);
        }
#endif
        if (error) {
                printf("%s: can't load mbuf chain, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                m_freem(xsc->sc_tx_mb_head);
                xsc->sc_tx_mb_head = NULL;
                return (error);
        }

#ifdef DIAGNOSTIC
        if (xsc->sc_tx_loaded != 0) {
                panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
                    xsc->sc_tx_loaded);
        }
#endif

        bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
            xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

        return (0);
}

int
xe_dma_tx_isactive(sc)
        struct mb8795_softc *sc;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;

        return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

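/*
 * Callbacks registered with the nextdma driver in findchannel_defer():
 * the *_completed functions run when a DMA map finishes, the *_continue
 * functions supply the next map to the engine, and the *_shutdown
 * functions run when a channel goes idle.
 */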
void
xe_dma_tx_completed(map, arg)
        bus_dmamap_t map;
        void *arg;
{
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;

        DPRINTF(("%s: xe_dma_tx_completed()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
        if (!xsc->sc_tx_loaded) {
                panic("%s: tx completed never loaded", sc->sc_dev.dv_xname);
        }
        if (map != xsc->sc_tx_dmamap) {
                panic("%s: unexpected tx completed map", sc->sc_dev.dv_xname);
        }

#endif
}

void
xe_dma_tx_shutdown(arg)
        void *arg;
{
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        DPRINTF(("%s: xe_dma_tx_shutdown()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
        if (!xsc->sc_tx_loaded) {
                panic("%s: tx shutdown never loaded", sc->sc_dev.dv_xname);
        }
#endif

        if (turbo)
                MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
        if (xsc->sc_tx_loaded) {
                bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
                    0, xsc->sc_tx_dmamap->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
                m_freem(xsc->sc_tx_mb_head);
                xsc->sc_tx_mb_head = NULL;

                xsc->sc_tx_loaded--;
        }

#ifdef DIAGNOSTIC
        if (xsc->sc_tx_loaded != 0) {
                panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
                    xsc->sc_tx_loaded);
        }
#endif

        ifp->if_timer = 0;

#if 1
        if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
                void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */
                mb8795_start_dma(sc);
        }
#endif

#if 0
        /* Enable ready interrupt */
        MB_WRITE_REG(sc, MB8795_TXMASK,
            MB_READ_REG(sc, MB8795_TXMASK)
            | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}


void
xe_dma_rx_completed(map, arg)
        bus_dmamap_t map;
        void *arg;
{
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        if (ifp->if_flags & IFF_RUNNING) {
                xsc->sc_rx_completed_idx++;
                xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

                DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
                    sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#if (defined(DIAGNOSTIC))
                if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
                        panic("%s: Unexpected rx dmamap completed",
                            sc->sc_dev.dv_xname);
                }
#endif
        }
#ifdef DIAGNOSTIC
        else
                DPRINTF(("%s: Unexpected rx dmamap completed while if not running\n",
                    sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(arg)
        void *arg;
{
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        if (ifp->if_flags & IFF_RUNNING) {
                DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
                    sc->sc_dev.dv_xname));

                nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
                if (turbo)
                        MB_WRITE_REG(sc, MB8795_RXMODE,
                            MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
        }
#ifdef DIAGNOSTIC
        else
                DPRINTF(("%s: Unexpected rx dma shutdown while if not running\n",
                    sc->sc_dev.dv_xname));
#endif
}

/*
 * load a dmamap with a freshly allocated mbuf
 */
struct mbuf *
xe_dma_rxmap_load(sc, map)
        struct mb8795_softc *sc;
        bus_dmamap_t map;
{
        struct xe_softc *xsc = (struct xe_softc *)sc;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct mbuf *m;
        int error;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m) {
                MCLGET(m, M_DONTWAIT);
                if ((m->m_flags & M_EXT) == 0) {
                        m_freem(m);
                        m = NULL;
                } else {
                        m->m_len = MCLBYTES;
                }
        }
        if (!m) {
                /* @@@ Handle this gracefully by reusing a scratch buffer
                 * or something.
                 */
                panic("Unable to get memory for incoming ethernet");
        }

        /* Align buffer, @@@ next specific.
         * perhaps should be using M_ALIGN here instead?
         * First we give us a little room to align with.
         */
        {
                u_char *buf = m->m_data;
                int buflen = m->m_len;
                buflen -= DMA_ENDALIGNMENT+DMA_BEGINALIGNMENT;
                REALIGN_DMABUF(buf, buflen);
                m->m_data = buf;
                m->m_len = buflen;
        }

        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = m->m_len;

        error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
            map, m, BUS_DMA_NOWAIT);

        bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
            map->dm_mapsize, BUS_DMASYNC_PREREAD);

        if (error) {
                DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
                    m->m_data, m->m_len));
                DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
                    MCLBYTES, map->_dm_size));

                panic("%s: can't load rx mbuf chain, error = %d",
                    sc->sc_dev.dv_xname, error);
                m_freem(m);
                m = NULL;
        }

        return (m);
}

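/*
 * Supply the next receive dmamap to the DMA engine.  If the ring is
 * full, the oldest unhandled packet is dropped to make room.
 */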
bus_dmamap_t
xe_dma_rx_continue(arg)
        void *arg;
{
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_dmamap_t map = NULL;

        if (ifp->if_flags & IFF_RUNNING) {
                if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) == xsc->sc_rx_handled_idx) {
                        /* make space for one packet by dropping one */
                        struct mbuf *m;
                        m = xe_dma_rx_mbuf(sc);
                        if (m)
                                m_freem(m);
#if (defined(DIAGNOSTIC))
                        DPRINTF(("%s: out of receive DMA buffers\n",
                            sc->sc_dev.dv_xname));
#endif
                }
                xsc->sc_rx_loaded_idx++;
                xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
                map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

                DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
                    sc->sc_dev.dv_xname, xsc->sc_rx_loaded_idx));
        }
#ifdef DIAGNOSTIC
        else
                panic("%s: Unexpected rx dma continue while if not running",
                    sc->sc_dev.dv_xname);
#endif

        return (map);
}

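/*
 * Supply the single transmit dmamap to the DMA engine the first time it
 * is requested for a transmission; subsequent calls return NULL since
 * only one transmit buffer is ever in flight.
 */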
bus_dmamap_t
xe_dma_tx_continue(arg)
        void *arg;
{
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;
        bus_dmamap_t map;

        DPRINTF(("%s: xe_dma_tx_continue()\n", sc->sc_dev.dv_xname));

        if (xsc->sc_tx_loaded) {
                map = NULL;
        } else {
                map = xsc->sc_tx_dmamap;
                xsc->sc_tx_loaded++;
        }

#ifdef DIAGNOSTIC
        if (xsc->sc_tx_loaded != 1) {
                panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
                    xsc->sc_tx_loaded);
        }
#endif

        return (map);
}