if_xge.c revision 1.10 1 1.10 christos /* $NetBSD: if_xge.c,v 1.10 2008/12/16 22:35:33 christos Exp $ */
2 1.1 ragge
3 1.1 ragge /*
4 1.1 ragge * Copyright (c) 2004, SUNET, Swedish University Computer Network.
5 1.1 ragge * All rights reserved.
6 1.1 ragge *
7 1.1 ragge * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
8 1.1 ragge *
9 1.1 ragge * Redistribution and use in source and binary forms, with or without
10 1.1 ragge * modification, are permitted provided that the following conditions
11 1.1 ragge * are met:
12 1.1 ragge * 1. Redistributions of source code must retain the above copyright
13 1.1 ragge * notice, this list of conditions and the following disclaimer.
14 1.1 ragge * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 ragge * notice, this list of conditions and the following disclaimer in the
16 1.1 ragge * documentation and/or other materials provided with the distribution.
17 1.1 ragge * 3. All advertising materials mentioning features or use of this software
18 1.1 ragge * must display the following acknowledgement:
19 1.1 ragge * This product includes software developed for the NetBSD Project by
20 1.1 ragge * SUNET, Swedish University Computer Network.
21 1.1 ragge * 4. The name of SUNET may not be used to endorse or promote products
22 1.1 ragge * derived from this software without specific prior written permission.
23 1.1 ragge *
24 1.1 ragge * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
25 1.1 ragge * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 1.1 ragge * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 1.1 ragge * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET
28 1.1 ragge * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 1.1 ragge * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 1.1 ragge * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 1.1 ragge * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 1.1 ragge * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 1.1 ragge * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 1.1 ragge * POSSIBILITY OF SUCH DAMAGE.
35 1.1 ragge */
36 1.1 ragge
37 1.1 ragge /*
38 1.1 ragge * Device driver for the S2io Xframe Ten Gigabit Ethernet controller.
39 1.1 ragge *
40 1.1 ragge * TODO (in no specific order):
41 1.1 ragge * HW VLAN support.
42 1.1 ragge * IPv6 HW cksum.
43 1.1 ragge */
44 1.1 ragge
45 1.1 ragge #include <sys/cdefs.h>
46 1.10 christos __KERNEL_RCSID(0, "$NetBSD: if_xge.c,v 1.10 2008/12/16 22:35:33 christos Exp $");
47 1.1 ragge
48 1.1 ragge #include "bpfilter.h"
49 1.1 ragge #include "rnd.h"
50 1.1 ragge
51 1.1 ragge #include <sys/param.h>
52 1.1 ragge #include <sys/systm.h>
53 1.1 ragge #include <sys/mbuf.h>
54 1.1 ragge #include <sys/malloc.h>
55 1.1 ragge #include <sys/kernel.h>
56 1.1 ragge #include <sys/socket.h>
57 1.1 ragge #include <sys/device.h>
58 1.1 ragge
59 1.1 ragge #if NRND > 0
60 1.1 ragge #include <sys/rnd.h>
61 1.1 ragge #endif
62 1.1 ragge
63 1.1 ragge #include <net/if.h>
64 1.1 ragge #include <net/if_dl.h>
65 1.1 ragge #include <net/if_media.h>
66 1.1 ragge #include <net/if_ether.h>
67 1.1 ragge
68 1.1 ragge #if NBPFILTER > 0
69 1.1 ragge #include <net/bpf.h>
70 1.1 ragge #endif
71 1.1 ragge
72 1.6 ad #include <sys/bus.h>
73 1.6 ad #include <sys/intr.h>
74 1.1 ragge #include <machine/endian.h>
75 1.1 ragge
76 1.1 ragge #include <dev/mii/mii.h>
77 1.1 ragge #include <dev/mii/miivar.h>
78 1.1 ragge
79 1.1 ragge #include <dev/pci/pcivar.h>
80 1.1 ragge #include <dev/pci/pcireg.h>
81 1.1 ragge #include <dev/pci/pcidevs.h>
82 1.1 ragge
83 1.1 ragge #include <sys/proc.h>
84 1.1 ragge
85 1.1 ragge #include <dev/pci/if_xgereg.h>
86 1.1 ragge
87 1.1 ragge /*
88 1.1 ragge * Some tunable constants, tune with care!
89 1.1 ragge */
90 1.1 ragge #define RX_MODE RX_MODE_1 /* Receive mode (buffer usage, see below) */
91 1.1 ragge #define NRXDESCS 1016 /* # of receive descriptors (requested) */
92 1.1 ragge #define NTXDESCS 8192 /* Number of transmit descriptors */
93 1.1 ragge #define NTXFRAGS 100 /* Max fragments per packet */
94 1.1 ragge #define XGE_EVENT_COUNTERS /* Instrumentation */
95 1.1 ragge
96 1.1 ragge /*
97 1.1 ragge * Receive buffer modes; 1, 3 or 5 buffers.
98 1.1 ragge */
99 1.1 ragge #define RX_MODE_1 1
100 1.1 ragge #define RX_MODE_3 3
101 1.1 ragge #define RX_MODE_5 5
102 1.1 ragge
103 1.1 ragge /*
104 1.1 ragge * Use clever macros to avoid a bunch of #ifdef's.
105 1.1 ragge */
106 1.1 ragge #define XCONCAT3(x,y,z) x ## y ## z
107 1.1 ragge #define CONCAT3(x,y,z) XCONCAT3(x,y,z)
108 1.1 ragge #define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
109 1.1 ragge #define rxd_4k CONCAT3(rxd,RX_MODE,_4k)
110 1.1 ragge #define rxdesc ___CONCAT(rxd,RX_MODE)
111 1.1 ragge
112 1.1 ragge #define NEXTTX(x) (((x)+1) % NTXDESCS)
113 1.1 ragge #define NRXFRAGS RX_MODE /* hardware imposed frags */
114 1.1 ragge #define NRXPAGES ((NRXDESCS/NDESC_BUFMODE)+1)
115 1.1 ragge #define NRXREAL (NRXPAGES*NDESC_BUFMODE)
116 1.1 ragge #define RXMAPSZ (NRXPAGES*PAGE_SIZE)
117 1.1 ragge
118 1.1 ragge #ifdef XGE_EVENT_COUNTERS
119 1.1 ragge #define XGE_EVCNT_INCR(ev) (ev)->ev_count++
120 1.1 ragge #else
121 1.1 ragge #define XGE_EVCNT_INCR(ev) /* nothing */
122 1.1 ragge #endif
123 1.1 ragge
124 1.1 ragge /*
125 1.1 ragge * Magics to fix a bug when the mac address can't be read correctly.
126 1.1 ragge * Comes from the Linux driver.
127 1.1 ragge */
128 1.1 ragge static uint64_t fix_mac[] = {
129 1.1 ragge 0x0060000000000000ULL, 0x0060600000000000ULL,
130 1.1 ragge 0x0040600000000000ULL, 0x0000600000000000ULL,
131 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
132 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
133 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
134 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
135 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
136 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
137 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
138 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
139 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
140 1.1 ragge 0x0020600000000000ULL, 0x0060600000000000ULL,
141 1.1 ragge 0x0020600000000000ULL, 0x0000600000000000ULL,
142 1.1 ragge 0x0040600000000000ULL, 0x0060600000000000ULL,
143 1.1 ragge };
144 1.1 ragge
145 1.1 ragge
146 1.1 ragge struct xge_softc {
147 1.1 ragge struct device sc_dev;
148 1.1 ragge struct ethercom sc_ethercom;
149 1.1 ragge #define sc_if sc_ethercom.ec_if
150 1.1 ragge bus_dma_tag_t sc_dmat;
151 1.1 ragge bus_space_tag_t sc_st;
152 1.1 ragge bus_space_handle_t sc_sh;
153 1.1 ragge bus_space_tag_t sc_txt;
154 1.1 ragge bus_space_handle_t sc_txh;
155 1.1 ragge void *sc_ih;
156 1.1 ragge
157 1.1 ragge struct ifmedia xena_media;
158 1.1 ragge pcireg_t sc_pciregs[16];
159 1.1 ragge
160 1.1 ragge /* Transmit structures */
161 1.1 ragge struct txd *sc_txd[NTXDESCS]; /* transmit frags array */
162 1.1 ragge bus_addr_t sc_txdp[NTXDESCS]; /* bus address of transmit frags */
163 1.1 ragge bus_dmamap_t sc_txm[NTXDESCS]; /* transmit frags map */
164 1.1 ragge struct mbuf *sc_txb[NTXDESCS]; /* transmit mbuf pointer */
165 1.1 ragge int sc_nexttx, sc_lasttx;
166 1.1 ragge bus_dmamap_t sc_txmap; /* transmit descriptor map */
167 1.1 ragge
168 1.1 ragge /* Receive data */
169 1.1 ragge bus_dmamap_t sc_rxmap; /* receive descriptor map */
170 1.1 ragge struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* receive desc pages */
171 1.1 ragge bus_dmamap_t sc_rxm[NRXREAL]; /* receive buffer map */
172 1.1 ragge struct mbuf *sc_rxb[NRXREAL]; /* mbufs on receive descriptors */
173 1.1 ragge int sc_nextrx; /* next descriptor to check */
174 1.1 ragge
175 1.1 ragge #ifdef XGE_EVENT_COUNTERS
176 1.1 ragge struct evcnt sc_intr; /* # of interrupts */
177 1.1 ragge struct evcnt sc_txintr; /* # of transmit interrupts */
178 1.1 ragge struct evcnt sc_rxintr; /* # of receive interrupts */
179 1.1 ragge struct evcnt sc_txqe; /* # of xmit intrs when board queue empty */
180 1.1 ragge #endif
181 1.1 ragge };
182 1.1 ragge
183 1.1 ragge static int xge_match(struct device *parent, struct cfdata *cf, void *aux);
184 1.1 ragge static void xge_attach(struct device *parent, struct device *self, void *aux);
185 1.1 ragge static int xge_alloc_txmem(struct xge_softc *);
186 1.1 ragge static int xge_alloc_rxmem(struct xge_softc *);
187 1.1 ragge static void xge_start(struct ifnet *);
188 1.1 ragge static void xge_stop(struct ifnet *, int);
189 1.1 ragge static int xge_add_rxbuf(struct xge_softc *, int);
190 1.1 ragge static void xge_mcast_filter(struct xge_softc *sc);
191 1.1 ragge static int xge_setup_xgxs(struct xge_softc *sc);
192 1.5 christos static int xge_ioctl(struct ifnet *ifp, u_long cmd, void *data);
193 1.1 ragge static int xge_init(struct ifnet *ifp);
194 1.1 ragge static void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
195 1.1 ragge static int xge_xgmii_mediachange(struct ifnet *);
196 1.1 ragge static int xge_intr(void *);
197 1.1 ragge
198 1.1 ragge /*
199 1.1 ragge * Helpers to address registers.
200 1.1 ragge */
201 1.1 ragge #define PIF_WCSR(csr, val) pif_wcsr(sc, csr, val)
202 1.1 ragge #define PIF_RCSR(csr) pif_rcsr(sc, csr)
203 1.1 ragge #define TXP_WCSR(csr, val) txp_wcsr(sc, csr, val)
204 1.1 ragge #define PIF_WKEY(csr, val) pif_wkey(sc, csr, val)
205 1.1 ragge
206 1.1 ragge static inline void
207 1.1 ragge pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
208 1.1 ragge {
209 1.1 ragge uint32_t lval, hval;
210 1.1 ragge
211 1.1 ragge lval = val&0xffffffff;
212 1.1 ragge hval = val>>32;
213 1.1 ragge bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
214 1.1 ragge bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
215 1.1 ragge }
216 1.1 ragge
217 1.1 ragge static inline uint64_t
218 1.1 ragge pif_rcsr(struct xge_softc *sc, bus_size_t csr)
219 1.1 ragge {
220 1.1 ragge uint64_t val, val2;
221 1.1 ragge val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
222 1.1 ragge val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
223 1.1 ragge val |= (val2 << 32);
224 1.1 ragge return val;
225 1.1 ragge }
226 1.1 ragge
227 1.1 ragge static inline void
228 1.1 ragge txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
229 1.1 ragge {
230 1.1 ragge uint32_t lval, hval;
231 1.1 ragge
232 1.1 ragge lval = val&0xffffffff;
233 1.1 ragge hval = val>>32;
234 1.1 ragge bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
235 1.1 ragge bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
236 1.1 ragge }
237 1.1 ragge
238 1.1 ragge
239 1.1 ragge static inline void
240 1.1 ragge pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
241 1.1 ragge {
242 1.1 ragge uint32_t lval, hval;
243 1.1 ragge
244 1.1 ragge lval = val&0xffffffff;
245 1.1 ragge hval = val>>32;
246 1.1 ragge PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
247 1.1 ragge bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
248 1.1 ragge PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
249 1.1 ragge bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
250 1.1 ragge }
251 1.1 ragge
252 1.1 ragge
253 1.1 ragge CFATTACH_DECL(xge, sizeof(struct xge_softc),
254 1.1 ragge xge_match, xge_attach, NULL, NULL);
255 1.1 ragge
256 1.9 cegger #define XNAME device_xname(&sc->sc_dev)
257 1.1 ragge
258 1.1 ragge #define XGE_RXSYNC(desc, what) \
259 1.1 ragge bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
260 1.1 ragge (desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
261 1.1 ragge (desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
262 1.1 ragge #define XGE_RXD(desc) &sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
263 1.1 ragge r4_rxd[desc%NDESC_BUFMODE]
264 1.1 ragge
265 1.1 ragge /*
266 1.1 ragge * Non-tunable constants.
267 1.1 ragge */
268 1.1 ragge #define XGE_MAX_MTU 9600
269 1.1 ragge #define XGE_IP_MAXPACKET 65535 /* same as IP_MAXPACKET */
270 1.1 ragge
271 1.1 ragge static int
272 1.4 christos xge_match(struct device *parent, struct cfdata *cf, void *aux)
273 1.1 ragge {
274 1.1 ragge struct pci_attach_args *pa = aux;
275 1.1 ragge
276 1.1 ragge if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_S2IO &&
277 1.1 ragge PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_S2IO_XFRAME)
278 1.1 ragge return (1);
279 1.1 ragge
280 1.1 ragge return (0);
281 1.1 ragge }
282 1.1 ragge
283 1.1 ragge void
284 1.4 christos xge_attach(struct device *parent, struct device *self, void *aux)
285 1.1 ragge {
286 1.1 ragge struct pci_attach_args *pa = aux;
287 1.1 ragge struct xge_softc *sc;
288 1.1 ragge struct ifnet *ifp;
289 1.1 ragge pcireg_t memtype;
290 1.1 ragge pci_intr_handle_t ih;
291 1.1 ragge const char *intrstr = NULL;
292 1.1 ragge pci_chipset_tag_t pc = pa->pa_pc;
293 1.1 ragge uint8_t enaddr[ETHER_ADDR_LEN];
294 1.1 ragge uint64_t val;
295 1.1 ragge int i;
296 1.1 ragge
297 1.1 ragge sc = (struct xge_softc *)self;
298 1.1 ragge
299 1.1 ragge sc->sc_dmat = pa->pa_dmat;
300 1.1 ragge
301 1.1 ragge /* Get BAR0 address */
302 1.1 ragge memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
303 1.1 ragge if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
304 1.1 ragge &sc->sc_st, &sc->sc_sh, 0, 0)) {
305 1.1 ragge aprint_error("%s: unable to map PIF BAR registers\n", XNAME);
306 1.1 ragge return;
307 1.1 ragge }
308 1.1 ragge
309 1.1 ragge memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
310 1.1 ragge if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
311 1.1 ragge &sc->sc_txt, &sc->sc_txh, 0, 0)) {
312 1.1 ragge aprint_error("%s: unable to map TXP BAR registers\n", XNAME);
313 1.1 ragge return;
314 1.1 ragge }
315 1.1 ragge
316 1.1 ragge /* Save PCI config space */
317 1.1 ragge for (i = 0; i < 64; i += 4)
318 1.1 ragge sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
319 1.1 ragge
320 1.1 ragge #if BYTE_ORDER == LITTLE_ENDIAN
321 1.1 ragge val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
322 1.1 ragge val &= ~(TxF_R_SE|RxF_W_SE);
323 1.1 ragge PIF_WCSR(SWAPPER_CTRL, val);
324 1.1 ragge PIF_WCSR(SWAPPER_CTRL, val);
325 1.1 ragge #elif BYTE_ORDER == BIG_ENDIAN
326 1.1 ragge /* do nothing */
327 1.1 ragge #else
328 1.1 ragge #error bad endianness!
329 1.1 ragge #endif
330 1.1 ragge
331 1.1 ragge if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC)
332 1.1 ragge return printf("%s: failed configuring endian, %llx != %llx!\n",
333 1.1 ragge XNAME, (unsigned long long)val, SWAPPER_MAGIC);
334 1.1 ragge
335 1.1 ragge /*
336 1.1 ragge * The MAC addr may be all FF's, which is not good.
337 1.1 ragge * Resolve it by writing some magics to GPIO_CONTROL and
338 1.1 ragge * force a chip reset to read in the serial eeprom again.
339 1.1 ragge */
340 1.1 ragge for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
341 1.1 ragge PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
342 1.1 ragge PIF_RCSR(GPIO_CONTROL);
343 1.1 ragge }
344 1.1 ragge
345 1.1 ragge /*
346 1.1 ragge * Reset the chip and restore the PCI registers.
347 1.1 ragge */
348 1.1 ragge PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
349 1.1 ragge DELAY(500000);
350 1.1 ragge for (i = 0; i < 64; i += 4)
351 1.1 ragge pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);
352 1.1 ragge
353 1.1 ragge /*
354 1.1 ragge * Restore the byte order registers.
355 1.1 ragge */
356 1.1 ragge #if BYTE_ORDER == LITTLE_ENDIAN
357 1.1 ragge val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
358 1.1 ragge val &= ~(TxF_R_SE|RxF_W_SE);
359 1.1 ragge PIF_WCSR(SWAPPER_CTRL, val);
360 1.1 ragge PIF_WCSR(SWAPPER_CTRL, val);
361 1.1 ragge #elif BYTE_ORDER == BIG_ENDIAN
362 1.1 ragge /* do nothing */
363 1.1 ragge #else
364 1.1 ragge #error bad endianness!
365 1.1 ragge #endif
366 1.1 ragge
367 1.1 ragge if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC)
368 1.1 ragge return printf("%s: failed configuring endian2, %llx != %llx!\n",
369 1.1 ragge XNAME, (unsigned long long)val, SWAPPER_MAGIC);
370 1.1 ragge
371 1.1 ragge /*
372 1.1 ragge * XGXS initialization.
373 1.1 ragge */
374 1.1 ragge /* 29, reset */
375 1.1 ragge PIF_WCSR(SW_RESET, 0);
376 1.1 ragge DELAY(500000);
377 1.1 ragge
378 1.1 ragge /* 30, configure XGXS transceiver */
379 1.1 ragge xge_setup_xgxs(sc);
380 1.1 ragge
381 1.1 ragge /* 33, program MAC address (not needed here) */
382 1.1 ragge /* Get ethernet address */
383 1.1 ragge PIF_WCSR(RMAC_ADDR_CMD_MEM,
384 1.1 ragge RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
385 1.1 ragge while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
386 1.1 ragge ;
387 1.1 ragge val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
388 1.1 ragge for (i = 0; i < ETHER_ADDR_LEN; i++)
389 1.1 ragge enaddr[i] = (uint8_t)(val >> (56 - (8*i)));
390 1.1 ragge
391 1.1 ragge /*
392 1.1 ragge * Get memory for transmit descriptor lists.
393 1.1 ragge */
394 1.1 ragge if (xge_alloc_txmem(sc))
395 1.1 ragge return printf("%s: failed allocating txmem.\n", XNAME);
396 1.1 ragge
397 1.1 ragge /* 9 and 10 - set FIFO number/prio */
398 1.1 ragge PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
399 1.1 ragge PIF_WCSR(TX_FIFO_P1, 0ULL);
400 1.1 ragge PIF_WCSR(TX_FIFO_P2, 0ULL);
401 1.1 ragge PIF_WCSR(TX_FIFO_P3, 0ULL);
402 1.1 ragge
403 1.1 ragge /* 11, XXX set round-robin prio? */
404 1.1 ragge
405 1.1 ragge /* 12, enable transmit FIFO */
406 1.1 ragge val = PIF_RCSR(TX_FIFO_P0);
407 1.1 ragge val |= TX_FIFO_ENABLE;
408 1.1 ragge PIF_WCSR(TX_FIFO_P0, val);
409 1.1 ragge
410 1.1 ragge /* 13, disable some error checks */
411 1.1 ragge PIF_WCSR(TX_PA_CFG,
412 1.1 ragge TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);
413 1.1 ragge
414 1.1 ragge /*
415 1.1 ragge * Create transmit DMA maps.
416 1.1 ragge * Make them large for TSO.
417 1.1 ragge */
418 1.1 ragge for (i = 0; i < NTXDESCS; i++) {
419 1.1 ragge if (bus_dmamap_create(sc->sc_dmat, XGE_IP_MAXPACKET,
420 1.1 ragge NTXFRAGS, MCLBYTES, 0, 0, &sc->sc_txm[i]))
421 1.1 ragge return printf("%s: cannot create TX DMA maps\n", XNAME);
422 1.1 ragge }
423 1.1 ragge
424 1.1 ragge sc->sc_lasttx = NTXDESCS-1;
425 1.1 ragge
426 1.1 ragge /*
427 1.1 ragge * RxDMA initialization.
428 1.1 ragge * Only use one out of 8 possible receive queues.
429 1.1 ragge */
430 1.1 ragge if (xge_alloc_rxmem(sc)) /* allocate rx descriptor memory */
431 1.1 ragge return printf("%s: failed allocating rxmem\n", XNAME);
432 1.1 ragge
433 1.1 ragge /* Create receive buffer DMA maps */
434 1.1 ragge for (i = 0; i < NRXREAL; i++) {
435 1.1 ragge if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_MTU,
436 1.1 ragge NRXFRAGS, MCLBYTES, 0, 0, &sc->sc_rxm[i]))
437 1.1 ragge return printf("%s: cannot create RX DMA maps\n", XNAME);
438 1.1 ragge }
439 1.1 ragge
440 1.1 ragge /* allocate mbufs to receive descriptors */
441 1.1 ragge for (i = 0; i < NRXREAL; i++)
442 1.1 ragge if (xge_add_rxbuf(sc, i))
443 1.1 ragge panic("out of mbufs too early");
444 1.1 ragge
445 1.1 ragge /* 14, setup receive ring priority */
446 1.1 ragge PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL); /* only use one ring */
447 1.1 ragge
448 1.1 ragge /* 15, setup receive ring round-robin calendar */
449 1.1 ragge PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL); /* only use one ring */
450 1.1 ragge PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
451 1.1 ragge PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
452 1.1 ragge PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
453 1.1 ragge PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);
454 1.1 ragge
455 1.1 ragge /* 16, write receive ring start address */
456 1.1 ragge PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
457 1.1 ragge /* PRC_RXD0_[1-7] are not used */
458 1.1 ragge
459 1.1 ragge /* 17, Setup alarm registers */
460 1.1 ragge PIF_WCSR(PRC_ALARM_ACTION, 0ULL); /* Default everything to retry */
461 1.1 ragge
462 1.1 ragge /* 18, init receive ring controller */
463 1.1 ragge #if RX_MODE == RX_MODE_1
464 1.1 ragge val = RING_MODE_1;
465 1.1 ragge #elif RX_MODE == RX_MODE_3
466 1.1 ragge val = RING_MODE_3;
467 1.1 ragge #else /* RX_MODE == RX_MODE_5 */
468 1.1 ragge val = RING_MODE_5;
469 1.1 ragge #endif
470 1.1 ragge PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
471 1.1 ragge /* leave 1-7 disabled */
472 1.1 ragge /* XXXX snoop configuration? */
473 1.1 ragge
474 1.1 ragge /* 19, set chip memory assigned to the queue */
475 1.1 ragge PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64)); /* all 64M to queue 0 */
476 1.1 ragge
477 1.1 ragge /* 20, setup RLDRAM parameters */
478 1.1 ragge /* do not touch it for now */
479 1.1 ragge
480 1.1 ragge /* 21, setup pause frame thresholds */
481 1.1 ragge /* so not touch the defaults */
482 1.1 ragge /* XXX - must 0xff be written as stated in the manual? */
483 1.1 ragge
484 1.1 ragge /* 22, configure RED */
485 1.1 ragge /* we do not want to drop packets, so ignore */
486 1.1 ragge
487 1.1 ragge /* 23, initiate RLDRAM */
488 1.1 ragge val = PIF_RCSR(MC_RLDRAM_MRS);
489 1.1 ragge val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
490 1.1 ragge PIF_WCSR(MC_RLDRAM_MRS, val);
491 1.1 ragge DELAY(1000);
492 1.1 ragge
493 1.1 ragge /*
494 1.1 ragge * Setup interrupt policies.
495 1.1 ragge */
496 1.1 ragge /* 40, Transmit interrupts */
497 1.1 ragge PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
498 1.1 ragge TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
499 1.1 ragge PIF_WCSR(TTI_DATA2_MEM,
500 1.1 ragge TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
501 1.1 ragge PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
502 1.1 ragge while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
503 1.1 ragge ;
504 1.1 ragge
505 1.1 ragge /* 41, Receive interrupts */
506 1.1 ragge PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
507 1.1 ragge RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
508 1.1 ragge PIF_WCSR(RTI_DATA2_MEM,
509 1.1 ragge RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
510 1.1 ragge PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
511 1.1 ragge while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
512 1.1 ragge ;
513 1.1 ragge
514 1.1 ragge /*
515 1.1 ragge * Setup media stuff.
516 1.1 ragge */
517 1.1 ragge ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
518 1.1 ragge xge_ifmedia_status);
519 1.1 ragge ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
520 1.1 ragge ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_LR);
521 1.1 ragge
522 1.1 ragge aprint_normal("%s: Ethernet address %s\n", XNAME,
523 1.1 ragge ether_sprintf(enaddr));
524 1.1 ragge
525 1.1 ragge ifp = &sc->sc_ethercom.ec_if;
526 1.9 cegger strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
527 1.1 ragge ifp->if_baudrate = 10000000000LL;
528 1.1 ragge ifp->if_init = xge_init;
529 1.1 ragge ifp->if_stop = xge_stop;
530 1.1 ragge ifp->if_softc = sc;
531 1.1 ragge ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
532 1.1 ragge ifp->if_ioctl = xge_ioctl;
533 1.1 ragge ifp->if_start = xge_start;
534 1.1 ragge IFQ_SET_MAXLEN(&ifp->if_snd, max(NTXDESCS - 1, IFQ_MAXLEN));
535 1.1 ragge IFQ_SET_READY(&ifp->if_snd);
536 1.1 ragge
537 1.1 ragge /*
538 1.1 ragge * Offloading capabilities.
539 1.1 ragge */
540 1.1 ragge sc->sc_ethercom.ec_capabilities |=
541 1.1 ragge ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
542 1.1 ragge ifp->if_capabilities |=
543 1.1 ragge IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
544 1.1 ragge IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
545 1.1 ragge IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx | IFCAP_TSOv4;
546 1.1 ragge
547 1.1 ragge /*
548 1.1 ragge * Attach the interface.
549 1.1 ragge */
550 1.1 ragge if_attach(ifp);
551 1.1 ragge ether_ifattach(ifp, enaddr);
552 1.1 ragge
553 1.1 ragge /*
554 1.1 ragge * Setup interrupt vector before initializing.
555 1.1 ragge */
556 1.1 ragge if (pci_intr_map(pa, &ih))
557 1.9 cegger return aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
558 1.1 ragge intrstr = pci_intr_string(pc, ih);
559 1.1 ragge if ((sc->sc_ih =
560 1.1 ragge pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc)) == NULL)
561 1.9 cegger return aprint_error_dev(&sc->sc_dev, "unable to establish interrupt at %s\n",
562 1.9 cegger intrstr ? intrstr : "<unknown>");
563 1.9 cegger aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);
564 1.1 ragge
565 1.1 ragge #ifdef XGE_EVENT_COUNTERS
566 1.1 ragge evcnt_attach_dynamic(&sc->sc_intr, EVCNT_TYPE_MISC,
567 1.1 ragge NULL, XNAME, "intr");
568 1.1 ragge evcnt_attach_dynamic(&sc->sc_txintr, EVCNT_TYPE_MISC,
569 1.1 ragge NULL, XNAME, "txintr");
570 1.1 ragge evcnt_attach_dynamic(&sc->sc_rxintr, EVCNT_TYPE_MISC,
571 1.1 ragge NULL, XNAME, "rxintr");
572 1.1 ragge evcnt_attach_dynamic(&sc->sc_txqe, EVCNT_TYPE_MISC,
573 1.1 ragge NULL, XNAME, "txqe");
574 1.1 ragge #endif
575 1.1 ragge }
576 1.1 ragge
577 1.1 ragge void
578 1.1 ragge xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
579 1.1 ragge {
580 1.1 ragge struct xge_softc *sc = ifp->if_softc;
581 1.1 ragge uint64_t reg;
582 1.1 ragge
583 1.1 ragge ifmr->ifm_status = IFM_AVALID;
584 1.1 ragge ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;
585 1.1 ragge
586 1.1 ragge reg = PIF_RCSR(ADAPTER_STATUS);
587 1.1 ragge if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
588 1.1 ragge ifmr->ifm_status |= IFM_ACTIVE;
589 1.1 ragge }
590 1.1 ragge
591 1.1 ragge int
592 1.4 christos xge_xgmii_mediachange(struct ifnet *ifp)
593 1.1 ragge {
594 1.1 ragge return 0;
595 1.1 ragge }
596 1.1 ragge
597 1.1 ragge static void
598 1.1 ragge xge_enable(struct xge_softc *sc)
599 1.1 ragge {
600 1.1 ragge uint64_t val;
601 1.1 ragge
602 1.1 ragge /* 2, enable adapter */
603 1.1 ragge val = PIF_RCSR(ADAPTER_CONTROL);
604 1.1 ragge val |= ADAPTER_EN;
605 1.1 ragge PIF_WCSR(ADAPTER_CONTROL, val);
606 1.1 ragge
607 1.1 ragge /* 3, light the card enable led */
608 1.1 ragge val = PIF_RCSR(ADAPTER_CONTROL);
609 1.1 ragge val |= LED_ON;
610 1.1 ragge PIF_WCSR(ADAPTER_CONTROL, val);
611 1.1 ragge printf("%s: link up\n", XNAME);
612 1.1 ragge
613 1.1 ragge }
614 1.1 ragge
615 1.1 ragge int
616 1.1 ragge xge_init(struct ifnet *ifp)
617 1.1 ragge {
618 1.1 ragge struct xge_softc *sc = ifp->if_softc;
619 1.1 ragge uint64_t val;
620 1.1 ragge
621 1.1 ragge if (ifp->if_flags & IFF_RUNNING)
622 1.1 ragge return 0;
623 1.1 ragge
624 1.1 ragge /* 31+32, setup MAC config */
625 1.1 ragge PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
626 1.1 ragge RMAC_BCAST_EN|RMAC_DISCARD_PFRM|RMAC_PROM_EN);
627 1.1 ragge
628 1.1 ragge DELAY(1000);
629 1.1 ragge
630 1.1 ragge /* 54, ensure that the adapter is 'quiescent' */
631 1.1 ragge val = PIF_RCSR(ADAPTER_STATUS);
632 1.1 ragge if ((val & QUIESCENT) != QUIESCENT) {
633 1.1 ragge char buf[200];
634 1.1 ragge printf("%s: adapter not quiescent, aborting\n", XNAME);
635 1.1 ragge val = (val & QUIESCENT) ^ QUIESCENT;
636 1.10 christos snprintb(buf, sizeof buf, QUIESCENT_BMSK, val);
637 1.1 ragge printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
638 1.1 ragge return 1;
639 1.1 ragge }
640 1.1 ragge
641 1.1 ragge /* 56, enable the transmit laser */
642 1.1 ragge val = PIF_RCSR(ADAPTER_CONTROL);
643 1.1 ragge val |= EOI_TX_ON;
644 1.1 ragge PIF_WCSR(ADAPTER_CONTROL, val);
645 1.1 ragge
646 1.1 ragge xge_enable(sc);
647 1.1 ragge /*
648 1.1 ragge * Enable all interrupts
649 1.1 ragge */
650 1.1 ragge PIF_WCSR(TX_TRAFFIC_MASK, 0);
651 1.1 ragge PIF_WCSR(RX_TRAFFIC_MASK, 0);
652 1.1 ragge PIF_WCSR(GENERAL_INT_MASK, 0);
653 1.1 ragge PIF_WCSR(TXPIC_INT_MASK, 0);
654 1.1 ragge PIF_WCSR(RXPIC_INT_MASK, 0);
655 1.1 ragge PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
656 1.1 ragge PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
657 1.1 ragge
658 1.1 ragge
659 1.1 ragge /* Done... */
660 1.1 ragge ifp->if_flags |= IFF_RUNNING;
661 1.1 ragge ifp->if_flags &= ~IFF_OACTIVE;
662 1.1 ragge
663 1.1 ragge return 0;
664 1.1 ragge }
665 1.1 ragge
666 1.1 ragge static void
667 1.4 christos xge_stop(struct ifnet *ifp, int disable)
668 1.1 ragge {
669 1.1 ragge struct xge_softc *sc = ifp->if_softc;
670 1.1 ragge uint64_t val;
671 1.1 ragge
672 1.1 ragge val = PIF_RCSR(ADAPTER_CONTROL);
673 1.1 ragge val &= ~ADAPTER_EN;
674 1.1 ragge PIF_WCSR(ADAPTER_CONTROL, val);
675 1.1 ragge
676 1.1 ragge while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
677 1.1 ragge ;
678 1.1 ragge }
679 1.1 ragge
680 1.1 ragge int
681 1.1 ragge xge_intr(void *pv)
682 1.1 ragge {
683 1.1 ragge struct xge_softc *sc = pv;
684 1.1 ragge struct txd *txd;
685 1.1 ragge struct ifnet *ifp = &sc->sc_if;
686 1.1 ragge bus_dmamap_t dmp;
687 1.1 ragge uint64_t val;
688 1.1 ragge int i, lasttx, plen;
689 1.1 ragge
690 1.1 ragge val = PIF_RCSR(GENERAL_INT_STATUS);
691 1.1 ragge if (val == 0)
692 1.1 ragge return 0; /* no interrupt here */
693 1.1 ragge
694 1.1 ragge XGE_EVCNT_INCR(&sc->sc_intr);
695 1.1 ragge
696 1.1 ragge PIF_WCSR(GENERAL_INT_STATUS, val);
697 1.1 ragge
698 1.1 ragge if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
699 1.1 ragge /* Wait for quiescence */
700 1.1 ragge printf("%s: link down\n", XNAME);
701 1.1 ragge while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
702 1.1 ragge ;
703 1.1 ragge PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
704 1.1 ragge
705 1.1 ragge val = PIF_RCSR(ADAPTER_STATUS);
706 1.1 ragge if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
707 1.1 ragge xge_enable(sc); /* Only if link restored */
708 1.1 ragge }
709 1.1 ragge
710 1.1 ragge if ((val = PIF_RCSR(TX_TRAFFIC_INT))) {
711 1.1 ragge XGE_EVCNT_INCR(&sc->sc_txintr);
712 1.1 ragge PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
713 1.1 ragge }
714 1.1 ragge /*
715 1.1 ragge * Collect sent packets.
716 1.1 ragge */
717 1.1 ragge lasttx = sc->sc_lasttx;
718 1.1 ragge while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
719 1.1 ragge txd = sc->sc_txd[i];
720 1.1 ragge dmp = sc->sc_txm[i];
721 1.1 ragge
722 1.1 ragge bus_dmamap_sync(sc->sc_dmat, dmp, 0,
723 1.1 ragge dmp->dm_mapsize,
724 1.1 ragge BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
725 1.1 ragge
726 1.1 ragge if (txd->txd_control1 & TXD_CTL1_OWN) {
727 1.1 ragge bus_dmamap_sync(sc->sc_dmat, dmp, 0,
728 1.1 ragge dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
729 1.1 ragge break;
730 1.1 ragge }
731 1.1 ragge bus_dmamap_unload(sc->sc_dmat, dmp);
732 1.1 ragge m_freem(sc->sc_txb[i]);
733 1.1 ragge ifp->if_opackets++;
734 1.1 ragge sc->sc_lasttx = i;
735 1.1 ragge }
736 1.1 ragge if (i == sc->sc_nexttx) {
737 1.1 ragge XGE_EVCNT_INCR(&sc->sc_txqe);
738 1.1 ragge }
739 1.1 ragge
740 1.1 ragge if (sc->sc_lasttx != lasttx)
741 1.1 ragge ifp->if_flags &= ~IFF_OACTIVE;
742 1.1 ragge
743 1.1 ragge xge_start(ifp); /* Try to get more packets on the wire */
744 1.1 ragge
745 1.1 ragge if ((val = PIF_RCSR(RX_TRAFFIC_INT))) {
746 1.1 ragge XGE_EVCNT_INCR(&sc->sc_rxintr);
747 1.1 ragge PIF_WCSR(RX_TRAFFIC_INT, val); /* clear interrupt bits */
748 1.1 ragge }
749 1.1 ragge
750 1.1 ragge for (;;) {
751 1.1 ragge struct rxdesc *rxd;
752 1.1 ragge struct mbuf *m;
753 1.1 ragge
754 1.1 ragge XGE_RXSYNC(sc->sc_nextrx,
755 1.1 ragge BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
756 1.1 ragge
757 1.1 ragge rxd = XGE_RXD(sc->sc_nextrx);
758 1.1 ragge if (rxd->rxd_control1 & RXD_CTL1_OWN) {
759 1.1 ragge XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
760 1.1 ragge break;
761 1.1 ragge }
762 1.1 ragge
763 1.1 ragge /* got a packet */
764 1.1 ragge m = sc->sc_rxb[sc->sc_nextrx];
765 1.1 ragge #if RX_MODE == RX_MODE_1
766 1.1 ragge plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
767 1.1 ragge #elif RX_MODE == RX_MODE_3
768 1.1 ragge #error Fix rxmodes in xge_intr
769 1.1 ragge #elif RX_MODE == RX_MODE_5
770 1.1 ragge plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
771 1.1 ragge plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
772 1.1 ragge plen += m->m_next->m_next->m_len =
773 1.1 ragge RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
774 1.1 ragge plen += m->m_next->m_next->m_next->m_len =
775 1.1 ragge RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
776 1.1 ragge plen += m->m_next->m_next->m_next->m_next->m_len =
777 1.1 ragge RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
778 1.1 ragge #endif
779 1.1 ragge m->m_pkthdr.rcvif = ifp;
780 1.1 ragge m->m_pkthdr.len = plen;
781 1.1 ragge
782 1.1 ragge val = rxd->rxd_control1;
783 1.1 ragge
784 1.1 ragge if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
785 1.1 ragge /* Failed, recycle this mbuf */
786 1.1 ragge #if RX_MODE == RX_MODE_1
787 1.1 ragge rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
788 1.1 ragge rxd->rxd_control1 = RXD_CTL1_OWN;
789 1.1 ragge #elif RX_MODE == RX_MODE_3
790 1.1 ragge #elif RX_MODE == RX_MODE_5
791 1.1 ragge #endif
792 1.1 ragge XGE_RXSYNC(sc->sc_nextrx,
793 1.1 ragge BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
794 1.1 ragge ifp->if_ierrors++;
795 1.1 ragge break;
796 1.1 ragge }
797 1.1 ragge
798 1.1 ragge ifp->if_ipackets++;
799 1.1 ragge
800 1.1 ragge if (RXD_CTL1_PROTOS(val) & (RXD_CTL1_P_IPv4|RXD_CTL1_P_IPv6)) {
801 1.1 ragge m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
802 1.1 ragge if (RXD_CTL1_L3CSUM(val) != 0xffff)
803 1.1 ragge m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
804 1.1 ragge }
805 1.1 ragge if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP) {
806 1.1 ragge m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_TCPv6;
807 1.1 ragge if (RXD_CTL1_L4CSUM(val) != 0xffff)
808 1.1 ragge m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
809 1.1 ragge }
810 1.1 ragge if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP) {
811 1.1 ragge m->m_pkthdr.csum_flags |= M_CSUM_UDPv4|M_CSUM_UDPv6;
812 1.1 ragge if (RXD_CTL1_L4CSUM(val) != 0xffff)
813 1.1 ragge m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
814 1.1 ragge }
815 1.1 ragge
816 1.1 ragge #if NBPFILTER > 0
817 1.1 ragge if (ifp->if_bpf)
818 1.1 ragge bpf_mtap(ifp->if_bpf, m);
819 1.1 ragge #endif /* NBPFILTER > 0 */
820 1.1 ragge
821 1.1 ragge (*ifp->if_input)(ifp, m);
822 1.1 ragge
823 1.1 ragge if (++sc->sc_nextrx == NRXREAL)
824 1.1 ragge sc->sc_nextrx = 0;
825 1.1 ragge
826 1.1 ragge }
827 1.1 ragge
828 1.1 ragge return 0;
829 1.1 ragge }
830 1.1 ragge
831 1.1 ragge int
832 1.5 christos xge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
833 1.1 ragge {
834 1.1 ragge struct xge_softc *sc = ifp->if_softc;
835 1.1 ragge struct ifreq *ifr = (struct ifreq *) data;
836 1.1 ragge int s, error = 0;
837 1.1 ragge
838 1.1 ragge s = splnet();
839 1.1 ragge
840 1.1 ragge switch (cmd) {
841 1.1 ragge case SIOCSIFMTU:
842 1.8 dyoung if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > XGE_MAX_MTU)
843 1.1 ragge error = EINVAL;
844 1.8 dyoung else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET){
845 1.1 ragge PIF_WCSR(RMAC_MAX_PYLD_LEN,
846 1.1 ragge RMAC_PYLD_LEN(ifr->ifr_mtu));
847 1.8 dyoung error = 0;
848 1.1 ragge }
849 1.1 ragge break;
850 1.1 ragge
851 1.1 ragge case SIOCGIFMEDIA:
852 1.1 ragge case SIOCSIFMEDIA:
853 1.1 ragge error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
854 1.1 ragge break;
855 1.1 ragge
856 1.1 ragge default:
857 1.8 dyoung if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
858 1.8 dyoung break;
859 1.8 dyoung
860 1.8 dyoung error = 0;
861 1.8 dyoung
862 1.8 dyoung if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
863 1.8 dyoung ;
864 1.8 dyoung else if (ifp->if_flags & IFF_RUNNING) {
865 1.1 ragge /* Change multicast list */
866 1.1 ragge xge_mcast_filter(sc);
867 1.1 ragge }
868 1.1 ragge break;
869 1.1 ragge }
870 1.1 ragge
871 1.1 ragge splx(s);
872 1.1 ragge return(error);
873 1.1 ragge }
874 1.1 ragge
/*
 * Program the RMAC address CAM from the current multicast list.
 * Slot 0 holds the station (unicast) address, so multicast entries
 * start at slot 1.  Address ranges, or more addresses than the CAM
 * holds, force a fallback to receiving all multicast frames.
 */
void
xge_mcast_filter(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1; /* first slot used for card unicast address */
	uint64_t val;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* Skip ranges */
			goto allmulti;
		}
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		/* Pack the 6-byte address, MSB first, into val. */
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
		/* Address goes in the top 48 bits of the data register. */
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		/* Busy-wait until the CAM write strobe clears. */
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}
	/* set the remaining entries to the broadcast address */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Just receive everything with the multicast bit set */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}
928 1.1 ragge
/*
 * Transmit start routine: drain the interface send queue, build one
 * hardware descriptor list per packet and kick the transmit engine.
 * Called from the network stack and from the interrupt handler after
 * completed descriptors have been reclaimed.
 */
void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL; /* XXX - gcc */
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, error, i;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	par = lcr = 0;
	for (;;) {
		/* Peek first; the packet is only dequeued once mapped. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break; /* out of packets */

		if (sc->sc_nexttx == sc->sc_lasttx)
			break; /* No more space */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0) {
			printf("%s: bus_dmamap_load_mbuf error %d\n",
			    XNAME, error);
			break;
		}
		/* Mapping succeeded; commit to sending this packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;	/* remember for later m_freem() */
		/* One txd fragment per non-empty DMA segment. */
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
		/* ntxd = index of the last fragment written above. */
		ntxd = txd - sc->sc_txd[nexttx] - 1;
		txd = sc->sc_txd[nexttx];
		/* First fragment carries ownership + list-start flags. */
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;
		if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) {
			/* TCP segmentation offload: give hardware the MSS. */
			txd->txd_control1 |= TXD_CTL1_MSS(m->m_pkthdr.segsz);
			txd->txd_control1 |= TXD_CTL1_LSO;
		}

		/* Request hardware checksum insertion as stamped on the mbuf. */
		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			txd->txd_control2 |= TXD_CTL2_CUDP;
		/* Mark the final fragment of the list. */
		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		par = sc->sc_txdp[nexttx];
		/*
		 * NOTE(review): TXDL_NUMTXD is given nsegs-1 here —
		 * presumably the hardware counts descriptors from zero;
		 * confirm against the Xframe programming guide.
		 */
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4)
			lcr |= TXDL_SFF;
		/* Hand the descriptor list to the transmit engine. */
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}
1010 1.1 ragge
1011 1.1 ragge /*
1012 1.1 ragge * Allocate DMA memory for transmit descriptor fragments.
1013 1.1 ragge * Only one map is used for all descriptors.
1014 1.1 ragge */
1015 1.1 ragge int
1016 1.1 ragge xge_alloc_txmem(struct xge_softc *sc)
1017 1.1 ragge {
1018 1.1 ragge struct txd *txp;
1019 1.1 ragge bus_dma_segment_t seg;
1020 1.1 ragge bus_addr_t txdp;
1021 1.5 christos void *kva;
1022 1.1 ragge int i, rseg, state;
1023 1.1 ragge
1024 1.1 ragge #define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
1025 1.1 ragge state = 0;
1026 1.1 ragge if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
1027 1.1 ragge &seg, 1, &rseg, BUS_DMA_NOWAIT))
1028 1.1 ragge goto err;
1029 1.1 ragge state++;
1030 1.1 ragge if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
1031 1.1 ragge BUS_DMA_NOWAIT))
1032 1.1 ragge goto err;
1033 1.1 ragge
1034 1.1 ragge state++;
1035 1.1 ragge if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
1036 1.1 ragge BUS_DMA_NOWAIT, &sc->sc_txmap))
1037 1.1 ragge goto err;
1038 1.1 ragge state++;
1039 1.1 ragge if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
1040 1.1 ragge kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
1041 1.1 ragge goto err;
1042 1.1 ragge
1043 1.1 ragge /* setup transmit array pointers */
1044 1.1 ragge txp = (struct txd *)kva;
1045 1.1 ragge txdp = seg.ds_addr;
1046 1.1 ragge for (txp = (struct txd *)kva, i = 0; i < NTXDESCS; i++) {
1047 1.1 ragge sc->sc_txd[i] = txp;
1048 1.1 ragge sc->sc_txdp[i] = txdp;
1049 1.1 ragge txp += NTXFRAGS;
1050 1.1 ragge txdp += (NTXFRAGS * sizeof(struct txd));
1051 1.1 ragge }
1052 1.1 ragge
1053 1.1 ragge return 0;
1054 1.1 ragge
1055 1.1 ragge err:
1056 1.1 ragge if (state > 2)
1057 1.1 ragge bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
1058 1.1 ragge if (state > 1)
1059 1.1 ragge bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
1060 1.1 ragge if (state > 0)
1061 1.1 ragge bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1062 1.1 ragge return ENOBUFS;
1063 1.1 ragge }
1064 1.1 ragge
1065 1.1 ragge /*
1066 1.1 ragge * Allocate DMA memory for receive descriptor,
1067 1.1 ragge * only one map is used for all descriptors.
1068 1.1 ragge * link receive descriptor pages together.
1069 1.1 ragge */
1070 1.1 ragge int
1071 1.1 ragge xge_alloc_rxmem(struct xge_softc *sc)
1072 1.1 ragge {
1073 1.1 ragge struct rxd_4k *rxpp;
1074 1.1 ragge bus_dma_segment_t seg;
1075 1.5 christos void *kva;
1076 1.1 ragge int i, rseg, state;
1077 1.1 ragge
1078 1.1 ragge /* sanity check */
1079 1.1 ragge if (sizeof(struct rxd_4k) != XGE_PAGE) {
1080 1.1 ragge printf("bad compiler struct alignment, %d != %d\n",
1081 1.1 ragge (int)sizeof(struct rxd_4k), XGE_PAGE);
1082 1.1 ragge return EINVAL;
1083 1.1 ragge }
1084 1.1 ragge
1085 1.1 ragge state = 0;
1086 1.1 ragge if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
1087 1.1 ragge &seg, 1, &rseg, BUS_DMA_NOWAIT))
1088 1.1 ragge goto err;
1089 1.1 ragge state++;
1090 1.1 ragge if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
1091 1.1 ragge BUS_DMA_NOWAIT))
1092 1.1 ragge goto err;
1093 1.1 ragge
1094 1.1 ragge state++;
1095 1.1 ragge if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
1096 1.1 ragge BUS_DMA_NOWAIT, &sc->sc_rxmap))
1097 1.1 ragge goto err;
1098 1.1 ragge state++;
1099 1.1 ragge if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
1100 1.1 ragge kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
1101 1.1 ragge goto err;
1102 1.1 ragge
1103 1.1 ragge /* setup receive page link pointers */
1104 1.1 ragge for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
1105 1.1 ragge sc->sc_rxd_4k[i] = rxpp;
1106 1.1 ragge rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
1107 1.1 ragge (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
1108 1.1 ragge }
1109 1.1 ragge sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
1110 1.1 ragge (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;
1111 1.1 ragge
1112 1.1 ragge return 0;
1113 1.1 ragge
1114 1.1 ragge err:
1115 1.1 ragge if (state > 2)
1116 1.1 ragge bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
1117 1.1 ragge if (state > 1)
1118 1.1 ragge bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
1119 1.1 ragge if (state > 0)
1120 1.1 ragge bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1121 1.1 ragge return ENOBUFS;
1122 1.1 ragge }
1123 1.1 ragge
1124 1.1 ragge
/*
 * Add a new mbuf chain to descriptor id.
 *
 * Allocates fresh receive buffers for the descriptor, loads them for
 * DMA and hands the descriptor back to the hardware (sets OWN).
 * Returns 0 on success, ENOBUFS/DMA error code on failure.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	/* Map the flat descriptor index onto its 4k page and slot. */
	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/*
	 * Allocate mbufs.
	 * Currently five mbufs and two clusters are used,
	 * the hardware will put (ethernet, ip, tcp/udp) headers in
	 * their own buffer and the clusters are only used for data.
	 */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return ENOBUFS;
	MCLGET(m[0], M_DONTWAIT);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return ENOBUFS;
	}
	m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	/* Only the two data buffers (3 and 4) get clusters. */
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Out of something */
		for (i = 0; i < 5; i++)
			if (m[i] != NULL)
				m_free(m[i]);
		return ENOBUFS;
	}
	/* Link'em together */
	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	/* Release the DMA mapping of the buffer being replaced. */
	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];

	/*
	 * NOTE(review): if this load fails, sc_rxb[id] already points
	 * at the new chain while the descriptor still references the
	 * old buffers, and the new chain is not freed here — verify
	 * against the caller's recycle path in xge_intr.
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return error;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Fill in buffer sizes/addresses, then set OWN last so the
	 * hardware never sees a half-written descriptor. */
#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return 0;
}
1217 1.1 ragge
/*
 * These magics comes from the FreeBSD driver.
 *
 * Bring up the XGXS/XAUI transceiver block via the DTX_CONTROL MDIO
 * back-door.  The write sequence and 50us delays follow the vendor
 * documentation; do not reorder.  Always returns 0 (the verification
 * reads are compiled out below).
 */
int
xge_setup_xgxs(struct xge_softc *sc)
{
	/* The magic numbers are described in the users guide */

	/* Writing to MDIO 0x8000 (Global Config 0) */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);

	/* Writing to MDIO 0x8000 (Global Config 1) */
	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);

	/* Reset the Gigablaze */
	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);

	/* read the pole settings */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);

	/* Workaround for TX Lane XAUI initialization error.
	   Read Xpak PHY register 24 for XAUI lane status */
	PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);

	/*
	 * Reading the MDIO control with value 0x1804001c0F001c
	 * means the TxLanes were already in sync
	 * Reading the MDIO control with value 0x1804000c0x001c
	 * means some TxLanes are not in sync where x is a 4-bit
	 * value representing each lanes
	 */
#if 0
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0F001cULL) {
		printf("%s: MDIO_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x1804001c0F001cULL);
		return 1;
	}
#endif

	/* Set and remove the DTE XS INTLoopBackN */
	PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);

#if 0
	/* Reading the DTX control register Should be 0x5152040001c */
	val = PIF_RCSR(DTX_CONTROL);
	if (val != 0x5152040001cULL) {
		printf("%s: DTX_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x5152040001cULL);
		return 1;
	}
#endif

	PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);

#if 0
	/* Reading the MIOD control should be 0x1804001c0f001c */
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0f001cULL) {
		printf("%s: MDIO_CONTROL2: %llx != %llx\n",
		    XNAME, val, 0x1804001c0f001cULL);
		return 1;
	}
#endif
	return 0;
}
1308