/*	$NetBSD: if_xge.c,v 1.28 2018/12/09 11:14:02 jdolecek Exp $ */

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the S2io Xframe Ten Gigabit Ethernet controller.
 *
 * TODO (in no specific order):
 *   HW VLAN support.
 *   IPv6 HW cksum.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xge.c,v 1.28 2018/12/09 11:14:02 jdolecek Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <sys/proc.h>

#include <dev/pci/if_xgereg.h>

/*
 * Some tunable constants, tune with care!
 */
#define RX_MODE		RX_MODE_1 /* Receive mode (buffer usage, see below) */
#define NRXDESCS	1016	  /* # of receive descriptors (requested) */
#define NTXDESCS	8192	  /* Number of transmit descriptors */
#define NTXFRAGS	100	  /* Max fragments per packet */
#define XGE_EVENT_COUNTERS	  /* Instrumentation */

/*
 * Receive buffer modes; 1, 3 or 5 buffers.
 */
#define RX_MODE_1 1
#define RX_MODE_3 3
#define RX_MODE_5 5

/*
 * Use clever macros to avoid a bunch of #ifdef's.
 */
#define XCONCAT3(x,y,z) x ## y ## z
#define CONCAT3(x,y,z) XCONCAT3(x,y,z)
#define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
#define rxd_4k CONCAT3(rxd,RX_MODE,_4k)
#define rxdesc ___CONCAT(rxd,RX_MODE)
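/*
 * For illustration: with the default RX_MODE == RX_MODE_1 these expand
 * to NDESC_1BUFMODE, rxd1_4k and rxd1, the 1-buffer descriptor
 * definitions from if_xgereg.h.
 */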

#define NEXTTX(x)	(((x)+1) % NTXDESCS)
#define NRXFRAGS	RX_MODE /* hardware imposed frags */
#define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
#define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
#define RXMAPSZ		(NRXPAGES*PAGE_SIZE)

#ifdef XGE_EVENT_COUNTERS
#define XGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define XGE_EVCNT_INCR(ev)	/* nothing */
#endif

/*
 * Magic values to work around a bug where the MAC address can't be
 * read correctly.  They come from the Linux driver.
 */
static uint64_t fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
};


struct xge_softc {
	device_t sc_dev;
	struct ethercom sc_ethercom;
#define sc_if sc_ethercom.ec_if
	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_st;
	bus_space_handle_t sc_sh;
	bus_space_tag_t sc_txt;
	bus_space_handle_t sc_txh;
	void *sc_ih;

	struct ifmedia xena_media;
	pcireg_t sc_pciregs[16];

	/* Transmit structures */
	struct txd *sc_txd[NTXDESCS];	/* transmit frags array */
	bus_addr_t sc_txdp[NTXDESCS];	/* bus address of transmit frags */
	bus_dmamap_t sc_txm[NTXDESCS];	/* transmit frags map */
	struct mbuf *sc_txb[NTXDESCS];	/* transmit mbuf pointer */
	int sc_nexttx, sc_lasttx;
	bus_dmamap_t sc_txmap;		/* transmit descriptor map */

	/* Receive data */
	bus_dmamap_t sc_rxmap;		/* receive descriptor map */
	struct rxd_4k *sc_rxd_4k[NRXPAGES];	/* receive desc pages */
	bus_dmamap_t sc_rxm[NRXREAL];	/* receive buffer map */
	struct mbuf *sc_rxb[NRXREAL];	/* mbufs on receive descriptors */
	int sc_nextrx;			/* next descriptor to check */

#ifdef XGE_EVENT_COUNTERS
	struct evcnt sc_intr;	/* # of interrupts */
	struct evcnt sc_txintr;	/* # of transmit interrupts */
	struct evcnt sc_rxintr;	/* # of receive interrupts */
	struct evcnt sc_txqe;	/* # of xmit intrs when board queue empty */
#endif
};

static int xge_match(device_t parent, cfdata_t cf, void *aux);
static void xge_attach(device_t parent, device_t self, void *aux);
static int xge_alloc_txmem(struct xge_softc *);
static int xge_alloc_rxmem(struct xge_softc *);
static void xge_start(struct ifnet *);
static void xge_stop(struct ifnet *, int);
static int xge_add_rxbuf(struct xge_softc *, int);
static void xge_mcast_filter(struct xge_softc *sc);
static int xge_setup_xgxs(struct xge_softc *sc);
static int xge_ioctl(struct ifnet *ifp, u_long cmd, void *data);
static int xge_init(struct ifnet *ifp);
static void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
static int xge_xgmii_mediachange(struct ifnet *);
static int xge_intr(void *);

/*
 * Helpers to address registers.
 */
#define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
#define PIF_RCSR(csr)		pif_rcsr(sc, csr)
#define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
#define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)

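/*
 * The chip's registers are 64 bits wide; the helpers below access them
 * as two 32-bit words, the low word at `csr' and the high word at
 * `csr'+4.
 */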
static inline void
pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}

static inline uint64_t
pif_rcsr(struct xge_softc *sc, bus_size_t csr)
{
	uint64_t val, val2;
	val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
	val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
	val |= (val2 << 32);
	return val;
}

static inline void
txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;
	bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
	bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
}


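/*
 * Keyed MAC registers: RMAC_CFG_KEY is re-armed before each 32-bit
 * half of the register is written.
 */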
static inline void
pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;
	PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
	PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}


CFATTACH_DECL_NEW(xge, sizeof(struct xge_softc),
    xge_match, xge_attach, NULL, NULL);

#define XNAME device_xname(sc->sc_dev)

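/*
 * Receive descriptors are stored in 4k pages of NDESC_BUFMODE entries
 * each; these macros sync and address descriptor `desc' within its page.
 */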
#define XGE_RXSYNC(desc, what) \
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
	    (desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
	    (desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
#define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
	    r4_rxd[desc%NDESC_BUFMODE]

/*
 * Non-tunable constants.
 */
#define XGE_MAX_MTU	9600
#define XGE_IP_MAXPACKET 65535	/* same as IP_MAXPACKET */

static int
xge_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_S2IO &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_S2IO_XFRAME)
		return (1);

	return (0);
}

void
xge_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct xge_softc *sc;
	struct ifnet *ifp;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint64_t val;
	int i;
	char intrbuf[PCI_INTRSTR_LEN];

	sc = device_private(self);
	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;

	/* Get BAR0 address */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, 0, 0)) {
		aprint_error("%s: unable to map PIF BAR registers\n", XNAME);
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
	    &sc->sc_txt, &sc->sc_txh, 0, 0)) {
		aprint_error("%s: unable to map TXP BAR registers\n", XNAME);
		return;
	}

	/* Save PCI config space */
	for (i = 0; i < 64; i += 4)
		sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#elif BYTE_ORDER == BIG_ENDIAN
	/* do nothing */
#else
#error bad endianness!
#endif

	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		aprint_error("%s: failed configuring endian, %llx != %llx!\n",
		    XNAME, (unsigned long long)val, SWAPPER_MAGIC);
		return;
	}

	/*
	 * The MAC addr may be all FF's, which is not good.
	 * Resolve it by writing some magics to GPIO_CONTROL and
	 * force a chip reset to read in the serial eeprom again.
	 */
	for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
		PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
		PIF_RCSR(GPIO_CONTROL);
	}

	/*
	 * Reset the chip and restore the PCI registers.
	 */
	PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
	DELAY(500000);
	for (i = 0; i < 64; i += 4)
		pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);

	/*
	 * Restore the byte order registers.
	 */
#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#elif BYTE_ORDER == BIG_ENDIAN
	/* do nothing */
#else
#error bad endianness!
#endif

	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		aprint_error("%s: failed configuring endian2, %llx != %llx!\n",
		    XNAME, (unsigned long long)val, SWAPPER_MAGIC);
		return;
	}

	/*
	 * XGXS initialization.
	 */
	/* 29, reset */
	PIF_WCSR(SW_RESET, 0);
	DELAY(500000);

	/* 30, configure XGXS transceiver */
	xge_setup_xgxs(sc);

	/* 33, program MAC address (not needed here) */
	/* Get ethernet address */
	PIF_WCSR(RMAC_ADDR_CMD_MEM,
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
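	/* the station address sits in the 48 most significant bits */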
	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));

	/*
	 * Get memory for transmit descriptor lists.
	 */
	if (xge_alloc_txmem(sc)) {
		aprint_error("%s: failed allocating txmem.\n", XNAME);
		return;
	}

	/* 9 and 10 - set FIFO number/prio */
	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
	PIF_WCSR(TX_FIFO_P1, 0ULL);
	PIF_WCSR(TX_FIFO_P2, 0ULL);
	PIF_WCSR(TX_FIFO_P3, 0ULL);

	/* 11, XXX set round-robin prio? */

	/* 12, enable transmit FIFO */
	val = PIF_RCSR(TX_FIFO_P0);
	val |= TX_FIFO_ENABLE;
	PIF_WCSR(TX_FIFO_P0, val);

	/* 13, disable some error checks */
	PIF_WCSR(TX_PA_CFG,
	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);

	/*
	 * Create transmit DMA maps.
	 * Make them large for TSO.
	 */
	for (i = 0; i < NTXDESCS; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_IP_MAXPACKET,
		    NTXFRAGS, MCLBYTES, 0, 0, &sc->sc_txm[i])) {
			aprint_error("%s: cannot create TX DMA maps\n", XNAME);
			return;
		}
	}

	sc->sc_lasttx = NTXDESCS-1;

	/*
	 * RxDMA initialization.
	 * Only use one out of 8 possible receive queues.
	 */
	if (xge_alloc_rxmem(sc)) {	/* allocate rx descriptor memory */
		aprint_error("%s: failed allocating rxmem\n", XNAME);
		return;
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < NRXREAL; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_MTU,
		    NRXFRAGS, MCLBYTES, 0, 0, &sc->sc_rxm[i])) {
			aprint_error("%s: cannot create RX DMA maps\n", XNAME);
			return;
		}
	}

	/* allocate mbufs to receive descriptors */
	for (i = 0; i < NRXREAL; i++)
		if (xge_add_rxbuf(sc, i))
			panic("out of mbufs too early");

	/* 14, setup receive ring priority */
	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL);	/* only use one ring */

	/* 15, setup receive ring round-robin calendar */
	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL);	/* only use one ring */
	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);

	/* 16, write receive ring start address */
	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
	/* PRC_RXD0_[1-7] are not used */

	/* 17, Setup alarm registers */
	PIF_WCSR(PRC_ALARM_ACTION, 0ULL);	/* Default everything to retry */

	/* 18, init receive ring controller */
#if RX_MODE == RX_MODE_1
	val = RING_MODE_1;
#elif RX_MODE == RX_MODE_3
	val = RING_MODE_3;
#else /* RX_MODE == RX_MODE_5 */
	val = RING_MODE_5;
#endif
	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
	/* leave 1-7 disabled */
	/* XXXX snoop configuration? */

	/* 19, set chip memory assigned to the queue */
	PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));	/* all 64M to queue 0 */

	/* 20, setup RLDRAM parameters */
	/* do not touch it for now */

	/* 21, setup pause frame thresholds */
	/* do not touch the defaults */
	/* XXX - must 0xff be written as stated in the manual? */

	/* 22, configure RED */
	/* we do not want to drop packets, so ignore */

	/* 23, initiate RLDRAM */
	val = PIF_RCSR(MC_RLDRAM_MRS);
	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
	PIF_WCSR(MC_RLDRAM_MRS, val);
	DELAY(1000);

	/*
	 * Setup interrupt policies.
	 */
	/* 40, Transmit interrupts */
	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
	PIF_WCSR(TTI_DATA2_MEM,
	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
		;

	/* 41, Receive interrupts */
	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
	PIF_WCSR(RTI_DATA2_MEM,
	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
		;

	/*
	 * Setup media stuff.
	 */
	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
	    xge_ifmedia_status);
	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_LR);

	aprint_normal("%s: Ethernet address %s\n", XNAME,
	    ether_sprintf(enaddr));

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_baudrate = 10000000000LL;
	ifp->if_init = xge_init;
	ifp->if_stop = xge_stop;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xge_ioctl;
	ifp->if_start = xge_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(NTXDESCS - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Offloading capabilities.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx | IFCAP_TSOv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Setup interrupt vector before initializing.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, xge_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish interrupt at %s\n",
		    intrstr ? intrstr : "<unknown>");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

#ifdef XGE_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_intr, EVCNT_TYPE_MISC,
	    NULL, XNAME, "intr");
	evcnt_attach_dynamic(&sc->sc_txintr, EVCNT_TYPE_MISC,
	    NULL, XNAME, "txintr");
	evcnt_attach_dynamic(&sc->sc_rxintr, EVCNT_TYPE_MISC,
	    NULL, XNAME, "rxintr");
	evcnt_attach_dynamic(&sc->sc_txqe, EVCNT_TYPE_MISC,
	    NULL, XNAME, "txqe");
#endif
}

void
xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t reg;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;

	reg = PIF_RCSR(ADAPTER_STATUS);
	if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;
}

int
xge_xgmii_mediachange(struct ifnet *ifp)
{
	return 0;
}

static void
xge_enable(struct xge_softc *sc)
{
	uint64_t val;

	/* 2, enable adapter */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* 3, light the card enable led */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= LED_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);
	printf("%s: link up\n", XNAME);

}

int
xge_init(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	/* 31+32, setup MAC config */
	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM|RMAC_PROM_EN);

	DELAY(1000);

	/* 54, ensure that the adapter is 'quiescent' */
	val = PIF_RCSR(ADAPTER_STATUS);
	if ((val & QUIESCENT) != QUIESCENT) {
		char buf[200];
		printf("%s: adapter not quiescent, aborting\n", XNAME);
		val = (val & QUIESCENT) ^ QUIESCENT;
		snprintb(buf, sizeof buf, QUIESCENT_BMSK, val);
		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
		return 1;
	}

	/* 56, enable the transmit laser */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= EOI_TX_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);

	xge_enable(sc);
	/*
	 * Enable all interrupts
	 */
	PIF_WCSR(TX_TRAFFIC_MASK, 0);
	PIF_WCSR(RX_TRAFFIC_MASK, 0);
	PIF_WCSR(GENERAL_INT_MASK, 0);
	PIF_WCSR(TXPIC_INT_MASK, 0);
	PIF_WCSR(RXPIC_INT_MASK, 0);
	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT);	/* only from RMAC */
	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);


	/* Done... */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
xge_stop(struct ifnet *ifp, int disable)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	val = PIF_RCSR(ADAPTER_CONTROL);
	val &= ~ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
		;
}

int
xge_intr(void *pv)
{
	struct xge_softc *sc = pv;
	struct txd *txd;
	struct ifnet *ifp = &sc->sc_if;
	bus_dmamap_t dmp;
	uint64_t val;
	int i, lasttx, plen;

	val = PIF_RCSR(GENERAL_INT_STATUS);
	if (val == 0)
		return 0;	/* no interrupt here */

	XGE_EVCNT_INCR(&sc->sc_intr);

	PIF_WCSR(GENERAL_INT_STATUS, val);

	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
		/* Wait for quiescence */
		printf("%s: link down\n", XNAME);
		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
			;
		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);

		val = PIF_RCSR(ADAPTER_STATUS);
		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
			xge_enable(sc); /* Only if link restored */
	}

	if ((val = PIF_RCSR(TX_TRAFFIC_INT))) {
		XGE_EVCNT_INCR(&sc->sc_txintr);
		PIF_WCSR(TX_TRAFFIC_INT, val);	/* clear interrupt bits */
	}
	/*
	 * Collect sent packets.
	 */
	lasttx = sc->sc_lasttx;
	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
		txd = sc->sc_txd[i];
		dmp = sc->sc_txm[i];

		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
		    dmp->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (txd->txd_control1 & TXD_CTL1_OWN) {
			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, dmp);
		m_freem(sc->sc_txb[i]);
		ifp->if_opackets++;
		sc->sc_lasttx = i;
	}
	if (i == sc->sc_nexttx) {
		XGE_EVCNT_INCR(&sc->sc_txqe);
	}

	if (sc->sc_lasttx != lasttx)
		ifp->if_flags &= ~IFF_OACTIVE;

	if_schedule_deferred_start(ifp); /* Try to get more packets on the wire */

	if ((val = PIF_RCSR(RX_TRAFFIC_INT))) {
		XGE_EVCNT_INCR(&sc->sc_rxintr);
		PIF_WCSR(RX_TRAFFIC_INT, val);	/* clear interrupt bits */
	}

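	/* Scan the receive ring until we reach a descriptor the hardware still owns */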
	for (;;) {
		struct rxdesc *rxd;
		struct mbuf *m;

		XGE_RXSYNC(sc->sc_nextrx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxd = XGE_RXD(sc->sc_nextrx);
		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
			break;
		}

		/* got a packet */
		m = sc->sc_rxb[sc->sc_nextrx];
#if RX_MODE == RX_MODE_1
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
#elif RX_MODE == RX_MODE_3
#error Fix rxmodes in xge_intr
#elif RX_MODE == RX_MODE_5
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_len =
		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
		plen += m->m_next->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
#endif
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = plen;

		val = rxd->rxd_control1;

		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
			/* Failed, recycle this mbuf */
#if RX_MODE == RX_MODE_1
			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
			rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
#endif
			XGE_RXSYNC(sc->sc_nextrx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			ifp->if_ierrors++;
			break;
		}

		if (RXD_CTL1_PROTOS(val) & (RXD_CTL1_P_IPv4|RXD_CTL1_P_IPv6)) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (RXD_CTL1_L3CSUM(val) != 0xffff)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP) {
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_TCPv6;
			if (RXD_CTL1_L4CSUM(val) != 0xffff)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP) {
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4|M_CSUM_UDPv6;
			if (RXD_CTL1_L4CSUM(val) != 0xffff)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);

		if (++sc->sc_nextrx == NRXREAL)
			sc->sc_nextrx = 0;

	}

	return 0;
}

int
xge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct xge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > XGE_MAX_MTU)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET) {
			PIF_WCSR(RMAC_MAX_PYLD_LEN,
			    RMAC_PYLD_LEN(ifr->ifr_mtu));
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/* Change multicast list */
			xge_mcast_filter(sc);
		}
		break;
	}

	splx(s);
	return (error);
}

void
xge_mcast_filter(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1; /* first slot used for card unicast address */
	uint64_t val;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* Skip ranges */
			goto allmulti;
		}
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
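		/* left-align the 48-bit address in the 64-bit data register */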
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}
	/* set the remaining entries to the broadcast address */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Just receive everything with the multicast bit set */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}

void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL;	/* XXX - gcc */
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, error, i;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	par = lcr = 0;
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;	/* out of packets */

		if (sc->sc_nexttx == sc->sc_lasttx)
			break;	/* No more space */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0) {
			printf("%s: bus_dmamap_load_mbuf error %d\n",
			    XNAME, error);
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
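		/*
		 * ntxd is the index of the last fragment descriptor;
		 * flag the first descriptor in the list with GCF and
		 * the last with GCL.
		 */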
		ntxd = txd - sc->sc_txd[nexttx] - 1;
		txd = sc->sc_txd[nexttx];
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;
		if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) {
			txd->txd_control1 |= TXD_CTL1_MSS(m->m_pkthdr.segsz);
			txd->txd_control1 |= TXD_CTL1_LSO;
		}

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			txd->txd_control2 |= TXD_CTL2_CUDP;
		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		par = sc->sc_txdp[nexttx];
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4)
			lcr |= TXDL_SFF;
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

		bpf_mtap(ifp, m, BPF_D_OUT);

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}

/*
 * Allocate DMA memory for transmit descriptor fragments.
 * Only one map is used for all descriptors.
 */
int
xge_alloc_txmem(struct xge_softc *sc)
{
	struct txd *txp;
	bus_dma_segment_t seg;
	bus_addr_t txdp;
	void *kva;
	int i, rseg, state;

#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
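/* one contiguous region: NTXDESCS transmit lists of NTXFRAGS descriptors each */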
	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_txmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup transmit array pointers */
	txp = (struct txd *)kva;
	txdp = seg.ds_addr;
	for (txp = (struct txd *)kva, i = 0; i < NTXDESCS; i++) {
		sc->sc_txd[i] = txp;
		sc->sc_txdp[i] = txdp;
		txp += NTXFRAGS;
		txdp += (NTXFRAGS * sizeof(struct txd));
	}

	return 0;

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return ENOBUFS;
}

/*
 * Allocate DMA memory for the receive descriptors;
 * only one map is used for all descriptors.
 * Link the receive descriptor pages together.
 */
int
xge_alloc_rxmem(struct xge_softc *sc)
{
	struct rxd_4k *rxpp;
	bus_dma_segment_t seg;
	void *kva;
	int i, rseg, state;

	/* sanity check */
	if (sizeof(struct rxd_4k) != XGE_PAGE) {
		printf("bad compiler struct alignment, %d != %d\n",
		    (int)sizeof(struct rxd_4k), XGE_PAGE);
		return EINVAL;
	}

	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup receive page link pointers */
	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
		sc->sc_rxd_4k[i] = rxpp;
		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
	}
	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;
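	/* the last page points back to the first, closing the descriptor ring */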

	return 0;

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return ENOBUFS;
}


/*
 * Add a new mbuf chain to descriptor id.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/*
	 * Allocate mbufs.
	 * Currently five mbufs and two clusters are used,
	 * the hardware will put (ethernet, ip, tcp/udp) headers in
	 * their own buffer and the clusters are only used for data.
	 */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return ENOBUFS;
	MCLGET(m[0], M_DONTWAIT);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return ENOBUFS;
	}
	m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Out of something */
		for (i = 0; i < 5; i++)
			if (m[i] != NULL)
				m_free(m[i]);
		return ENOBUFS;
	}
	/* Link'em together */
	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return error;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return 0;
}

/*
 * These magics come from the FreeBSD driver.
 */
int
xge_setup_xgxs(struct xge_softc *sc)
{
	/* The magic numbers are described in the users guide */

	/* Writing to MDIO 0x8000 (Global Config 0) */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);

	/* Writing to MDIO 0x8000 (Global Config 1) */
	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);

	/* Reset the Gigablaze */
	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);

	/* read the pole settings */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);

	/* Workaround for TX Lane XAUI initialization error.
	   Read Xpak PHY register 24 for XAUI lane status */
	PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);

	/*
	 * Reading the MDIO control with value 0x1804001c0F001c
	 * means the TxLanes were already in sync.
	 * Reading the MDIO control with value 0x1804000c0x001c
	 * means some TxLanes are not in sync, where x is a 4-bit
	 * value representing each lane.
	 */
#if 0
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0F001cULL) {
		printf("%s: MDIO_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x1804001c0F001cULL);
		return 1;
	}
#endif

	/* Set and remove the DTE XS INTLoopBackN */
	PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);

#if 0
	/* Reading the DTX control register should yield 0x5152040001c */
	val = PIF_RCSR(DTX_CONTROL);
	if (val != 0x5152040001cULL) {
		printf("%s: DTX_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x5152040001cULL);
		return 1;
	}
#endif

	PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);

#if 0
	/* Reading the MDIO control should yield 0x1804001c0f001c */
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0f001cULL) {
		printf("%s: MDIO_CONTROL2: %llx != %llx\n",
		    XNAME, val, 0x1804001c0f001cULL);
		return 1;
	}
#endif
	return 0;
}
