/* $NetBSD: if_scx.c,v 1.20 2020/03/27 09:19:33 nisimura Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define NOT_MP_SAFE	0

/*
 * Socionext SC2A11 SynQuacer NetSec GbE driver
 *
 * (possibly incorrect notes to be removed eventually)
 * - 32 byte descriptors in a 64 bit paddr design.
 * - multiple rings seem available.  There are special descriptor fields
 *   to designate the ring number a frame arrives from or departs to.
 * - memory mapped EEPROM to hold the MAC address.  The rest of the area
 *   is occupied by a set of ucode images for the two DMA engines and one
 *   packet engine.
 * - the frame address filter has 16 plus 16 slots.
 * - slot 0 is our own station address.  It is always enabled so the
 *   device can identify itself.
 * - slots 1~15 hold supplemental MAC addresses, each independently
 *   enabled for use.  Good for catching multicast.  Byte-wise selective
 *   match is available; use the mask to catch { 0x01, 0x00, 0x00 }
 *   and/or { 0x33, 0x33 }.
 * - slots 16~31 might be exact match without a byte mask.
 * - the multicast hash filter store is 64 bits wide.
 * - Socionext/Linaro "NetSec" code contains some constants left unexplained.
 *   Fortunately, Intel/Altera CycloneV PDFs describe every detail of this
 *   instance of the DW EMAC IP, and most of it is likely applicable to the
 *   SC2A11 GbE.
 * - it is not known whether "NetSec" instantiates the DW timestamp unit
 *   or builds its own.
 * - the DW EMAC implementation register (0x20) is known to read 0x10.36.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_scx.c,v 1.20 2020/03/27 09:19:33 nisimura Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/rndsource.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <net/bpf.h>

#include <dev/fdt/fdtvar.h>
#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_intr.h>

/*
 * SC2A11 register block 0x100-0x1204?
 */
#define SWRESET		0x104
#define COMINIT		0x120
#define xINTSR		0x200		/* aggregated interrupt status report */
#define IRQ_RX		(1U<<1)		/* top level Rx interrupt */
#define IRQ_TX		(1U<<0)		/* top level Tx interrupt */
#define xINTAEN		0x204		/* INT_A enable */
#define xINTA_SET	0x234		/* bit to set */
#define xINTA_CLR	0x238		/* bit to clr */
#define xINTBEN		0x23c		/* INT_B enable */
#define xINTB_SET	0x240		/* bit to set */
#define xINTB_CLR	0x244		/* bit to clr */
/* 0x00c-048 */				/* pkt,tls,s0,s1 SR/IE/SET/CLR */
#define TXISR		0x400
#define TXIEN		0x404
#define TXI_SET		0x428
#define TXI_CLR		0x42c
#define TXI_NTOWNR	(1U<<17)
#define TXI_TR_ERR	(1U<<16)
#define TXI_TXDONE	(1U<<15)
#define TXI_TMREXP	(1U<<14)
#define RXISR		0x440
#define RXIEN		0x444
#define RXI_SET		0x468
#define RXI_CLR		0x46c
#define RXI_RC_ERR	(1U<<16)
#define RXI_PKTCNT	(1U<<15)
#define RXI_TMREXP	(1U<<14)
#define TXTIMER		0x41c
#define RXTIMER		0x45c
#define TXCOUNT		0x410
#define RXCOUNT		0x454
#define H2MENG		0x210		/* DMAC host2media ucode port */
#define M2HENG		0x21c		/* DMAC media2host ucode port */
#define PKTENG		0x0d0		/* packet engine ucode port */
#define CLKEN		0x100		/* clock distribution enable */
#define CLK_G		(1U<<5)
#define CLK_ALL		0x24
#define MACADRH		0x10c		/* ??? */
#define MACADRL		0x110		/* ??? */
#define MCVER		0x22c		/* micro controller version */
#define HWVER		0x230		/* hardware version */

/* 0x800 */	/* dec Tx SR/EN/SET/CLR */
/* 0x840 */	/* enc Rx SR/EN/SET/CLR */
/* 0x880 */	/* enc TLS Tx SR/IE/SET/CLR */
/* 0x8c0 */	/* dec TLS Tx SR/IE/SET/CLR */
/* 0x900 */	/* enc TLS Rx SR/IE/SET/CLR */
/* 0x940 */	/* dec TLS Rx SR/IE/SET/CLR */
/* 0x980 */	/* enc RAW Tx SR/IE/SET/CLR */
/* 0x9c0 */	/* dec RAW Tx SR/IE/SET/CLR */
/* 0xA00 */	/* enc RAW Rx SR/IE/SET/CLR */
/* 0xA40 */	/* dec RAW Rx SR/IE/SET/CLR */

#define MACCMD		0x11c4		/* gmac operation */
#define CMD_IOWR	(1U<<28)	/* write op */
#define CMD_BUSY	(1U<<31)	/* busy bit */
#define MACSTAT		0x1024		/* gmac status */
#define MACDATA		0x11c0		/* gmac rd/wr data */
#define MACINTE		0x1028		/* interrupt enable */
#define DESC_INIT	0x11fc		/* desc engine init */
#define DESC_SRST	0x1204		/* desc engine sw reset */

/*
 * GMAC register block.  Use mac_write()/mac_read() to handle.
 */
#define GMACMCR		0x0000		/* MAC configuration */
#define MCR_IBN		(1U<<30)	/* ??? */
#define MCR_CST		(1U<<25)	/* strip CRC */
#define MCR_TC		(1U<<24)	/* keep RGMII PHY notified */
#define MCR_JE		(1U<<20)	/* ignore oversized >9018 condition */
#define MCR_IFG		(7U<<17)	/* 19:17 IFG value 0~7 */
#define MCR_DRCS	(1U<<16)	/* ignore (G)MII HDX Tx error */
#define MCR_USEMII	(1U<<15)	/* 1: RMII/MII, 0: RGMII (_PS) */
#define MCR_SPD100	(1U<<14)	/* force speed 100 (_FES) */
#define MCR_DO		(1U<<13)	/* */
#define MCR_LOOP	(1U<<12)	/* */
#define MCR_USEFDX	(1U<<11)	/* force full duplex */
#define MCR_IPCEN	(1U<<10)	/* handle checksum */
#define MCR_ACS		(1U<<7)		/* auto pad strip CRC */
#define MCR_TE		(1U<<3)		/* run Tx MAC engine, 0 to stop */
#define MCR_RE		(1U<<2)		/* run Rx MAC engine, 0 to stop */
#define MCR_PREA	(3U)		/* 1:0 preamble len. 0~2 */
#define _MCR_FDX	0x0000280c	/* XXX TBD */
#define _MCR_HDX	0x0001a00c	/* XXX TBD */
#define GMACAFR		0x0004		/* frame DA/SA address filter */
#define AFR_RA		(1U<<31)	/* accept all irrespective of filter */
#define AFR_HPF		(1U<<10)	/* hash+perfect filter, or hash only */
#define AFR_SAF		(1U<<9)		/* source address filter */
#define AFR_SAIF	(1U<<8)		/* SA inverse filtering */
#define AFR_PCF		(2U<<6)		/* */
#define AFR_DBF		(1U<<5)		/* reject broadcast frame */
#define AFR_PM		(1U<<4)		/* accept all multicast frame */
#define AFR_DAIF	(1U<<3)		/* DA inverse filtering */
#define AFR_MHTE	(1U<<2)		/* use multicast hash table */
#define AFR_UHTE	(1U<<1)		/* use hash table for unicast */
#define AFR_PR		(1U<<0)		/* run promisc mode */
#define GMACMHTH	0x0008		/* 64bit multicast hash table 63:32 */
#define GMACMHTL	0x000c		/* 64bit multicast hash table 31:0 */
#define GMACGAR		0x0010		/* MDIO operation */
#define GAR_PHY		(11)		/* mii phy 15:11 */
#define GAR_REG		(6)		/* mii reg 10:6 */
#define GAR_CTL		(2)		/* control 5:2 */
#define GAR_IOWR	(1U<<1)		/* MDIO write op */
#define GAR_BUSY	(1U)		/* busy bit */
#define GMACGDR		0x0014		/* MDIO rd/wr data */
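/*
 * Editorial sketch (not from the original sources): how a GAR access word
 * is composed from the fields above.  To read PHY 7, register 1 (BMSR),
 * with clock selection 5, the value posted to GMACGAR would be
 *
 *	(7 << GAR_PHY) | (1 << GAR_REG) | (5 << GAR_CTL) | GAR_BUSY
 *
 * after which the caller polls until GAR_BUSY self-clears and picks the
 * result up from GMACGDR.  mii_readreg()/mii_writereg() below follow
 * exactly this sequence.
 */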
#define GMACFCR		0x0018		/* 802.3x flowcontrol */
/* 31:16 pause timer value */
/* 5:4 pause timer threshold */
#define FCR_RFE		(1U<<2)		/* accept PAUSE to throttle Tx */
#define FCR_TFE		(1U<<1)		/* generate PAUSE to moderate Rx lvl */
#define GMACVTAG	0x001c		/* VLAN tag control */
#define GMACIMPL	0x0020		/* implementation number XX.YY */
#define GMACLPIS	0x0030		/* AXI LPI control */
#define GMACLPIC	0x0034		/* AXI LPI control */
#define GMACISR		0x0038		/* interrupt status, clear when read */
#define GMACIMR		0x003c		/* interrupt enable */
#define ISR_TS		(1U<<9)		/* time stamp operation detected */
#define ISR_CO		(1U<<7)		/* Rx checksum offload completed */
#define ISR_TX		(1U<<6)		/* Tx completed */
#define ISR_RX		(1U<<5)		/* Rx completed */
#define ISR_ANY		(1U<<4)		/* any of above 5-7 report */
#define ISR_LC		(1U<<0)		/* link status change detected */
#define GMACMAH0	0x0040		/* MAC address 0 47:32 */
#define GMACMAL0	0x0044		/* MAC address 0 31:0 */
#define GMACMAH(i)	((i)*8+0x40)	/* supplemental MAC addr 1 - 15 */
#define GMACMAL(i)	((i)*8+0x44)	/* bit 31 to use, 30 SA,
					 * 29:24 byte-wise don't-care */
#define GMACMIISR	0x00d8		/* resolved xMII link status */
/* 3   link up detected
 * 2:1 resolved speed
 *  0 2.5MHz (10Mbps)
 *  1 25MHz (100Mbps)
 *  2 125MHz (1000Mbps)
 * 0   full duplex detected */
#define GMACEVCTL	0x0100		/* event counter control */
#define GMACEVCNT(i)	((i)*4+0x114)	/* event counter 0x114~284 */

#define GMACMHT0	0x0500		/* 256bit multicast hash table 0 - 7 */
#define GMACMHT(i)	((i)*4+0x500)
#define GMACVHT		0x0588		/* VLAN tag hash */

/* 0x0700-0734 ??? */
#define GMACAMAH(i)	((i)*8+0x800)	/* supplemental MAC addr 16-31 */
#define GMACAMAL(i)	((i)*8+0x804)	/* bit 31 to use */

#define GMACBMR		0x1000		/* DMA bus mode control
					 * 24    4PBL 8???
					 * 23    USP
					 * 22:17 RPBL
					 * 16    fixed burst, or undefined b.
					 * 15:14 priority between Rx and Tx
					 *  3    rxtx ratio 41
					 *  2    rxtx ratio 31
					 *  1    rxtx ratio 21
					 *  0    rxtx ratio 11
					 * 13:8  PBL possible DMA burst len
					 *  7    alternative des8
					 *  0    reset op. self clear
					 */
#define _BMR		0x00412080	/* XXX TBD */
#define _BMR0		0x00020181	/* XXX TBD */
#define BMR_RST		(1)		/* reset op. self clear when done */
#define GMACTPD		0x1004		/* write any to resume tdes */
#define GMACRPD		0x1008		/* write any to resume rdes */
#define GMACRDLA	0x100c		/* rdes base address 32bit paddr */
#define GMACTDLA	0x1010		/* tdes base address 32bit paddr */
#define _RDLA		0x18000		/* XXX TBD system SRAM with CC ? */
#define _TDLA		0x1c000		/* XXX TBD system SRAM with CC ? */
#define GMACDSR		0x1014		/* DMA status detail report; W1C */
#define GMACOMR		0x1018		/* DMA operation */
#define OMR_TSF		(1U<<25)	/* 1: Tx store&forward, 0: immed. */
#define OMR_RSF		(1U<<21)	/* 1: Rx store&forward, 0: immed. */
#define OMR_ST		(1U<<13)	/* run Tx DMA engine, 0 to stop */
#define OMR_EFC		(1U<<8)		/* transmit PAUSE to throttle Rx lvl. */
#define OMR_FEF		(1U<<7)		/* allow to receive error frames */
#define OMR_RS		(1U<<1)		/* run Rx DMA engine, 0 to stop */
#define GMACIE		0x101c		/* interrupt enable */
#define GMACEVCS	0x1020		/* missed frame or ovf detected */
#define GMACRWDT	0x1024		/* receive watchdog timer count */
#define GMACAXIB	0x1028		/* AXI bus mode control */
#define GMACAXIS	0x102c		/* AXI status report */
/* 0x1048-1054 */			/* descriptor and buffer cur. address */
#define HWFEA		0x1058		/* feature report */

/* descriptor format definition */
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */
#define T0_EOD		(1U<<30)	/* end of descriptor array */
#define T0_DRID		(24)		/* 29:24 D-RID */
#define T0_PT		(1U<<21)	/* 23:21 PT */
#define T0_TRID		(16)		/* 20:16 T-RID */
#define T0_FS		(1U<<9)		/* first segment of frame */
#define T0_LS		(1U<<8)		/* last segment of frame */
#define T0_CSUM		(1U<<7)		/* enable checksum offload */
#define T0_SGOL		(1U<<6)		/* enable TCP segment offload */
#define T0_TRS		(1U<<4)		/* 5:4 TRS */
#define T0_IOC		(0)		/* XXX TBD interrupt when completed */
/* T1 segment address 63:32 */
/* T2 segment address 31:0 */
/* T3 31:16 TCP segment length, 15:0 segment length to transmit */
#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_EOD		(1U<<30)	/* end of descriptor array */
#define R0_SRID		(24)		/* 29:24 S-RID */
#define R0_FR		(1U<<23)	/* FR */
#define R0_ER		(1U<<21)	/* Rx error indication */
#define R0_ERR		(3U<<16)	/* 18:16 receive error code */
#define R0_TDRID	(14)		/* 15:14 TD-RID */
#define R0_FS		(1U<<9)		/* first segment of frame */
#define R0_LS		(1U<<8)		/* last segment of frame */
#define R0_CSUM		(3U<<6)		/* 7:6 checksum status */
#define R0_CERR		(2U<<6)		/* 0 (undone), 1 (found ok), 2 (bad) */
/* R1 frame address 63:32 */
/* R2 frame address 31:0 */
/* R3 31:16 received frame length, 15:0 buffer length to receive */
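
/*
 * Editorial sketch (hypothetical values, not from the original sources):
 * a single-segment Tx frame of 60 bytes at physical address 0x123456000
 * would be described as
 *
 *	t0 = T0_OWN | T0_FS | T0_LS | ...ring/engine bits...
 *	t1 = 0x00000001		(paddr 63:32)
 *	t2 = 0x23456000		(paddr 31:0)
 *	t3 = 60			(15:0 segment length)
 *
 * scx_start() below builds exactly this layout, deferring the first
 * segment's T0_OWN until the rest of the chain is filled in.
 */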

/*
 * software construction
 */
#define MD_NTXSEGS		16	/* fixed */
#define MD_TXQUEUELEN		16	/* tunable */
#define MD_TXQUEUELEN_MASK	(MD_TXQUEUELEN - 1)
#define MD_TXQUEUE_GC		(MD_TXQUEUELEN / 4)
#define MD_NTXDESC		(MD_TXQUEUELEN * MD_NTXSEGS)
#define MD_NTXDESC_MASK		(MD_NTXDESC - 1)
#define MD_NEXTTX(x)		(((x) + 1) & MD_NTXDESC_MASK)
#define MD_NEXTTXS(x)		(((x) + 1) & MD_TXQUEUELEN_MASK)

#define MD_NRXDESC		64	/* tunable */
#define MD_NRXDESC_MASK		(MD_NRXDESC - 1)
#define MD_NEXTRX(x)		(((x) + 1) & MD_NRXDESC_MASK)
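
/*
 * Editorial note: the counts are powers of two so ring indices can wrap
 * with a cheap mask instead of a modulo; with MD_NTXDESC = 16 * 16 = 256,
 * MD_NEXTTX(255) = (255 + 1) & 255 = 0.
 */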

#define SCX_INIT_RXDESC(sc, x)						\
do {									\
	struct scx_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
	bus_addr_t __paddr = __rxs->rxs_dmamap->dm_segs[0].ds_addr;	\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r3 = __rxs->rxs_dmamap->dm_segs[0].ds_len;		\
	__rxd->r2 = htole32(BUS_ADDR_LO32(__paddr));			\
	__rxd->r1 = htole32(BUS_ADDR_HI32(__paddr));			\
	__rxd->r0 = R0_OWN | R0_FS | R0_LS;				\
	if ((x) == MD_NRXDESC - 1) __rxd->r0 |= R0_EOD;			\
} while (/*CONSTCOND*/0)

struct control_data {
	struct tdes cd_txdescs[MD_NTXDESC];
	struct rdes cd_rxdescs[MD_NRXDESC];
};
#define SCX_CDOFF(x)	offsetof(struct control_data, x)
#define SCX_CDTXOFF(x)	SCX_CDOFF(cd_txdescs[(x)])
#define SCX_CDRXOFF(x)	SCX_CDOFF(cd_rxdescs[(x)])
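
/*
 * Editorial note: with 16-byte descriptors this control block is
 * 256 * 16 + 64 * 16 = 5120 bytes, so the PAGE_SIZE-aligned allocation
 * in scx_attach_i() holds both rings in one contiguous DMA-coherent
 * chunk (a single segment is requested).
 */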

struct scx_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct scx_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct scx_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_sz;		/* csr map size */
	bus_space_handle_t sc_eesh;	/* eeprom section handle */
	bus_size_t sc_eesz;		/* eeprom map size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	bus_dma_tag_t sc_dmat32;
	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII */
	callout_t sc_tick_ch;		/* PHY monitor callout */
	bus_dma_segment_t sc_seg;	/* descriptor store seg */
	int sc_nseg;			/* descriptor store nseg */
	void *sc_ih;			/* interrupt cookie */
	int sc_phy_id;			/* PHY address */
	int sc_flowflags;		/* 802.3x PAUSE flow control */
	uint32_t sc_mdclk;		/* GAR 5:2 clock selection */
	uint32_t sc_t0coso;		/* T0_CSUM | T0_SGOL to run */
	int sc_ucodeloaded;		/* ucode for H2M/M2H/PKT */
	int sc_100mii;			/* 1 for RMII/MII, 0 for RGMII */
	int sc_phandle;			/* fdt phandle */
	uint64_t sc_freq;

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct control_data *sc_control_data;
#define sc_txdescs	sc_control_data->cd_txdescs
#define sc_rxdescs	sc_control_data->cd_rxdescs

	struct scx_txsoft sc_txsoft[MD_TXQUEUELEN];
	struct scx_rxsoft sc_rxsoft[MD_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	krndsource_t rnd_source;	/* random source */
};

#define SCX_CDTXADDR(sc, x)	((sc)->sc_cddma + SCX_CDTXOFF((x)))
#define SCX_CDRXADDR(sc, x)	((sc)->sc_cddma + SCX_CDRXOFF((x)))

/* sc_cddmamap is created with sc_dmat32; sync it with the same tag */
#define SCX_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > MD_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat32, (sc)->sc_cddmamap,	\
		    SCX_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (MD_NTXDESC - __x), (ops));				\
		__n -= (MD_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat32, (sc)->sc_cddmamap,		\
	    SCX_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define SCX_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat32, (sc)->sc_cddmamap,		\
	    SCX_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)
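
/*
 * Editorial sketch of the wrap case above: SCX_CDTXSYNC(sc, 250, 10, ops)
 * with MD_NTXDESC = 256 first syncs descriptors 250-255 (6 entries), then
 * resets __x to 0 and syncs the remaining 4 entries.
 */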

static int scx_fdt_match(device_t, cfdata_t, void *);
static void scx_fdt_attach(device_t, device_t, void *);
static int scx_acpi_match(device_t, cfdata_t, void *);
static void scx_acpi_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(scx_fdt, sizeof(struct scx_softc),
    scx_fdt_match, scx_fdt_attach, NULL, NULL);

CFATTACH_DECL_NEW(scx_acpi, sizeof(struct scx_softc),
    scx_acpi_match, scx_acpi_attach, NULL, NULL);

static void scx_attach_i(struct scx_softc *);
static void scx_reset(struct scx_softc *);
static int scx_init(struct ifnet *);
static void scx_start(struct ifnet *);
static void scx_stop(struct ifnet *, int);
static void scx_watchdog(struct ifnet *);
static int scx_ioctl(struct ifnet *, u_long, void *);
static void scx_set_rcvfilt(struct scx_softc *);
static int scx_ifmedia_upd(struct ifnet *);
static void scx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void mii_statchg(struct ifnet *);
static void phy_tick(void *);
static int mii_readreg(device_t, int, int, uint16_t *);
static int mii_writereg(device_t, int, int, uint16_t);
static int scx_intr(void *);
static void txreap(struct scx_softc *);
static void rxintr(struct scx_softc *);
static int add_rxbuf(struct scx_softc *, int);

static int spin_waitfor(struct scx_softc *, int, int);
static int mac_read(struct scx_softc *, int);
static void mac_write(struct scx_softc *, int, int);
static void loaducode(struct scx_softc *);
static void injectucode(struct scx_softc *, int, bus_addr_t, bus_size_t);
static int get_mdioclk(uint32_t);

#define CSR_READ(sc,off) \
	    bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE(sc,off,val) \
	    bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
#define EE_READ(sc,off) \
	    bus_space_read_4((sc)->sc_st, (sc)->sc_eesh, (off))
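
/*
 * Editorial sketch (hypothetical snippet, not from the original sources):
 * typical use of the accessors above;
 *
 *	uint32_t ver = CSR_READ(sc, HWVER);	// Socionext CSR space
 *	CSR_WRITE(sc, TXI_CLR, ~0U);		// ack every Tx interrupt
 *	uint32_t w0 = EE_READ(sc, 0);		// first EEPROM word
 *
 * GMAC registers, by contrast, are only reachable indirectly through
 * mac_read()/mac_write() further below.
 */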

static int
scx_fdt_match(device_t parent, cfdata_t cf, void *aux)
{
	static const char * compatible[] = {
		"socionext,synquacer-netsec",
		NULL
	};
	struct fdt_attach_args * const faa = aux;

	return of_match_compatible(faa->faa_phandle, compatible);
}

static void
scx_fdt_attach(device_t parent, device_t self, void *aux)
{
	struct scx_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_space_tag_t bst = faa->faa_bst;
	bus_space_handle_t bsh;
	bus_space_handle_t eebsh;
	bus_addr_t addr[2];
	bus_size_t size[2];
	char intrstr[128];
	const char *phy_mode;

	if (fdtbus_get_reg(phandle, 0, addr+0, size+0) != 0
	    || bus_space_map(faa->faa_bst, addr[0], size[0], 0, &bsh) != 0) {
		aprint_error(": unable to map device csr\n");
		return;
	}
	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		goto fail;
	}
	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_NET,
	    NOT_MP_SAFE, scx_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto fail;
	}
	if (fdtbus_get_reg(phandle, 1, addr+1, size+1) != 0
	    || bus_space_map(faa->faa_bst, addr[1], size[1], 0, &eebsh) != 0) {
		aprint_error(": unable to map device eeprom\n");
		goto fail;
	}

	aprint_naive("\n");
	/* aprint_normal(": Gigabit Ethernet Controller\n"); */
	aprint_normal_dev(self, "interrupt on %s\n", intrstr);

	sc->sc_dev = self;
	sc->sc_st = bst;
	sc->sc_sh = bsh;
	sc->sc_sz = size[0];
	sc->sc_eesh = eebsh;
	sc->sc_eesz = size[1];
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_dmat32 = faa->faa_dmat;	/* XXX */
	sc->sc_phandle = phandle;

	phy_mode = fdtbus_get_string(phandle, "phy-mode");
	if (phy_mode == NULL)
		aprint_error(": missing 'phy-mode' property\n");
	sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0);
	sc->sc_phy_id = 7;			/* XXX */
	sc->sc_freq = 125 * 1000 * 1000;	/* XXX */
	aprint_normal_dev(self, "phy mode %s, phy id %d, freq %ld\n",
	    phy_mode, sc->sc_phy_id, sc->sc_freq);

	scx_attach_i(sc);
	return;
 fail:
	if (sc->sc_eesz)
		bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz);
	if (sc->sc_sz)
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	return;
}

static int
scx_acpi_match(device_t parent, cfdata_t cf, void *aux)
{
	static const char * compatible[] = {
		"SCX0001",
		NULL
	};
	struct acpi_attach_args *aa = aux;

	if (aa->aa_node->ad_type != ACPI_TYPE_DEVICE)
		return 0;
	return acpi_match_hid(aa->aa_node->ad_devinfo, compatible);
}

static void
scx_acpi_attach(device_t parent, device_t self, void *aux)
{
	struct scx_softc * const sc = device_private(self);
	struct acpi_attach_args * const aa = aux;
	ACPI_HANDLE handle = aa->aa_node->ad_handle;
	bus_space_tag_t bst = aa->aa_memt;
	bus_space_handle_t bsh, eebsh;
	struct acpi_resources res;
	struct acpi_mem *mem;
	struct acpi_irq *irq;
	char *phy_mode;
	ACPI_INTEGER acpi_phy, acpi_freq;
	ACPI_STATUS rv;

	rv = acpi_resource_parse(self, handle, "_CRS",
	    &res, &acpi_resource_parse_ops_default);
	if (ACPI_FAILURE(rv))
		return;
	mem = acpi_res_mem(&res, 0);
	irq = acpi_res_irq(&res, 0);
	if (mem == NULL || irq == NULL || mem->ar_length == 0) {
		aprint_error(": incomplete csr resources\n");
		return;
	}
	if (bus_space_map(bst, mem->ar_base, mem->ar_length, 0, &bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}
	sc->sc_sz = mem->ar_length;
	sc->sc_ih = acpi_intr_establish(self, (uint64_t)handle, IPL_NET,
	    NOT_MP_SAFE, scx_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto fail;
	}
	mem = acpi_res_mem(&res, 1); /* EEPROM for MAC address and ucode */
	if (mem == NULL || mem->ar_length == 0) {
		aprint_error(": incomplete eeprom resources\n");
		goto fail;
	}
	if (bus_space_map(bst, mem->ar_base, mem->ar_length, 0, &eebsh) != 0) {
		aprint_error(": couldn't map registers\n");
		goto fail;
	}
	sc->sc_eesz = mem->ar_length;

	rv = acpi_dsd_string(handle, "phy-mode", &phy_mode);
	if (ACPI_FAILURE(rv)) {
		aprint_error(": missing 'phy-mode' property\n");
		phy_mode = NULL;
	}
	rv = acpi_dsd_integer(handle, "phy-channel", &acpi_phy);
	if (ACPI_FAILURE(rv))
		acpi_phy = 31;
	rv = acpi_dsd_integer(handle, "socionext,phy-clock-frequency",
	    &acpi_freq);
	if (ACPI_FAILURE(rv))
		acpi_freq = 999;

	aprint_naive("\n");
	/* aprint_normal(": Gigabit Ethernet Controller\n"); */

	sc->sc_dev = self;
	sc->sc_st = bst;
	sc->sc_sh = bsh;
	sc->sc_eesh = eebsh;
	sc->sc_dmat = aa->aa_dmat64;
	sc->sc_dmat32 = aa->aa_dmat;	/* descriptor needs dma32 */

	aprint_normal_dev(self, "phy mode %s, phy id %d, freq %ld\n",
	    phy_mode, (int)acpi_phy, acpi_freq);
	sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0);
	sc->sc_phy_id = (int)acpi_phy;
	sc->sc_freq = acpi_freq;
	aprint_normal_dev(self, "GMACGAR %08x\n", mac_read(sc, GMACGAR));

	scx_attach_i(sc);

	acpi_resource_cleanup(&res);
	return;
 fail:
	if (sc->sc_eesz > 0)
		bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz);
	if (sc->sc_sz > 0)
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	acpi_resource_cleanup(&res);
	return;
}

static void
scx_attach_i(struct scx_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifmedia * const ifm = &mii->mii_media;
	uint32_t hwver, dwimp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	uint32_t csr;
	int i, nseg, error = 0;

	hwver = CSR_READ(sc, HWVER);	/* Socionext HW */
	/* stored in big endian order */
	csr = bus_space_read_4(sc->sc_st, sc->sc_eesh, 0);
	enaddr[0] = csr >> 24;
	enaddr[1] = csr >> 16;
	enaddr[2] = csr >> 8;
	enaddr[3] = csr;
	csr = bus_space_read_4(sc->sc_st, sc->sc_eesh, 4);
	enaddr[4] = csr >> 24;
	enaddr[5] = csr >> 16;
	dwimp = mac_read(sc, GMACIMPL);	/* DW EMAC XX.YY */

	aprint_normal_dev(sc->sc_dev,
	    "Socionext NetSec GbE hw %d.%d impl 0x%x\n",
	    hwver >> 16, hwver & 0xffff, dwimp);
	aprint_normal_dev(sc->sc_dev,
	    "Ethernet address %s\n", ether_sprintf(enaddr));

	sc->sc_phy_id = MII_PHY_ANY;
	sc->sc_mdclk = get_mdioclk(sc->sc_freq); /* 5:2 clk control */
	sc->sc_mdclk = 5; /* XXX */
	aprint_normal_dev(sc->sc_dev, "using %d for mdclk\n", sc->sc_mdclk);
	sc->sc_mdclk <<= 2;

	sc->sc_flowflags = 0;

	if (sc->sc_ucodeloaded == 0)
		loaducode(sc);

	mii->mii_ifp = ifp;
	mii->mii_readreg = mii_readreg;
	mii->mii_writereg = mii_writereg;
	mii->mii_statchg = mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(ifm, 0, scx_ifmedia_upd, scx_ifmedia_sts);
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = scx_ioctl;
	ifp->if_start = scx_start;
	ifp->if_watchdog = scx_watchdog;
	ifp->if_init = scx_init;
	ifp->if_stop = scx_stop;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat32,
	    sizeof(struct control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat32, &seg, nseg,
	    sizeof(struct control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat32,
	    sizeof(struct control_data), 1,
	    sizeof(struct control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat32, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	/* Tx/Rx buffer maps may carry 64 bit paddrs; use sc_dmat for them. */
	for (i = 0; i < MD_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MD_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < MD_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	sc->sc_seg = seg;
	sc->sc_nseg = nseg;
	aprint_normal_dev(sc->sc_dev,
	    "descriptor ds_addr %lx, ds_len %lx, nseg %d\n",
	    seg.ds_addr, seg.ds_len, nseg);

	if (pmf_device_register(sc->sc_dev, NULL, NULL))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

 fail_5:
	for (i = 0; i < MD_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < MD_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat32, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat32, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat32, (void *)sc->sc_control_data,
	    sizeof(struct control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat32, &seg, nseg);
 fail_0:
	if (sc->sc_phandle)
		fdtbus_intr_disestablish(sc->sc_phandle, sc->sc_ih);
	else
		acpi_intr_disestablish(sc->sc_ih);
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	return;
}

static void
scx_reset(struct scx_softc *sc)
{
	int loop = 0, busy;

	mac_write(sc, GMACOMR, 0);
	mac_write(sc, GMACBMR, BMR_RST);
	do {
		DELAY(1);
		busy = mac_read(sc, GMACBMR) & BMR_RST;
	} while (++loop < 3000 && busy);
	mac_write(sc, GMACBMR, _BMR);
	mac_write(sc, GMACAFR, 0);

	CSR_WRITE(sc, CLKEN, CLK_ALL);	/* distribute clock sources */
	CSR_WRITE(sc, SWRESET, 0);	/* reset operation */
	CSR_WRITE(sc, SWRESET, 1U<<31);	/* manifest run */
	CSR_WRITE(sc, COMINIT, 3);	/* DB|CLS */

	mac_write(sc, GMACEVCTL, 1);
}

static int
scx_init(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	const uint8_t *ea = CLLADDR(ifp->if_sadl);
	uint32_t csr;
	int i;

	/* Cancel pending I/O. */
	scx_stop(ifp, 0);

	/* Reset the chip to a known state. */
	scx_reset(sc);

	/* set my address in perfect match slot 0, in little endian order */
	csr = (ea[3] << 24) | (ea[2] << 16) | (ea[1] << 8) | ea[0];
	mac_write(sc, GMACMAL0, csr);
	csr = (ea[5] << 8) | ea[4];
	mac_write(sc, GMACMAH0, csr);

	/* accept multicast frame or run promisc mode */
	scx_set_rcvfilt(sc);

	(void)scx_ifmedia_upd(ifp);

	/* build sane Tx */
	memset(sc->sc_txdescs, 0, sizeof(struct tdes) * MD_NTXDESC);
	sc->sc_txdescs[MD_NTXDESC - 1].t0 |= T0_EOD; /* tie off the ring */
	SCX_CDTXSYNC(sc, 0, MD_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = MD_NTXDESC;
	sc->sc_txnext = 0;
	for (i = 0; i < MD_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = MD_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* load Rx descriptors with fresh mbuf */
	for (i = 0; i < MD_NRXDESC; i++)
		(void)add_rxbuf(sc, i);
	sc->sc_rxptr = 0;

	/* XXX 32 bit paddr XXX hand Tx/Rx rings to HW XXX */
	mac_write(sc, GMACTDLA, SCX_CDTXADDR(sc, 0));
	mac_write(sc, GMACRDLA, SCX_CDRXADDR(sc, 0));

	/* kick to start GMAC engine */
	CSR_WRITE(sc, RXI_CLR, ~0);
	CSR_WRITE(sc, TXI_CLR, ~0);
	csr = mac_read(sc, GMACOMR);
	mac_write(sc, GMACOMR, csr | OMR_RS | OMR_ST);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_schedule(&sc->sc_tick_ch, hz);

	return 0;
}

static void
scx_stop(struct ifnet *ifp, int disable)
{
	struct scx_softc *sc = ifp->if_softc;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
scx_watchdog(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != MD_NTXDESC) {
		aprint_error_dev(sc->sc_dev,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		scx_init(ifp);
	}

	scx_start(ifp);
}

static int
scx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct scx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifmedia *ifm;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		ifm = &sc->sc_mii.mii_media;
		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			scx_set_rcvfilt(sc);
		}
		break;
	}

	splx(s);
	return error;
}

static void
scx_set_rcvfilt(struct scx_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ethercom;
	struct ifnet * const ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t mchash[2];	/* 2x 32 = 64 bit */
	uint32_t csr, crc;
	int i;

	csr = mac_read(sc, GMACAFR);
	csr &= ~(AFR_PR | AFR_PM | AFR_MHTE | AFR_HPF);
	mac_write(sc, GMACAFR, csr);

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;

	/* clear 15 entry supplemental perfect match filter */
	for (i = 1; i < 16; i++)
		mac_write(sc, GMACMAH(i), 0);
	/* build 64 bit multicast hash filter */
	crc = mchash[1] = mchash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	i = 1; /* slot 0 is occupied */
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto update;
		}
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
		if (i < 16) {
			/* use 15 entry perfect match filter */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			addr = (ep[3] << 24) | (ep[2] << 16)
			     | (ep[1] << 8) | ep[0];
			mac_write(sc, GMACMAL(i), addr);
			addr = (ep[5] << 8) | ep[4];
			mac_write(sc, GMACMAH(i), addr | 1U<<31);
		} else {
			/* use hash table when too many */
			/* bit_reserve_32(~crc) !? */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			/* 1(31) 5(30:26) bit sampling */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
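		/*
		 * Editorial note on the sampling above: bit 31 of the
		 * little endian CRC selects which 32 bit half of the
		 * 64 bit hash store to set (GMACMHTH vs. GMACMHTL), and
		 * bits 30:26 select one of the 32 positions within it,
		 * so each multicast address lights exactly one of the
		 * 64 filter bits.
		 */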
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);

	if (crc)
		csr |= AFR_MHTE | AFR_HPF; /* use hash+perfect */
	mac_write(sc, GMACMHTH, mchash[1]);
	mac_write(sc, GMACMHTL, mchash[0]);
	mac_write(sc, GMACAFR, csr);
	return;

 update:
	/* With PM or AM, MHTE/MHT0-7 are never consulted. really? */
	if (ifp->if_flags & IFF_PROMISC)
		csr |= AFR_PR;	/* run promisc. mode */
	else
		csr |= AFR_PM;	/* accept all multicast */
	mac_write(sc, GMACAFR, csr);
	return;
}

static int
scx_ifmedia_upd(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_mii.mii_media;

	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
		; /* restart AN */
		; /* enable AN */
		; /* advertise flow control pause */
		; /* adv. 1000FDX,100FDX,100HDX,10FDX,10HDX */
	} else {
#if 1 /* XXX not sure to belong here XXX */
		uint32_t mcr = mac_read(sc, GMACMCR);
		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_1000_T)
			mcr &= ~MCR_USEMII; /* RGMII+SPD1000 */
		else {
			if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX
			    && sc->sc_100mii)
				mcr |= MCR_SPD100;
			mcr |= MCR_USEMII;
		}
		if (ifm->ifm_cur->ifm_media & IFM_FDX)
			mcr |= MCR_USEFDX;
		mcr |= MCR_CST | MCR_JE;
		if (sc->sc_100mii == 0)
			mcr |= MCR_IBN;
		mac_write(sc, GMACMCR, mcr);
#endif
	}
	return 0;
}

static void
scx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = sc->sc_flowflags |
	    (mii->mii_media_active & ~IFM_ETH_FMASK);
}

static void
mii_statchg(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t fcr;

#if 1
	/* decode MIISR register value */
	uint32_t miisr = mac_read(sc, GMACMIISR);
	int spd = (miisr >> 1) & 03;
	printf("MII link status (0x%x) %s",
	    miisr, (miisr & 8) ? "up" : "down");
	if (miisr & 8) {
		printf(" spd%d", (spd == 2) ? 1000 : (spd == 1) ? 100 : 10);
		if (miisr & 1)
			printf(",full-duplex");
	}
	printf("\n");
#endif
	/* Get flow control negotiation result. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Adjust PAUSE flow control. */
	fcr = mac_read(sc, GMACFCR) & ~(FCR_TFE | FCR_RFE);
	if (mii->mii_media_active & IFM_FDX) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
			fcr |= FCR_TFE;
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			fcr |= FCR_RFE;
	}
	mac_write(sc, GMACFCR, fcr);

	printf("%ctxfe, %crxfe\n",
	    (fcr & FCR_TFE) ? '+' : '-', (fcr & FCR_RFE) ? '+' : '-');
}

static void
phy_tick(void *arg)
{
	struct scx_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static int
mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct scx_softc *sc = device_private(self);
	uint32_t miia;
	int error;

	uint32_t clk = CSR_READ(sc, CLKEN);
	CSR_WRITE(sc, CLKEN, clk | CLK_G);

	miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk;
	mac_write(sc, GMACGAR, miia | GAR_BUSY);
	error = spin_waitfor(sc, GMACGAR, GAR_BUSY);
	if (error)
		return error;
	*val = mac_read(sc, GMACGDR);
	return 0;
}

static int
mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct scx_softc *sc = device_private(self);
	uint32_t miia;
	uint16_t dummy;
	int error;

	uint32_t clk = CSR_READ(sc, CLKEN);
	CSR_WRITE(sc, CLKEN, clk | CLK_G);

	miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk;
	mac_write(sc, GMACGDR, val);
	mac_write(sc, GMACGAR, miia | GAR_IOWR | GAR_BUSY);
	error = spin_waitfor(sc, GMACGAR, GAR_BUSY);
	if (error)
		return error;
	mii_readreg(self, phy, MII_PHYIDR1, &dummy); /* dummy read cycle */
	return 0;
}

static void
scx_start(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct scx_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < MD_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = MD_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			bus_addr_t paddr = dmamap->dm_segs[seg].ds_addr;
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t3 = dmamap->dm_segs[seg].ds_len;
			tdes->t2 = htole32(BUS_ADDR_LO32(paddr));
			tdes->t1 = htole32(BUS_ADDR_HI32(paddr));
			tdes->t0 = tdes0 | (tdes->t0 & T0_EOD) |
			    (15 << T0_TRID) | T0_PT |
			    sc->sc_t0coso | T0_TRS;
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * An outgoing NFS mbuf must be unloaded when Tx completes.
		 * Without T0_IOC the NFS mbuf is left unack'ed for an
		 * excessive time and NFS stops proceeding until
		 * scx_watchdog() calls txreap() to reclaim the unack'ed
		 * mbuf.  It's painful to traverse every mbuf chain to
		 * determine whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t0 |= T0_IOC; /* !!! */
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t0 |= T0_LS;
		sc->sc_txdescs[sc->sc_txnext].t0 |= (T0_FS | T0_OWN);
		SCX_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA start transmit */
		mac_write(sc, GMACTPD, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = MD_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static int
scx_intr(void *arg)
{
	struct scx_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	(void)ifp;
	/* XXX decode interrupt cause to pick isr() XXX */
	rxintr(sc);
	txreap(sc);
	return 1;
}

static void
txreap(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != MD_TXQUEUELEN;
	     i = MD_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		SCX_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == MD_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
rxintr(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = MD_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		SCX_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;
		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = sc->sc_rxdescs[i].r3 >> 16; /* 31:16 received */
		len -= ETHER_CRC_LEN;	/* Trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			SCX_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		if (rxstat & R0_CSUM) {
			uint32_t csum = M_CSUM_IPv4;
			if (rxstat & R0_CERR)
				csum |= M_CSUM_IPv4_BAD;
			m->m_pkthdr.csum_flags |= csum;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
	sc->sc_rxptr = i;
}

static int
add_rxbuf(struct scx_softc *sc, int i)
{
	struct scx_rxsoft *rxs = &sc->sc_rxsoft[i];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", i, error);
		panic("add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	SCX_INIT_RXDESC(sc, i);

	return 0;
}

static int
spin_waitfor(struct scx_softc *sc, int reg, int exist)
{
	int busy, loop;

	busy = CSR_READ(sc, reg) & exist;
	if (busy == 0)
		return 0;
	loop = 30000;
	do {
		DELAY(10);
		busy = CSR_READ(sc, reg) & exist;
	} while (--loop > 0 && busy);
	return (loop > 0) ? 0 : ETIMEDOUT;
}
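
/*
 * Editorial note: the spin above polls every 10 us for up to 30000
 * iterations, i.e. roughly a 300 ms budget before ETIMEDOUT.
 */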

/*
 * GMAC registers are only reachable indirectly, by rd/wr through the
 * memory mapped MACCMD/MACDATA register pair.
 */

static int
mac_read(struct scx_softc *sc, int reg)
{

	CSR_WRITE(sc, MACCMD, reg);
	(void)spin_waitfor(sc, MACCMD, CMD_BUSY);
	return CSR_READ(sc, MACDATA);
}

static void
mac_write(struct scx_softc *sc, int reg, int val)
{

	CSR_WRITE(sc, MACDATA, val);
	CSR_WRITE(sc, MACCMD, reg | CMD_IOWR);
	(void)spin_waitfor(sc, MACCMD, CMD_BUSY);
}
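
/*
 * Editorial sketch of the indirect protocol above: a read posts the GMAC
 * register offset to MACCMD, waits for CMD_BUSY to self-clear, then picks
 * the value up from MACDATA; a write stages the value in MACDATA first
 * and posts the offset with CMD_IOWR set.  For example, mac_read(sc,
 * GMACIMPL) in scx_attach_i() fetches the DW EMAC implementation number
 * this way.
 */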

/*
 * Three independent uengines exist to process host2media, media2host
 * and packet data flows.
 */
static void
loaducode(struct scx_softc *sc)
{
	uint32_t up, lo, sz;
	uint64_t addr;

	sc->sc_ucodeloaded = 1;

	up = EE_READ(sc, 0x08); /* H->M ucode addr high */
	lo = EE_READ(sc, 0x0c); /* H->M ucode addr low */
	sz = EE_READ(sc, 0x10); /* H->M ucode size */
	sz *= 4;
	addr = ((uint64_t)up << 32) | lo;
	aprint_normal_dev(sc->sc_dev, "0x%x H2M ucode %u\n", lo, sz);
	injectucode(sc, H2MENG, (bus_addr_t)addr, (bus_size_t)sz);

	up = EE_READ(sc, 0x14); /* M->H ucode addr high */
	lo = EE_READ(sc, 0x18); /* M->H ucode addr low */
	sz = EE_READ(sc, 0x1c); /* M->H ucode size */
	sz *= 4;
	addr = ((uint64_t)up << 32) | lo;
	injectucode(sc, M2HENG, (bus_addr_t)addr, (bus_size_t)sz);
	aprint_normal_dev(sc->sc_dev, "0x%x M2H ucode %u\n", lo, sz);

	lo = EE_READ(sc, 0x20); /* PKT ucode addr */
	sz = EE_READ(sc, 0x24); /* PKT ucode size */
	sz *= 4;
	injectucode(sc, PKTENG, (bus_addr_t)lo, (bus_size_t)sz);
	aprint_normal_dev(sc->sc_dev, "0x%x PKT ucode %u\n", lo, sz);
}

static void
injectucode(struct scx_softc *sc, int port,
    bus_addr_t addr, bus_size_t size)
{
	bus_space_handle_t bsh;
	bus_size_t off;
	uint32_t ucode;

	if (bus_space_map(sc->sc_st, addr, size, 0, &bsh) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "eeprom map failure for ucode port 0x%x\n", port);
		return;
	}
	for (off = 0; off < size; off += 4) {
		ucode = bus_space_read_4(sc->sc_st, bsh, off);
		CSR_WRITE(sc, port, ucode);
	}
	bus_space_unmap(sc->sc_st, bsh, size);
}

/* bit selection to determine MDIO speed */

static int
get_mdioclk(uint32_t freq)
{

	const struct {
		uint16_t freq, bit; /* GAR 5:2 MDIO frequency selection */
	} mdioclk[] = {
		{ 35,	2 },	/* 25-35 MHz */
		{ 60,	3 },	/* 35-60 MHz */
		{ 100,	0 },	/* 60-100 MHz */
		{ 150,	1 },	/* 100-150 MHz */
		{ 250,	4 },	/* 150-250 MHz */
		{ 300,	5 },	/* 250-300 MHz */
	};
	int i;

	freq /= 1000 * 1000;
	/* convert MDIO clk to a divisor value; each entry holds the
	 * upper bound of its range, so pick the first entry that the
	 * frequency falls below */
	if (freq < mdioclk[0].freq)
		return mdioclk[0].bit;
	for (i = 1; i < __arraycount(mdioclk); i++) {
		if (freq < mdioclk[i].freq)
			return mdioclk[i].bit;
	}
	/* the caller shifts the selection into GAR 5:2 itself */
	return mdioclk[__arraycount(mdioclk) - 1].bit;
}
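
/*
 * Editorial example: a 125 MHz reference clock (the value the FDT attach
 * path assumes) falls in the 100-150 MHz row, so the selection returned
 * is 1; scx_attach_i() shifts it into GAR bits 5:2 afterwards.
 */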