/* $NetBSD: if_scx.c,v 1.22.4.2 2020/04/13 08:03:37 martin Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define NOT_MP_SAFE 0

/*
 * Socionext SC2A11 SynQuacer NetSec GbE driver
 *
 * (possibly incorrect notes to be removed eventually)
 * - 32 byte descriptor for 64 bit paddr design.
 * - multiple rings seem available. There are special descriptor fields
 *   to designate the ring number a frame arrives from or departs to.
 * - memory mapped EEPROM to hold MAC address. The rest of the area is
 *   occupied by a set of ucode for two DMA engines and one packet engine.
 * - The frame address filter has 16 plus 16 entries.
 * - The first slot holds the station's own address and is always
 *   enabled, to identify itself.
 * - Slots 1~15 are for supplemental MAC addresses, each independently
 *   enabled for use. Good to catch multicast. Byte-wise selective match
 *   available. Use the mask to catch { 0x01, 0x00, 0x00 } and/or
 *   { 0x33, 0x33 } (see the illustrative sketch after scx_set_rcvfilt()
 *   below).
 * - Slots 16~31 might be exact match without byte-mask.
 * - The multicast hash filter store is 64 bits wide.
 * - Socionext/Linaro "NetSec" code contains some constants left
 *   unexplained. Fortunately, Intel/Altera CycloneV PDFs describe every
 *   detail of this instance of the DW EMAC IP, and most of it is likely
 *   applicable to SC2A11 GbE.
 * - It is not known whether "NetSec" instantiates the DW timestamp unit
 *   or builds its own.
 * - The DW EMAC implementation register (0x20) is known to read 0x10.36.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_scx.c,v 1.22.4.2 2020/04/13 08:03:37 martin Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/rndsource.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <net/bpf.h>

#include <dev/fdt/fdtvar.h>
#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_intr.h>

/*
 * SC2A11 register block 0x100-0x1204?
 */
#define SWRESET 0x104
#define COMINIT 0x120
#define xINTSR 0x200 /* aggregated interrupt status report */
#define IRQ_RX (1U<<1) /* top level Rx interrupt */
#define IRQ_TX (1U<<0) /* top level Tx interrupt */
#define xINTAEN 0x204 /* INT_A enable */
#define xINTA_SET 0x234 /* bit to set */
#define xINTA_CLR 0x238 /* bit to clr */
#define xINTBEN 0x23c /* INT_B enable */
#define xINTB_SET 0x240 /* bit to set */
#define xINTB_CLR 0x244 /* bit to clr */
/* 0x00c-048 */ /* pkt,tls,s0,s1 SR/IE/SET/CLR */
#define TXISR 0x400
#define TXIEN 0x404
#define TXI_SET 0x428
#define TXI_CLR 0x42c
#define TXI_NTOWNR (1U<<17)
#define TXI_TR_ERR (1U<<16)
#define TXI_TXDONE (1U<<15)
#define TXI_TMREXP (1U<<14)
#define RXISR 0x440
#define RXIEN 0x444
#define RXI_SET 0x468
#define RXI_CLR 0x46c
#define RXI_RC_ERR (1U<<16)
#define RXI_PKTCNT (1U<<15)
#define RXI_TMREXP (1U<<14)
#define TXTIMER 0x41c
#define RXTIMER 0x45c
#define TXCOUNT 0x410
#define RXCOUNT 0x454
#define H2MENG 0x210 /* DMAC host2media ucode port */
#define M2HENG 0x21c /* DMAC media2host ucode port */
#define PKTENG 0x0d0 /* packet engine ucode port */
#define CLKEN 0x100 /* clock distribution enable */
#define CLK_G (1U<<5)
#define CLK_ALL 0x24
#define MACADRH 0x10c /* ??? */
#define MACADRL 0x110 /* ??? */
#define MCVER 0x22c /* micro controller version */
#define HWVER 0x230 /* hardware version */

/* 0x800 */ /* dec Tx SR/EN/SET/CLR */
/* 0x840 */ /* enc Rx SR/EN/SET/CLR */
/* 0x880 */ /* enc TLS Tx SR/IE/SET/CLR */
/* 0x8c0 */ /* dec TLS Tx SR/IE/SET/CLR */
/* 0x900 */ /* enc TLS Rx SR/IE/SET/CLR */
/* 0x940 */ /* dec TLS Rx SR/IE/SET/CLR */
/* 0x980 */ /* enc RAW Tx SR/IE/SET/CLR */
/* 0x9c0 */ /* dec RAW Tx SR/IE/SET/CLR */
/* 0xA00 */ /* enc RAW Rx SR/IE/SET/CLR */
/* 0xA40 */ /* dec RAW Rx SR/IE/SET/CLR */

#define MACCMD 0x11c4 /* gmac operation */
#define CMD_IOWR (1U<<28) /* write op */
#define CMD_BUSY (1U<<31) /* busy bit */
#define MACSTAT 0x1024 /* gmac status */
#define MACDATA 0x11c0 /* gmac rd/wr data */
#define MACINTE 0x1028 /* interrupt enable */
#define DESC_INIT 0x11fc /* desc engine init */
#define DESC_SRST 0x1204 /* desc engine sw reset */

/*
 * GMAC register block. Use mac_read()/mac_write() to access.
 */
#define GMACMCR 0x0000 /* MAC configuration */
#define MCR_IBN (1U<<30) /* ??? */
#define MCR_CST (1U<<25) /* strip CRC */
#define MCR_TC (1U<<24) /* keep RGMII PHY notified */
#define MCR_JE (1U<<20) /* ignore oversized >9018 condition */
#define MCR_IFG (7U<<17) /* 19:17 IFG value 0~7 */
#define MCR_DRCS (1U<<16) /* ignore (G)MII HDX Tx error */
#define MCR_USEMII (1U<<15) /* 1: RMII/MII, 0: RGMII (_PS) */
#define MCR_SPD100 (1U<<14) /* force speed 100 (_FES) */
#define MCR_DO (1U<<13) /* */
#define MCR_LOOP (1U<<12) /* */
#define MCR_USEFDX (1U<<11) /* force full duplex */
#define MCR_IPCEN (1U<<10) /* handle checksum */
#define MCR_ACS (1U<<7) /* auto pad strip CRC */
#define MCR_TE (1U<<3) /* run Tx MAC engine, 0 to stop */
#define MCR_RE (1U<<2) /* run Rx MAC engine, 0 to stop */
#define MCR_PREA (3U) /* 1:0 preamble len. 0~2 */
#define _MCR_FDX 0x0000280c /* XXX TBD */
#define _MCR_HDX 0x0001a00c /* XXX TBD */
#define GMACAFR 0x0004 /* frame DA/SA address filter */
#define AFR_RA (1U<<31) /* accept all irrespective of filt. */
#define AFR_HPF (1U<<10) /* hash+perfect filter, or hash only */
#define AFR_SAF (1U<<9) /* source address filter */
#define AFR_SAIF (1U<<8) /* SA inverse filtering */
#define AFR_PCF (2U<<6) /* */
#define AFR_DBF (1U<<5) /* reject broadcast frame */
#define AFR_PM (1U<<4) /* accept all multicast frame */
#define AFR_DAIF (1U<<3) /* DA inverse filtering */
#define AFR_MHTE (1U<<2) /* use multicast hash table */
#define AFR_UHTE (1U<<1) /* use hash table for unicast */
#define AFR_PR (1U<<0) /* run promisc mode */
#define GMACMHTH 0x0008 /* 64bit multicast hash table 63:32 */
#define GMACMHTL 0x000c /* 64bit multicast hash table 31:0 */
#define GMACGAR 0x0010 /* MDIO operation */
#define GAR_PHY (11) /* mii phy 15:11 */
#define GAR_REG (6) /* mii reg 10:6 */
#define GAR_CTL (2) /* control 5:2 */
#define GAR_IOWR (1U<<1) /* MDIO write op */
#define GAR_BUSY (1U) /* busy bit */
#define GMACGDR 0x0014 /* MDIO rd/wr data */
#define GMACFCR 0x0018 /* 802.3x flowcontrol */
			/* 31:16 pause timer value */
			/* 5:4 pause timer threshold */
#define FCR_RFE (1U<<2) /* accept PAUSE to throttle Tx */
#define FCR_TFE (1U<<1) /* generate PAUSE to moderate Rx lvl */
#define GMACVTAG 0x001c /* VLAN tag control */
#define GMACIMPL 0x0020 /* implementation number XX.YY */
#define GMACLPIS 0x0030 /* AXI LPI control */
#define GMACLPIC 0x0034 /* AXI LPI control */
#define GMACISR 0x0038 /* interrupt status, clear when read */
#define GMACIMR 0x003c /* interrupt enable */
#define ISR_TS (1U<<9) /* time stamp operation detected */
#define ISR_CO (1U<<7) /* Rx checksum offload completed */
#define ISR_TX (1U<<6) /* Tx completed */
#define ISR_RX (1U<<5) /* Rx completed */
#define ISR_ANY (1U<<4) /* any of above 5-7 report */
#define ISR_LC (1U<<0) /* link status change detected */
#define GMACMAH0 0x0040 /* MAC address 0 47:32 */
#define GMACMAL0 0x0044 /* MAC address 0 31:0 */
#define GMACMAH(i) ((i)*8+0x40) /* supplemental MAC addr 1 - 15 */
#define GMACMAL(i) ((i)*8+0x44) /* bit 31 to use, 30 SA,
				 * 29:24 byte-wise don't care */
#define GMACMIISR 0x00d8 /* resolved xMII link status */
			/* 3   link up detected
			 * 2:1 resolved speed
			 *  0  2.5MHz (10Mbps)
			 *  1  25MHz (100Mbps)
			 *  2  125MHz (1000Mbps)
			 * 0   full duplex detected */
#define GMACEVCTL 0x0100 /* event counter control */
#define GMACEVCNT(i) ((i)*4+0x114) /* event counter 0x114~284 */

#define GMACMHT(i) ((i)*4+0x500) /* 256bit multicast hash table 0 - 7 */
#define GMACVHT 0x0588 /* VLAN tag hash */

/* 0x0700-0734 ??? */
#define GMACAMAH(i) ((i)*8+0x800) /* supplemental MAC addr 16-31 */
#define GMACAMAL(i) ((i)*8+0x804) /* bit 31 to use */

#define GMACBMR 0x1000 /* DMA bus mode control
			* 24    4PBL 8???
			* 23    USP
			* 22:17 RPBL
			* 16    fixed burst, or undefined b.
			* 15:14 priority between Rx and Tx
			*  3 rxtx ratio 41
			*  2 rxtx ratio 31
			*  1 rxtx ratio 21
			*  0 rxtx ratio 11
			* 13:8  PBL possible DMA burst len
			*  7    alternative des8
			*  0    reset op. self clear
			*/
#define _BMR 0x00412080 /* XXX TBD */
#define _BMR0 0x00020181 /* XXX TBD */
#define BMR_RST (1) /* reset op. self clear when done */
#define GMACTPD 0x1004 /* write any to resume tdes */
#define GMACRPD 0x1008 /* write any to resume rdes */
#define GMACRDLA 0x100c /* rdes base address 32bit paddr */
#define GMACTDLA 0x1010 /* tdes base address 32bit paddr */
#define _RDLA 0x18000 /* XXX TBD system SRAM with CC ? */
#define _TDLA 0x1c000 /* XXX TBD system SRAM with CC ? */
#define GMACDSR 0x1014 /* DMA status detail report; W1C */
#define GMACOMR 0x1018 /* DMA operation */
#define OMR_TSF (1U<<25) /* 1: Tx store&forward, 0: immed. */
#define OMR_RSF (1U<<21) /* 1: Rx store&forward, 0: immed. */
#define OMR_ST (1U<<13) /* run Tx DMA engine, 0 to stop */
#define OMR_EFC (1U<<8) /* transmit PAUSE to throttle Rx lvl. */
#define OMR_FEF (1U<<7) /* allow to receive error frames */
#define OMR_RS (1U<<1) /* run Rx DMA engine, 0 to stop */
#define GMACIE 0x101c /* interrupt enable */
#define GMACEVCS 0x1020 /* missed frame or ovf detected */
#define GMACRWDT 0x1024 /* receive watchdog timer count */
#define GMACAXIB 0x1028 /* AXI bus mode control */
#define GMACAXIS 0x102c /* AXI status report */
/* 0x1048-1054 */ /* descriptor and buffer cur. address */
#define HWFEA 0x1058 /* feature report */

/* descriptor format definition */
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

#define T0_OWN (1U<<31) /* desc is ready to Tx */
#define T0_EOD (1U<<30) /* end of descriptor array */
#define T0_DRID (24) /* 29:24 D-RID */
#define T0_PT (1U<<21) /* 23:21 PT */
#define T0_TRID (16) /* 20:16 T-RID */
#define T0_FS (1U<<9) /* first segment of frame */
#define T0_LS (1U<<8) /* last segment of frame */
#define T0_CSUM (1U<<7) /* enable check sum offload */
#define T0_SGOL (1U<<6) /* enable TCP segment offload */
#define T0_TRS (1U<<4) /* 5:4 TRS */
#define T0_IOC (0) /* XXX TBD interrupt when completed */
/* T1 segment address 63:32 */
/* T2 segment address 31:0 */
/* T3 31:16 TCP segment length, 15:0 segment length to transmit */
#define R0_OWN (1U<<31) /* desc is empty */
#define R0_EOD (1U<<30) /* end of descriptor array */
#define R0_SRID (24) /* 29:24 S-RID */
#define R0_FR (1U<<23) /* FR */
#define R0_ER (1U<<21) /* Rx error indication */
#define R0_ERR (3U<<16) /* 18:16 receive error code */
#define R0_TDRID (14) /* 15:14 TD-RID */
#define R0_FS (1U<<9) /* first segment of frame */
#define R0_LS (1U<<8) /* last segment of frame */
#define R0_CSUM (3U<<6) /* 7:6 checksum status */
#define R0_CERR (2U<<6) /* 0 (undone), 1 (found ok), 2 (bad) */
/* R1 frame address 63:32 */
/* R2 frame address 31:0 */
/* R3 31:16 received frame length, 15:0 buffer length to receive */
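
/*
 * Illustrative sketch, not used by the driver: how a single-segment
 * Tx descriptor would be composed from the field notes above. The
 * real code in scx_start() builds descriptors in place and defers
 * the first T0_OWN write until the whole chain is ready; this helper
 * (a name invented here) only restates the field assignments.
 */
static inline void
tdes_fill_sketch(struct tdes *t, bus_addr_t paddr, int len, int last)
{

	t->t3 = len;				/* 15:0 length to transmit */
	t->t2 = htole32(BUS_ADDR_LO32(paddr));	/* segment address 31:0 */
	t->t1 = htole32(BUS_ADDR_HI32(paddr));	/* segment address 63:32 */
	t->t0 = T0_OWN | T0_FS | (last ? T0_LS : 0); /* hand over to HW */
}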

/*
 * software construction
 */
#define MD_NTXSEGS 16 /* fixed */
#define MD_TXQUEUELEN 16 /* tunable */
#define MD_TXQUEUELEN_MASK (MD_TXQUEUELEN - 1)
#define MD_TXQUEUE_GC (MD_TXQUEUELEN / 4)
#define MD_NTXDESC (MD_TXQUEUELEN * MD_NTXSEGS)
#define MD_NTXDESC_MASK (MD_NTXDESC - 1)
#define MD_NEXTTX(x) (((x) + 1) & MD_NTXDESC_MASK)
#define MD_NEXTTXS(x) (((x) + 1) & MD_TXQUEUELEN_MASK)

#define MD_NRXDESC 64 /* tunable */
#define MD_NRXDESC_MASK (MD_NRXDESC - 1)
#define MD_NEXTRX(x) (((x) + 1) & MD_NRXDESC_MASK)
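
/*
 * Worked example: MD_NTXDESC = 16 jobs * 16 segments = 256 descriptors,
 * and the masks above work because both ring sizes are powers of two;
 * e.g. MD_NEXTTX(255) = (255 + 1) & 255 = 0 wraps back to the ring head.
 */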

#define SCX_INIT_RXDESC(sc, x) \
do { \
	struct scx_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
	bus_addr_t __paddr = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \
	__m->m_data = __m->m_ext.ext_buf; \
	__rxd->r3 = __rxs->rxs_dmamap->dm_segs[0].ds_len; \
	__rxd->r2 = htole32(BUS_ADDR_LO32(__paddr)); \
	__rxd->r1 = htole32(BUS_ADDR_HI32(__paddr)); \
	__rxd->r0 = R0_OWN | R0_FS | R0_LS; \
	if ((x) == MD_NRXDESC - 1) __rxd->r0 |= R0_EOD; \
} while (/*CONSTCOND*/0)

struct control_data {
	struct tdes cd_txdescs[MD_NTXDESC];
	struct rdes cd_rxdescs[MD_NRXDESC];
};
#define SCX_CDOFF(x) offsetof(struct control_data, x)
#define SCX_CDTXOFF(x) SCX_CDOFF(cd_txdescs[(x)])
#define SCX_CDRXOFF(x) SCX_CDOFF(cd_rxdescs[(x)])

struct scx_txsoft {
	struct mbuf *txs_mbuf; /* head of our mbuf chain */
	bus_dmamap_t txs_dmamap; /* our DMA map */
	int txs_firstdesc; /* first descriptor in packet */
	int txs_lastdesc; /* last descriptor in packet */
	int txs_ndesc; /* # of descriptors used */
};

struct scx_rxsoft {
	struct mbuf *rxs_mbuf; /* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap; /* our DMA map */
};

struct scx_softc {
	device_t sc_dev; /* generic device information */
	bus_space_tag_t sc_st; /* bus space tag */
	bus_space_handle_t sc_sh; /* bus space handle */
	bus_size_t sc_sz; /* csr map size */
	bus_space_handle_t sc_eesh; /* eeprom section handle */
	bus_size_t sc_eesz; /* eeprom map size */
	bus_dma_tag_t sc_dmat; /* bus DMA tag */
	bus_dma_tag_t sc_dmat32;
	struct ethercom sc_ethercom; /* Ethernet common data */
	struct mii_data sc_mii; /* MII */
	callout_t sc_tick_ch; /* PHY monitor callout */
	bus_dma_segment_t sc_seg; /* descriptor store seg */
	int sc_nseg; /* descriptor store nseg */
	void *sc_ih; /* interrupt cookie */
	int sc_phy_id; /* PHY address */
	int sc_flowflags; /* 802.3x PAUSE flow control */
	uint32_t sc_mdclk; /* GAR 5:2 clock selection */
	uint32_t sc_t0coso; /* T0_CSUM | T0_SGOL to run */
	int sc_ucodeloaded; /* ucode for H2M/M2H/PKT */
	int sc_100mii; /* 1 for RMII/MII, 0 for RGMII */
	int sc_phandle; /* fdt phandle */
	uint64_t sc_freq;

	bus_dmamap_t sc_cddmamap; /* control data DMA map */
#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr

	struct control_data *sc_control_data;
#define sc_txdescs sc_control_data->cd_txdescs
#define sc_rxdescs sc_control_data->cd_rxdescs

	struct scx_txsoft sc_txsoft[MD_TXQUEUELEN];
	struct scx_rxsoft sc_rxsoft[MD_NRXDESC];
	int sc_txfree; /* number of free Tx descriptors */
	int sc_txnext; /* next ready Tx descriptor */
	int sc_txsfree; /* number of free Tx jobs */
	int sc_txsnext; /* next ready Tx job */
	int sc_txsdirty; /* dirty Tx jobs */
	int sc_rxptr; /* next ready Rx descriptor/descsoft */

	krndsource_t rnd_source; /* random source */
};

#define SCX_CDTXADDR(sc, x) ((sc)->sc_cddma + SCX_CDTXOFF((x)))
#define SCX_CDRXADDR(sc, x) ((sc)->sc_cddma + SCX_CDRXOFF((x)))

#define SCX_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
\
	__x = (x); \
	__n = (n); \
\
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > MD_NTXDESC) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    SCX_CDTXOFF(__x), sizeof(struct tdes) * \
		    (MD_NTXDESC - __x), (ops)); \
		__n -= (MD_NTXDESC - __x); \
		__x = 0; \
	} \
\
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    SCX_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \
} while (/*CONSTCOND*/0)

#define SCX_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    SCX_CDRXOFF((x)), sizeof(struct rdes), (ops)); \
} while (/*CONSTCOND*/0)

static int scx_fdt_match(device_t, cfdata_t, void *);
static void scx_fdt_attach(device_t, device_t, void *);
static int scx_acpi_match(device_t, cfdata_t, void *);
static void scx_acpi_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(scx_fdt, sizeof(struct scx_softc),
    scx_fdt_match, scx_fdt_attach, NULL, NULL);

CFATTACH_DECL_NEW(scx_acpi, sizeof(struct scx_softc),
    scx_acpi_match, scx_acpi_attach, NULL, NULL);

static void scx_attach_i(struct scx_softc *);
static void scx_reset(struct scx_softc *);
static int scx_init(struct ifnet *);
static void scx_start(struct ifnet *);
static void scx_stop(struct ifnet *, int);
static void scx_watchdog(struct ifnet *);
static int scx_ioctl(struct ifnet *, u_long, void *);
static void scx_set_rcvfilt(struct scx_softc *);
static void scx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void mii_statchg(struct ifnet *);
static void phy_tick(void *);
static int mii_readreg(device_t, int, int, uint16_t *);
static int mii_writereg(device_t, int, int, uint16_t);
static int scx_intr(void *);
static void txreap(struct scx_softc *);
static void rxintr(struct scx_softc *);
static int add_rxbuf(struct scx_softc *, int);

static int spin_waitfor(struct scx_softc *, int, int);
static int mac_read(struct scx_softc *, int);
static void mac_write(struct scx_softc *, int, int);
static void loaducode(struct scx_softc *);
static void injectucode(struct scx_softc *, int, bus_addr_t, bus_size_t);
static int get_mdioclk(uint32_t);

#define CSR_READ(sc,off) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE(sc,off,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
#define EE_READ(sc,off) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_eesh, (off))

static int
scx_fdt_match(device_t parent, cfdata_t cf, void *aux)
{
	static const char * compatible[] = {
		"socionext,synquacer-netsec",
		NULL
	};
	struct fdt_attach_args * const faa = aux;

	return of_match_compatible(faa->faa_phandle, compatible);
}

static void
scx_fdt_attach(device_t parent, device_t self, void *aux)
{
	struct scx_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_space_tag_t bst = faa->faa_bst;
	bus_space_handle_t bsh;
	bus_space_handle_t eebsh;
	bus_addr_t addr[2];
	bus_size_t size[2];
	char intrstr[128];
	const char *phy_mode;

	if (fdtbus_get_reg(phandle, 0, addr+0, size+0) != 0
	    || bus_space_map(faa->faa_bst, addr[0], size[0], 0, &bsh) != 0) {
		aprint_error(": unable to map device csr\n");
		return;
	}
	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		goto fail;
	}
	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_NET,
	    NOT_MP_SAFE, scx_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto fail;
	}
	if (fdtbus_get_reg(phandle, 1, addr+1, size+1) != 0
	    || bus_space_map(faa->faa_bst, addr[1], size[1], 0, &eebsh) != 0) {
		aprint_error(": unable to map device eeprom\n");
		goto fail;
	}

	aprint_naive("\n");
	/* aprint_normal(": Gigabit Ethernet Controller\n"); */
	aprint_normal_dev(self, "interrupt on %s\n", intrstr);

	sc->sc_dev = self;
	sc->sc_st = bst;
	sc->sc_sh = bsh;
	sc->sc_sz = size[0];
	sc->sc_eesh = eebsh;
	sc->sc_eesz = size[1];
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_dmat32 = faa->faa_dmat; /* XXX */
	sc->sc_phandle = phandle;

	phy_mode = fdtbus_get_string(phandle, "phy-mode");
	if (phy_mode == NULL)
		aprint_error(": missing 'phy-mode' property\n");
	sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0);
	sc->sc_phy_id = 7; /* XXX */
	sc->sc_freq = 125 * 1000 * 1000; /* XXX */
	aprint_normal_dev(self,
	    "phy mode %s, phy id %d, freq %ld\n",
	    phy_mode, sc->sc_phy_id, sc->sc_freq);

	scx_attach_i(sc);
	return;
 fail:
	if (sc->sc_eesz)
		bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz);
	if (sc->sc_sz)
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	return;
}

static int
scx_acpi_match(device_t parent, cfdata_t cf, void *aux)
{
	static const char * compatible[] = {
		"SCX0001",
		NULL
	};
	struct acpi_attach_args *aa = aux;

	if (aa->aa_node->ad_type != ACPI_TYPE_DEVICE)
		return 0;
	return acpi_match_hid(aa->aa_node->ad_devinfo, compatible);
}

static void
scx_acpi_attach(device_t parent, device_t self, void *aux)
{
	struct scx_softc * const sc = device_private(self);
	struct acpi_attach_args * const aa = aux;
	ACPI_HANDLE handle = aa->aa_node->ad_handle;
	bus_space_tag_t bst = aa->aa_memt;
	bus_space_handle_t bsh, eebsh;
	struct acpi_resources res;
	struct acpi_mem *mem;
	struct acpi_irq *irq;
	char *phy_mode;
	ACPI_INTEGER acpi_phy, acpi_freq;
	ACPI_STATUS rv;

	rv = acpi_resource_parse(self, handle, "_CRS",
	    &res, &acpi_resource_parse_ops_default);
	if (ACPI_FAILURE(rv))
		return;
	mem = acpi_res_mem(&res, 0);
	irq = acpi_res_irq(&res, 0);
	if (mem == NULL || irq == NULL || mem->ar_length == 0) {
		aprint_error(": incomplete csr resources\n");
		return;
	}
	if (bus_space_map(bst, mem->ar_base, mem->ar_length, 0, &bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}
	sc->sc_sz = mem->ar_length;
	sc->sc_ih = acpi_intr_establish(self, (uint64_t)handle, IPL_NET,
	    NOT_MP_SAFE, scx_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto fail;
	}
	mem = acpi_res_mem(&res, 1); /* EEPROM for MAC address and ucode */
	if (mem == NULL || mem->ar_length == 0) {
		aprint_error(": incomplete eeprom resources\n");
		goto fail;
	}
	if (bus_space_map(bst, mem->ar_base, mem->ar_length, 0, &eebsh) != 0) {
		aprint_error(": couldn't map registers\n");
		goto fail;
	}
	sc->sc_eesz = mem->ar_length;

	rv = acpi_dsd_string(handle, "phy-mode", &phy_mode);
	if (ACPI_FAILURE(rv)) {
		aprint_error(": missing 'phy-mode' property\n");
		phy_mode = NULL;
	}
	rv = acpi_dsd_integer(handle, "phy-channel", &acpi_phy);
	if (ACPI_FAILURE(rv))
		acpi_phy = 31;
	rv = acpi_dsd_integer(handle, "socionext,phy-clock-frequency",
	    &acpi_freq);
	if (ACPI_FAILURE(rv))
		acpi_freq = 999;

	aprint_naive("\n");
	/* aprint_normal(": Gigabit Ethernet Controller\n"); */

	sc->sc_dev = self;
	sc->sc_st = bst;
	sc->sc_sh = bsh;
	sc->sc_eesh = eebsh;
	sc->sc_dmat = aa->aa_dmat64;
	sc->sc_dmat32 = aa->aa_dmat; /* descriptor needs dma32 */

	aprint_normal_dev(self,
	    "phy mode %s, phy id %d, freq %ld\n",
	    phy_mode, (int)acpi_phy, acpi_freq);
	sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0);
	sc->sc_phy_id = (int)acpi_phy;
	sc->sc_freq = acpi_freq;
	aprint_normal_dev(self,
	    "GMACGAR %08x\n", mac_read(sc, GMACGAR));

	scx_attach_i(sc);

	acpi_resource_cleanup(&res);
	return;
 fail:
	/* sc_st/sc_sh may not be set yet on these paths; use the locals */
	if (sc->sc_eesz > 0)
		bus_space_unmap(bst, eebsh, sc->sc_eesz);
	if (sc->sc_sz > 0)
		bus_space_unmap(bst, bsh, sc->sc_sz);
	acpi_resource_cleanup(&res);
	return;
}

static void
scx_attach_i(struct scx_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifmedia * const ifm = &mii->mii_media;
	uint32_t hwver, dwimp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	uint32_t csr;
	int i, nseg, error = 0;

	hwver = CSR_READ(sc, HWVER); /* Socionext HW */
	dwimp = mac_read(sc, GMACIMPL); /* DW EMAC XX.YY */
	aprint_normal_dev(sc->sc_dev,
	    "Socionext NetSec GbE hw %d.%d impl 0x%x\n",
	    hwver >> 16, hwver & 0xffff, dwimp);

	/* fetch MAC address in flash. stored in big endian order */
	csr = bus_space_read_4(sc->sc_st, sc->sc_eesh, 0);
	enaddr[0] = csr >> 24;
	enaddr[1] = csr >> 16;
	enaddr[2] = csr >> 8;
	enaddr[3] = csr;
	csr = bus_space_read_4(sc->sc_st, sc->sc_eesh, 4);
	enaddr[4] = csr >> 24;
	enaddr[5] = csr >> 16;
	aprint_normal_dev(sc->sc_dev,
	    "Ethernet address %s\n", ether_sprintf(enaddr));

	sc->sc_phy_id = MII_PHY_ANY;
	sc->sc_mdclk = get_mdioclk(sc->sc_freq); /* 5:2 clk control */
	sc->sc_mdclk = 5; /* XXX */
	aprint_normal_dev(sc->sc_dev, "using %d for mdclk\n", sc->sc_mdclk);
	sc->sc_mdclk <<= 2;

	sc->sc_flowflags = 0;

	if (sc->sc_ucodeloaded == 0)
		loaducode(sc);

	mii->mii_ifp = ifp;
	mii->mii_readreg = mii_readreg;
	mii->mii_writereg = mii_writereg;
	mii->mii_statchg = mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(ifm, 0, ether_mediachange, scx_ifmedia_sts);
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = scx_ioctl;
	ifp->if_start = scx_start;
	ifp->if_watchdog = scx_watchdog;
	ifp->if_init = scx_init;
	ifp->if_stop = scx_stop;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat32,
	    sizeof(struct control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat32, &seg, nseg,
	    sizeof(struct control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat32,
	    sizeof(struct control_data), 1,
	    sizeof(struct control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat32, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	for (i = 0; i < MD_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat32, MCLBYTES,
		    MD_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < MD_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat32, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	sc->sc_seg = seg;
	sc->sc_nseg = nseg;
	aprint_normal_dev(sc->sc_dev,
	    "descriptor ds_addr %lx, ds_len %lx, nseg %d\n",
	    seg.ds_addr, seg.ds_len, nseg);

	if (pmf_device_register(sc->sc_dev, NULL, NULL))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

 fail_5:
	/* the maps and memory above were created with sc_dmat32;
	 * release them with the same tag */
	for (i = 0; i < MD_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat32,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < MD_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat32,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat32, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat32, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat32, (void *)sc->sc_control_data,
	    sizeof(struct control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat32, &seg, nseg);
 fail_0:
	if (sc->sc_phandle)
		fdtbus_intr_disestablish(sc->sc_phandle, sc->sc_ih);
	else
		acpi_intr_disestablish(sc->sc_ih);
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	return;
}

static void
scx_reset(struct scx_softc *sc)
{
	int loop = 0, busy;

	mac_write(sc, GMACOMR, 0);
	mac_write(sc, GMACBMR, BMR_RST);
	do {
		DELAY(1);
		busy = mac_read(sc, GMACBMR) & BMR_RST;
	} while (++loop < 3000 && busy);
	mac_write(sc, GMACBMR, _BMR);
	mac_write(sc, GMACAFR, 0);

	CSR_WRITE(sc, CLKEN, CLK_ALL); /* distribute clock sources */
	CSR_WRITE(sc, SWRESET, 0); /* reset operation */
	CSR_WRITE(sc, SWRESET, 1U<<31); /* manifest run */
	CSR_WRITE(sc, COMINIT, 3); /* DB|CLS */

	mac_write(sc, GMACEVCTL, 1);
}

static int
scx_init(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	const uint8_t *ea = CLLADDR(ifp->if_sadl);
	uint32_t csr;
	int i;

	/* Cancel pending I/O. */
	scx_stop(ifp, 0);

	/* Reset the chip to a known state. */
	scx_reset(sc);

	/* set my address in perfect match slot 0. little endian order */
	csr = (ea[3] << 24) | (ea[2] << 16) | (ea[1] << 8) | ea[0];
	mac_write(sc, GMACMAL0, csr);
	csr = (ea[5] << 8) | ea[4];
	mac_write(sc, GMACMAH0, csr);

	/* accept multicast frame or run promisc mode */
	scx_set_rcvfilt(sc);

	(void)ether_mediachange(ifp);

	/* build sane Tx */
	memset(sc->sc_txdescs, 0, sizeof(struct tdes) * MD_NTXDESC);
	sc->sc_txdescs[MD_NTXDESC - 1].t0 |= T0_EOD; /* tie off the ring */
	SCX_CDTXSYNC(sc, 0, MD_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = MD_NTXDESC;
	sc->sc_txnext = 0;
	for (i = 0; i < MD_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = MD_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* load Rx descriptors with fresh mbuf */
	for (i = 0; i < MD_NRXDESC; i++)
		(void)add_rxbuf(sc, i);
	sc->sc_rxptr = 0;

	/* XXX 32 bit paddr XXX hand Tx/Rx rings to HW XXX */
	mac_write(sc, GMACTDLA, SCX_CDTXADDR(sc, 0));
	mac_write(sc, GMACRDLA, SCX_CDRXADDR(sc, 0));

	/* kick to start GMAC engine */
	CSR_WRITE(sc, RXI_CLR, ~0);
	CSR_WRITE(sc, TXI_CLR, ~0);
	csr = mac_read(sc, GMACOMR);
	mac_write(sc, GMACOMR, csr | OMR_RS | OMR_ST);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_schedule(&sc->sc_tick_ch, hz);

	return 0;
}

static void
scx_stop(struct ifnet *ifp, int disable)
{
	struct scx_softc *sc = ifp->if_softc;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
scx_watchdog(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != MD_NTXDESC) {
		aprint_error_dev(sc->sc_dev,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		scx_init(ifp);
	}

	scx_start(ifp);
}

static int
scx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct scx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifmedia *ifm;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		ifm = &sc->sc_mii.mii_media;
		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			scx_set_rcvfilt(sc);
		}
		break;
	}

	splx(s);
	return error;
}

static void
scx_set_rcvfilt(struct scx_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ethercom;
	struct ifnet * const ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t mchash[2]; /* 2x 32 = 64 bit */
	uint32_t csr, crc;
	int i;

	csr = mac_read(sc, GMACAFR);
	csr &= ~(AFR_PR | AFR_PM | AFR_MHTE | AFR_HPF);
	mac_write(sc, GMACAFR, csr);

	/* clear 15 entry supplemental perfect match filter */
	for (i = 1; i < 16; i++)
		mac_write(sc, GMACMAH(i), 0);
	/* build 64 bit multicast hash filter */
	crc = mchash[1] = mchash[0] = 0;

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		/* run promisc. mode */
		csr |= AFR_PR;
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 1; /* slot 0 is occupied */
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			/* accept all multi */
			csr |= AFR_PM;
			goto update;
		}
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
		if (i < 16) {
			/* use 15 entry perfect match filter */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			addr = (ep[3] << 24) | (ep[2] << 16)
			    | (ep[1] << 8) | ep[0];
			mac_write(sc, GMACMAL(i), addr);
			addr = (ep[5] << 8) | ep[4];
			mac_write(sc, GMACMAH(i), addr | 1U<<31);
		} else {
			/* use hash table when too many */
			/* bit_reserve_32(~crc) !? */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			/* 1(31) 5(30:26) bit sampling */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);
	if (crc)
		csr |= AFR_MHTE;
	csr |= AFR_HPF; /* use hash+perfect */
	mac_write(sc, GMACMHTH, mchash[1]);
	mac_write(sc, GMACMHTL, mchash[0]);
 update:
	/* With PR or PM, MHTE/MHTL/MHTH are never consulted. really? */
	mac_write(sc, GMACAFR, csr);
	return;
}
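
/*
 * Sketch, not wired into the driver: the notes at the top of this file
 * suggest the supplemental entries can do byte-wise selective matching
 * via GMACMAH bits 29:24. Assuming a set bit marks the corresponding DA
 * byte as "don't care" (bit 24 for the lowest byte; an assumption, not
 * verified against hardware), an entry catching every 01:00:5e:xx:xx:xx
 * IPv4 multicast frame might be programmed like this.
 */
static inline void
afr_prefix_sketch(struct scx_softc *sc, int slot)
{

	/* DA bytes 0-2 hold 01:00:5e, packed low byte first */
	mac_write(sc, GMACMAL(slot), (0x5e << 16) | (0x00 << 8) | 0x01);
	mac_write(sc, GMACMAH(slot),
	    (1U << 31)		/* enable this entry */
	    | (0x38 << 24));	/* assumed: ignore DA bytes 3, 4, 5 */
}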

static void
scx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = sc->sc_flowflags |
	    (mii->mii_media_active & ~IFM_ETH_FMASK);
}

void
mii_statchg(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	const int Mbps[4] = { 10, 100, 1000, 0 };
	uint32_t miisr, mcr, fcr;
	int spd;

	/* decode MIISR register value */
	miisr = mac_read(sc, GMACMIISR);
	spd = Mbps[(miisr >> 1) & 03];
#if 1
	printf("MII link status (0x%x) %s",
	    miisr, (miisr & 8) ? "up" : "down");
	if (miisr & 8) {
		printf(" spd%d", spd);
		if (miisr & 01)
			printf(",full-duplex");
	}
	printf("\n");
#endif
	/* Get flow control negotiation result. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Adjust speed 1000/100/10. */
	mcr = mac_read(sc, GMACMCR);
	if (spd == 1000)
		mcr &= ~MCR_USEMII; /* RGMII+SPD1000 */
	else {
		if (spd == 100 && sc->sc_100mii)
			mcr |= MCR_SPD100;
		mcr |= MCR_USEMII;
	}
	mcr |= MCR_CST | MCR_JE;
	if (sc->sc_100mii == 0)
		mcr |= MCR_IBN;

	/* Adjust duplexity and PAUSE flow control. */
	mcr &= ~MCR_USEFDX;
	fcr = mac_read(sc, GMACFCR) & ~(FCR_TFE | FCR_RFE);
	if (miisr & 01) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
			fcr |= FCR_TFE;
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			fcr |= FCR_RFE;
		mcr |= MCR_USEFDX;
	}
	mac_write(sc, GMACMCR, mcr);
	mac_write(sc, GMACFCR, fcr);

	printf("%ctxfe, %crxfe\n",
	    (fcr & FCR_TFE) ? '+' : '-', (fcr & FCR_RFE) ? '+' : '-');
}

static void
phy_tick(void *arg)
{
	struct scx_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static int
mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct scx_softc *sc = device_private(self);
	uint32_t miia;
	int error;

	uint32_t clk = CSR_READ(sc, CLKEN);
	CSR_WRITE(sc, CLKEN, clk | CLK_G);

	miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk;
	mac_write(sc, GMACGAR, miia | GAR_BUSY);
	error = spin_waitfor(sc, GMACGAR, GAR_BUSY);
	if (error)
		return error;
	*val = mac_read(sc, GMACGDR);
	return 0;
}

static int
mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct scx_softc *sc = device_private(self);
	uint32_t miia;
	uint16_t dummy;
	int error;

	uint32_t clk = CSR_READ(sc, CLKEN);
	CSR_WRITE(sc, CLKEN, clk | CLK_G);

	miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk;
	mac_write(sc, GMACGDR, val);
	mac_write(sc, GMACGAR, miia | GAR_IOWR | GAR_BUSY);
	error = spin_waitfor(sc, GMACGAR, GAR_BUSY);
	if (error)
		return error;
	mii_readreg(self, phy, MII_PHYIDR1, &dummy); /* dummy read cycle */
	return 0;
}

static void
scx_start(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct scx_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < MD_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

1263
1264 if (dmamap->dm_nsegs > sc->sc_txfree) {
1265 /*
1266 * Not enough free descriptors to transmit this
1267 * packet. We haven't committed anything yet,
1268 * so just unload the DMA map, put the packet
1269 * back on the queue, and punt. Notify the upper
1270 * layer that there are not more slots left.
1271 */
1272 ifp->if_flags |= IFF_OACTIVE;
1273 bus_dmamap_unload(sc->sc_dmat, dmamap);
1274 break;
1275 }
1276
1277 IFQ_DEQUEUE(&ifp->if_snd, m0);
1278
1279 /*
1280 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1281 */
1282
1283 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1284 BUS_DMASYNC_PREWRITE);
1285
1286 tdes0 = 0; /* to postpone 1st segment T0_OWN write */
1287 lasttx = -1;
1288 for (nexttx = sc->sc_txnext, seg = 0;
1289 seg < dmamap->dm_nsegs;
1290 seg++, nexttx = MD_NEXTTX(nexttx)) {
1291 struct tdes *tdes = &sc->sc_txdescs[nexttx];
1292 bus_addr_t paddr = dmamap->dm_segs[seg].ds_addr;
1293 /*
1294 * If this is the first descriptor we're
1295 * enqueueing, don't set the OWN bit just
1296 * yet. That could cause a race condition.
1297 * We'll do it below.
1298 */
1299 tdes->t3 = dmamap->dm_segs[seg].ds_len;
1300 tdes->t2 = htole32(BUS_ADDR_LO32(paddr));
1301 tdes->t1 = htole32(BUS_ADDR_HI32(paddr));
1302 tdes->t0 = tdes0 | (tdes->t0 & T0_EOD) |
1303 (15 << T0_TRID) | T0_PT |
1304 sc->sc_t0coso | T0_TRS;
1305 tdes0 = T0_OWN; /* 2nd and other segments */
1306 lasttx = nexttx;
1307 }
		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T1_IC an NFS mbuf is left unack'ed for an
		 * excessive time and NFS makes no progress until
		 * scx_watchdog() calls txreap() to reclaim the unack'ed
		 * mbuf.  It's painful to traverse every mbuf chain to
		 * determine whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t0 |= T0_IOC; /* !!! */
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t0 |= T0_LS;
		sc->sc_txdescs[sc->sc_txnext].t0 |= (T0_FS | T0_OWN);
		SCX_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA start transmit */
		mac_write(sc, GMACTPD, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = MD_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static int
scx_intr(void *arg)
{
	struct scx_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	(void)ifp;
	/* XXX decode interrupt cause to pick isr() XXX */
	rxintr(sc);
	txreap(sc);
	return 1;
}

static void
txreap(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != MD_TXQUEUELEN;
	     i = MD_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		SCX_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == MD_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
rxintr(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = MD_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		SCX_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;
		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = sc->sc_rxdescs[i].r3 >> 16; /* 31:16 received */
		len -= ETHER_CRC_LEN; /* Trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			SCX_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		if (rxstat & R0_CSUM) {
			uint32_t csum = M_CSUM_IPv4;
			if (rxstat & R0_CERR)
				csum |= M_CSUM_IPv4_BAD;
			m->m_pkthdr.csum_flags |= csum;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
	sc->sc_rxptr = i;
}

static int
add_rxbuf(struct scx_softc *sc, int i)
{
	struct scx_rxsoft *rxs = &sc->sc_rxsoft[i];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", i, error);
		panic("add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	SCX_INIT_RXDESC(sc, i);

	return 0;
}

static int
spin_waitfor(struct scx_softc *sc, int reg, int exist)
{
	int busy, loop;

	busy = CSR_READ(sc, reg) & exist;
	if (busy == 0)
		return 0;
	loop = 30000;
	do {
		DELAY(10);
		busy = CSR_READ(sc, reg) & exist;
	} while (--loop > 0 && busy);
	return (loop > 0) ? 0 : ETIMEDOUT;
}

/* GMAC registers need indirect rd/wr via the memory mapped MACCMD/MACDATA pair. */

static int
mac_read(struct scx_softc *sc, int reg)
{

	CSR_WRITE(sc, MACCMD, reg);
	(void)spin_waitfor(sc, MACCMD, CMD_BUSY);
	return CSR_READ(sc, MACDATA);
}

static void
mac_write(struct scx_softc *sc, int reg, int val)
{

	CSR_WRITE(sc, MACDATA, val);
	CSR_WRITE(sc, MACCMD, reg | CMD_IOWR);
	(void)spin_waitfor(sc, MACCMD, CMD_BUSY);
}
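
/*
 * Usage note: mac_read(sc, GMACIMPL), for example, posts the register
 * offset to MACCMD, spins until CMD_BUSY clears, and then fetches the
 * result from MACDATA; writes travel the same window in the opposite
 * direction with CMD_IOWR set.
 */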

/*
 * 3 independent uengines exist to process host2media, media2host and
 * packet data flows.
 */
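/*
 * EEPROM layout as consumed here (inferred from this code, not from a
 * datasheet): 0x00-0x07 MAC address, then 0x08/0x0c/0x10 H2M ucode
 * address hi/lo and word count, 0x14/0x18/0x1c the same for M2H, and
 * 0x20/0x24 PKT ucode address and word count. Sizes are in 32-bit
 * words, hence the "sz *= 4" below.
 */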
static void
loaducode(struct scx_softc *sc)
{
	uint32_t up, lo, sz;
	uint64_t addr;

	sc->sc_ucodeloaded = 1;

	up = EE_READ(sc, 0x08); /* H->M ucode addr high */
	lo = EE_READ(sc, 0x0c); /* H->M ucode addr low */
	sz = EE_READ(sc, 0x10); /* H->M ucode size */
	sz *= 4;
	addr = ((uint64_t)up << 32) | lo;
	aprint_normal_dev(sc->sc_dev, "0x%x H2M ucode %u\n", lo, sz);
	injectucode(sc, H2MENG, (bus_addr_t)addr, (bus_size_t)sz);

	up = EE_READ(sc, 0x14); /* M->H ucode addr high */
	lo = EE_READ(sc, 0x18); /* M->H ucode addr low */
	sz = EE_READ(sc, 0x1c); /* M->H ucode size */
	sz *= 4;
	addr = ((uint64_t)up << 32) | lo;
	injectucode(sc, M2HENG, (bus_addr_t)addr, (bus_size_t)sz);
	aprint_normal_dev(sc->sc_dev, "0x%x M2H ucode %u\n", lo, sz);

	lo = EE_READ(sc, 0x20); /* PKT ucode addr */
	sz = EE_READ(sc, 0x24); /* PKT ucode size */
	sz *= 4;
	injectucode(sc, PKTENG, (bus_addr_t)lo, (bus_size_t)sz);
	aprint_normal_dev(sc->sc_dev, "0x%x PKT ucode %u\n", lo, sz);
}

static void
injectucode(struct scx_softc *sc, int port,
    bus_addr_t addr, bus_size_t size)
{
	bus_space_handle_t bsh;
	bus_size_t off;
	uint32_t ucode;

	if (bus_space_map(sc->sc_st, addr, size, 0, &bsh) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "eeprom map failure for ucode port 0x%x\n", port);
		return;
	}
	for (off = 0; off < size; off += 4) {
		ucode = bus_space_read_4(sc->sc_st, bsh, off);
		CSR_WRITE(sc, port, ucode);
	}
	bus_space_unmap(sc->sc_st, bsh, size);
}

/* bit selection to determine MDIO speed */

static int
get_mdioclk(uint32_t freq)
{

	const struct {
		uint16_t freq, bit; /* GAR 5:2 MDIO frequency selection */
	} mdioclk[] = {
		{ 35,  2 }, /* 25-35 MHz */
		{ 60,  3 }, /* 35-60 MHz */
		{ 100, 0 }, /* 60-100 MHz */
		{ 150, 1 }, /* 100-150 MHz */
		{ 250, 4 }, /* 150-250 MHz */
		{ 300, 5 }, /* 250-300 MHz */
	};
	int i;

	freq /= 1000 * 1000;
	/* pick the GAR 5:2 selection for the bucket freq falls into */
	if (freq < mdioclk[0].freq)
		return mdioclk[0].bit;
	for (i = 1; i < __arraycount(mdioclk); i++) {
		if (freq < mdioclk[i].freq)
			return mdioclk[i].bit;
	}
	return mdioclk[__arraycount(mdioclk) - 1].bit;
}
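
/*
 * Worked example: with the 125 MHz reference clock the FDT path assumes,
 * 125 first satisfies "freq < 150", selecting the 100-150 MHz bucket, so
 * get_mdioclk() returns 1; scx_attach_i() then shifts the value left by
 * 2 to place it in GAR bits 5:2.
 */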