/*	$NetBSD: if_cnmac.c,v 1.1 2015/04/29 08:32:01 hikaru Exp $	*/
2
3 #include <sys/cdefs.h>
4 #if 0
5 __KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.1 2015/04/29 08:32:01 hikaru Exp $");
6 #endif
7
8 #include "opt_octeon.h"
9
10 #ifdef OCTEON_ETH_DEBUG
11
12 #ifndef DIAGNOSTIC
13 #define DIAGNOSTIC
14 #endif
15
16 #ifndef DEBUG
17 #define DEBUG
18 #endif
19
20 #endif
21
22 /*
23 * If no free send buffer is available, free all the sent buffer and bail out.
24 */
25 #define OCTEON_ETH_SEND_QUEUE_CHECK
26
27 /* XXX XXX XXX XXX XXX XXX */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/pool.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/ioctl.h>
37 #include <sys/errno.h>
38 #include <sys/device.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/sysctl.h>
42 #include <sys/syslog.h>
43
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_ether.h>
48 #include <net/route.h>
49
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/in_var.h>
55 #include <netinet/ip.h>
56
57 #include <sys/bus.h>
58 #include <machine/intr.h>
59 #include <machine/endian.h>
60 #include <machine/locore.h>
61
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
64
65 #include <mips/cpuregs.h>
66
67 #include <mips/cavium/dev/octeon_asxreg.h>
68 #include <mips/cavium/dev/octeon_ciureg.h>
69 #include <mips/cavium/dev/octeon_npireg.h>
70 #include <mips/cavium/dev/octeon_gmxreg.h>
71 #include <mips/cavium/dev/octeon_ipdreg.h>
72 #include <mips/cavium/dev/octeon_pipreg.h>
73 #include <mips/cavium/dev/octeon_powreg.h>
74 #include <mips/cavium/dev/octeon_faureg.h>
75 #include <mips/cavium/dev/octeon_fpareg.h>
76 #include <mips/cavium/dev/octeon_bootbusreg.h>
77 #include <mips/cavium/include/iobusvar.h>
78 #include <mips/cavium/octeonvar.h>
79 #include <mips/cavium/dev/octeon_fpavar.h>
80 #include <mips/cavium/dev/octeon_gmxvar.h>
81 #include <mips/cavium/dev/octeon_fauvar.h>
82 #include <mips/cavium/dev/octeon_powvar.h>
83 #include <mips/cavium/dev/octeon_ipdvar.h>
84 #include <mips/cavium/dev/octeon_pipvar.h>
85 #include <mips/cavium/dev/octeon_pkovar.h>
86 #include <mips/cavium/dev/octeon_asxvar.h>
87 #include <mips/cavium/dev/octeon_smivar.h>
88 #include <mips/cavium/dev/if_cnmacvar.h>
89
90 #ifdef OCTEON_ETH_DEBUG
91 #define OCTEON_ETH_KASSERT(x) KASSERT(x)
92 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x)
93 #else
94 #define OCTEON_ETH_KASSERT(x)
95 #define OCTEON_ETH_KDASSERT(x)
96 #endif
97
/*
 * Set the PKO to think command buffers are an odd length.  This makes it so we
 * never have to divide a command across two buffers.
 */
102 #define OCTEON_POOL_NWORDS_CMD \
103 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
104 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */
105
106 static void octeon_eth_buf_init(struct octeon_eth_softc *);
107
108 static int octeon_eth_match(device_t, struct cfdata *, void *);
109 static void octeon_eth_attach(device_t, device_t, void *);
110 static void octeon_eth_pip_init(struct octeon_eth_softc *);
111 static void octeon_eth_ipd_init(struct octeon_eth_softc *);
112 static void octeon_eth_pko_init(struct octeon_eth_softc *);
113 static void octeon_eth_asx_init(struct octeon_eth_softc *);
114 static void octeon_eth_smi_init(struct octeon_eth_softc *);
115
116 static void octeon_eth_board_mac_addr(uint8_t *, size_t, struct octeon_eth_softc *);
117
118 static int octeon_eth_mii_readreg(device_t, int, int);
119 static void octeon_eth_mii_writereg(device_t, int, int, int);
120 static void octeon_eth_mii_statchg(struct ifnet *);
121
122 static int octeon_eth_mediainit(struct octeon_eth_softc *);
123 static void octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
124 static int octeon_eth_mediachange(struct ifnet *);
125
126 static inline void octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
127 static inline void octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
128 static inline void octeon_eth_send_queue_flush(struct octeon_eth_softc *);
129 static inline void octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
130 static inline int octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
131 static inline void octeon_eth_send_queue_add(struct octeon_eth_softc *,
132 struct mbuf *, uint64_t *);
133 static inline void octeon_eth_send_queue_del(struct octeon_eth_softc *,
134 struct mbuf **, uint64_t **);
135 static inline int octeon_eth_buf_free_work(struct octeon_eth_softc *,
136 uint64_t *, uint64_t);
137 static inline void octeon_eth_buf_ext_free_m(struct mbuf *, void *, size_t, void *);
138 static inline void octeon_eth_buf_ext_free_ext(struct mbuf *, void *, size_t, void *);
139
140 static int octeon_eth_ioctl(struct ifnet *, u_long, void *);
141 static void octeon_eth_watchdog(struct ifnet *);
142 static int octeon_eth_init(struct ifnet *);
143 static void octeon_eth_stop(struct ifnet *, int);
144 static void octeon_eth_start(struct ifnet *);
145
146 static inline int octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
147 uint64_t);
148 static inline uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
149 static inline uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
150 int);
151 static inline int octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
152 struct mbuf *, uint64_t *, int *);
153 static inline int octeon_eth_send_makecmd(struct octeon_eth_softc *,
154 struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
155 static inline int octeon_eth_send_buf(struct octeon_eth_softc *,
156 struct mbuf *, uint64_t *);
157 static inline int octeon_eth_send(struct octeon_eth_softc *,
158 struct mbuf *);
159
160 static int octeon_eth_reset(struct octeon_eth_softc *);
161 static int octeon_eth_configure(struct octeon_eth_softc *);
162 static int octeon_eth_configure_common(struct octeon_eth_softc *);
163
164 static void octeon_eth_tick_free(void *arg);
165 static void octeon_eth_tick_misc(void *);
166
167 static inline int octeon_eth_recv_mbuf(struct octeon_eth_softc *,
168 uint64_t *, struct mbuf **);
169 static inline int octeon_eth_recv_check_code(struct octeon_eth_softc *,
170 uint64_t);
171 static inline int octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
172 uint64_t);
173 static inline int octeon_eth_recv_check_link(struct octeon_eth_softc *,
174 uint64_t);
175 static inline int octeon_eth_recv_check(struct octeon_eth_softc *,
176 uint64_t);
177 static inline int octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
178 static void octeon_eth_recv_redir(struct ifnet *, struct mbuf *);
179 static inline void octeon_eth_recv_intr(void *, uint64_t *);
180
/* device driver context */
static struct octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];	/* per-port softc, indexed by GMX port number */
static void *octeon_eth_pow_recv_ih;	/* POW receive interrupt handle, shared by all ports */

/* sysctl'able parameters */
int octeon_eth_param_pko_cmd_w0_n2 = 1;	/* "n2" field of PKO command word 0 */
int octeon_eth_param_pip_dyn_rs = 1;
int octeon_eth_param_redir = 0;
int octeon_eth_param_pktbuf = 0;
int octeon_eth_param_rate = 0;
int octeon_eth_param_intr = 0;

CFATTACH_DECL_NEW(cnmac, sizeof(struct octeon_eth_softc),
    octeon_eth_match, octeon_eth_attach, NULL, NULL);
195
#ifdef OCTEON_ETH_DEBUG

/*
 * Per-softc event counters (debug builds only); attached at the end of
 * octeon_eth_attach() via OCTEON_EVCNT_ATTACH_EVCNTS().
 * _ENTRY(name, type, parent, descr) maps a softc member to an evcnt.
 */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx, MISC, NULL, "rx"),
	_ENTRY(rxint, INTR, NULL, "rx intr"),
	_ENTRY(rxrs, MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc, MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput, MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc, MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput, MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode, MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix, MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb, MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink, MISC, NULL, "rx link error"),
	_ENTRY(rxerroff, MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort, MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble, MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc, MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress, MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp, MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok, MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx, MISC, NULL, "tx"),
	_ENTRY(txadd, MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc, MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget, MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc, MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget, MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput, MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel, MISC, NULL, "tx del"),
	_ENTRY(txerr, MISC, NULL, "tx error"),
	_ENTRY(txerrcmd, MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf, MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink, MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd, MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
#endif
235
/* ---- buffer management */

/*
 * Parameters of the FPA pools used by this driver: packet data (PKT),
 * work-queue entries (WQE), PKO command buffers (CMD) and transmit
 * gather lists (SG).
 * NOTE(review): octeon_eth_buf_init() stores pools by table index, while
 * the aliases below index by OCTEON_POOL_NO_*; this assumes the table
 * order matches the pool numbering — confirm against octeonvar.h.
 */
static const struct octeon_eth_pool_param {
	int poolno;		/* FPA pool number (OCTEON_POOL_NO_*) */
	size_t size;		/* element size in bytes */
	size_t nelems;		/* number of elements in the pool */
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef _ENTRY
};
/* Global pool handles, filled in by octeon_eth_buf_init(). */
struct octeon_fpa_buf *octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
255
256 static void
257 octeon_eth_buf_init(struct octeon_eth_softc *sc)
258 {
259 static int once;
260 int i;
261 const struct octeon_eth_pool_param *pp;
262 struct octeon_fpa_buf *fb;
263
264 if (once == 1)
265 return;
266 once = 1;
267
268 for (i = 0; i < (int)__arraycount(octeon_eth_pool_params); i++) {
269 pp = &octeon_eth_pool_params[i];
270 octeon_fpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
271 octeon_eth_pools[i] = fb;
272 }
273 }
274
275 /* ---- autoconf */
276
277 static int
278 octeon_eth_match(device_t parent, struct cfdata *match, void *aux)
279 {
280 struct octeon_gmx_attach_args *ga = aux;
281
282 if (strcmp(match->cf_name, ga->ga_name) != 0) {
283 return 0;
284 }
285 return 1;
286 }
287
/*
 * Autoconf attach: set up one GMX port as an Ethernet interface.
 * Initializes the PIP/IPD/PKO/ASX/SMI submodules, media/MII, the
 * ifnet, and (shared across all instances) the FPA buffer pools and
 * the POW receive interrupt handler.
 */
static void
octeon_eth_attach(device_t parent, device_t self, void *aux)
{
	struct octeon_eth_softc *sc = device_private(self);
	struct octeon_gmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_regt = ga->ga_regt;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;

	sc->sc_init_flag = 0;
	/*
	 * XXXUEBAYASI
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	/* Record receive-side quirks for CN30XX and earlier chips. */
	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_PRE_ALIGN);
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_RX_INBND);
	}

	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc);
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	octeon_gmx_stats_init(sc->sc_gmx_port);

	callout_init(&sc->sc_tick_misc_ch, 0);
	callout_init(&sc->sc_tick_free_ch, 0);

	/* FAU register used to count hardware-completed transmits. */
	octeon_fau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	octeon_fau_op_set_8(&sc->sc_fau_done, 0);

	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Cross-link the GMX port with the submodules initialized above. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
	/* XXX */
	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;

	/* XXX */
	sc->sc_pow = &octeon_pow_softc;

	octeon_eth_mediainit(sc);

	strncpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	ifp->if_init = octeon_eth_init;
	ifp->if_stop = octeon_eth_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX: not yet tx checksum */
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	octeon_gmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	octeon_gmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/* XXX one-second caps, presumably rate limits for receive error paths */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_short_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_preamble_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_crc_cap.tv_sec = 1;
#ifdef OCTEON_ETH_DEBUG
	sc->sc_rate_recv_fixup_odd_nibble_addr_cap.tv_sec = 1;
#endif
	/* XXX */

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* The POW receive interrupt is shared by all ports; establish once. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = octeon_pow_intr_establish(OCTEON_POW_GROUP_PIP,
		    IPL_NET, octeon_eth_recv_intr, NULL, NULL);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    device_xname(sc->sc_dev));
}
398
399 /* ---- submodules */
400
401 /* XXX */
402 static void
403 octeon_eth_pip_init(struct octeon_eth_softc *sc)
404 {
405 struct octeon_pip_attach_args pip_aa;
406
407 pip_aa.aa_port = sc->sc_port;
408 pip_aa.aa_regt = sc->sc_regt;
409 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
410 pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
411 pip_aa.aa_ip_offset = sc->sc_ip_offset;
412 octeon_pip_init(&pip_aa, &sc->sc_pip);
413 }
414
415 /* XXX */
416 static void
417 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
418 {
419 struct octeon_ipd_attach_args ipd_aa;
420
421 ipd_aa.aa_port = sc->sc_port;
422 ipd_aa.aa_regt = sc->sc_regt;
423 ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
424 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
425 octeon_ipd_init(&ipd_aa, &sc->sc_ipd);
426 }
427
428 /* XXX */
429 static void
430 octeon_eth_pko_init(struct octeon_eth_softc *sc)
431 {
432 struct octeon_pko_attach_args pko_aa;
433
434 pko_aa.aa_port = sc->sc_port;
435 pko_aa.aa_regt = sc->sc_regt;
436 pko_aa.aa_cmdptr = &sc->sc_cmdptr;
437 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
438 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
439 octeon_pko_init(&pko_aa, &sc->sc_pko);
440 }
441
442 /* XXX */
443 static void
444 octeon_eth_asx_init(struct octeon_eth_softc *sc)
445 {
446 struct octeon_asx_attach_args asx_aa;
447
448 asx_aa.aa_port = sc->sc_port;
449 asx_aa.aa_regt = sc->sc_regt;
450 octeon_asx_init(&asx_aa, &sc->sc_asx);
451 }
452
453 static void
454 octeon_eth_smi_init(struct octeon_eth_softc *sc)
455 {
456 struct octeon_smi_attach_args smi_aa;
457
458 smi_aa.aa_port = sc->sc_port;
459 smi_aa.aa_regt = sc->sc_regt;
460 octeon_smi_init(&smi_aa, &sc->sc_smi);
461 octeon_smi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
462 }
463
/* ---- XXX */

/*
 * Convert a 6-byte Ethernet address to/from its 48-bit integer form,
 * with a[0] in the most significant byte position.
 */
#define	ADDR2UINT64(u, a) \
	do { \
		u = \
		    (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
		     ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
		     ((uint64_t)a[4] << 8) | ((uint64_t)a[5] << 0)); \
	} while (0)
#define	UINT642ADDR(a, u) \
	do { \
		a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
		a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
		a[4] = (uint8_t)((u) >> 8); a[5] = (uint8_t)((u) >> 0); \
	} while (0)
479
480 static void
481 octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size, struct octeon_eth_softc *sc)
482 {
483 prop_dictionary_t dict;
484 prop_data_t ea;
485
486 dict = device_properties(sc->sc_dev);
487 KASSERT(dict != NULL);
488 ea = prop_dictionary_get(dict, "mac-address");
489 KASSERT(ea != NULL);
490 memcpy(enaddr, prop_data_data_nocopy(ea), size);
491 }
492
493 /* ---- media */
494
495 static int
496 octeon_eth_mii_readreg(device_t self, int phy_addr, int reg)
497 {
498 struct octeon_eth_softc *sc = device_private(self);
499
500 return octeon_smi_read(sc->sc_smi, phy_addr, reg);
501 }
502
503 static void
504 octeon_eth_mii_writereg(device_t self, int phy_addr, int reg, int value)
505 {
506 struct octeon_eth_softc *sc = device_private(self);
507
508 octeon_smi_write(sc->sc_smi, phy_addr, reg, value);
509 }
510
/*
 * MII status-change callback: link parameters changed, so quiesce PKO
 * and GMX for this port, reset/reconfigure, then re-enable them.
 */
static void
octeon_eth_mii_statchg(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	octeon_pko_port_enable(sc->sc_pko, 0);
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* Re-program the RX filter, which the reset may have clobbered. */
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		octeon_gmx_set_filter(sc->sc_gmx_port);

	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
}
527
/*
 * Attach ifmedia/MII.  The PHY address comes from the "phy-addr"
 * device property.  When a PHY is found, media defaults to
 * autoselect; otherwise a manual "none" medium is installed so media
 * ioctls still work.  Always returns 0.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	prop_object_t phy;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
	KASSERT(phy != NULL);

	mii_attach(sc->sc_dev, &sc->sc_mii,
	    0xffffffff, prop_number_integer_value(phy),
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* XXX XXX XXX */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) {
		/* XXX XXX XXX */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		/* XXX XXX XXX */
	} else {
		/* No PHY answered; fall back to a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		/* XXX XXX XXX */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
		/* XXX XXX XXX */
	}
	/* XXX XXX XXX */

	return 0;
}
565
566 static void
567 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
568 {
569 struct octeon_eth_softc *sc = ifp->if_softc;
570
571 mii_pollstat(&sc->sc_mii);
572
573 ifmr->ifm_status = sc->sc_mii.mii_media_status;
574 ifmr->ifm_active = sc->sc_mii.mii_media_active;
575 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
576 sc->sc_gmx_port->sc_port_flowflags;
577 }
578
579 static int
580 octeon_eth_mediachange(struct ifnet *ifp)
581 {
582 struct octeon_eth_softc *sc = ifp->if_softc;
583
584 mii_mediachg(&sc->sc_mii);
585
586 return 0;
587 }
588
/* ---- send buffer garbage collection */

/*
 * Issue the FAU fetch-and-add (increment by 0) request for the "done"
 * counter; the value is collected later by
 * octeon_eth_send_queue_flush_fetch().  Splitting request and read
 * lets other work overlap the fetch.
 */
static inline void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
598
/*
 * Collect the FAU "done" counter value requested by
 * octeon_eth_send_queue_flush_prefetch() into sc_hard_done_cnt.
 * The value is non-positive; its magnitude is the number of completed
 * transmits to reclaim (see octeon_eth_send_queue_flush()).
 */
static inline void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	/* Nothing to collect if no prefetch is outstanding. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
611
/*
 * Reclaim completed transmits: for each of the |sc_hard_done_cnt|
 * hardware-completed sends, pop the oldest queue entry, return its
 * gather buffer to the FPA and free the mbuf.  The FAU counter is
 * then credited back by the number reclaimed (synced later in
 * octeon_eth_send_queue_flush_sync()).
 */
static inline void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		octeon_fpa_buf_put(octeon_eth_fb_sg, gbuf);
		OCTEON_EVCNT_INC(sc, txbufgbput);

		m_freem(m);
	}

	/* Credit the counter back by the number of entries reclaimed. */
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}
636
/*
 * Complete the FAU credit issued by octeon_eth_send_queue_flush() and
 * drop the reclaimed entries from the software queue count.  No-op if
 * no flush is pending.
 */
static inline void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX XXX XXX */
	octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX XXX XXX */

	sc->sc_flush = 0;
}
653
/*
 * Return nonzero when the transmit gather queue cannot accept another
 * packet; in that case completed entries are flushed as a side effect
 * so the caller can retry later.
 */
static inline int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* Entries still in flight = software-queued + (negative) done count. */
	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
672
/*
 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
 * buffer. Other mbuf members may be used by m_freem(), so don't touch them!
 */

struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;	/* overlays the queued mbuf itself */
		struct {
			/* pad so the link field lands exactly on m_nextpkt */
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			/* pad so the gather pointer lands exactly on m_paddr */
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
693
/*
 * Append a transmitted mbuf (and its gather buffer) to the send queue
 * so both can be freed once the hardware reports completion.
 */
static inline void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	/* Count mbufs with external free callbacks (see octeon_eth_start()). */
	if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;

	OCTEON_EVCNT_INC(sc, txadd);
}
708
/*
 * Remove the oldest entry from the send queue, returning the mbuf and
 * the gather buffer stashed in it by octeon_eth_send_queue_add().
 * The queue must not be empty.
 */
static inline void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	/* Undo the external-callback accounting done at enqueue time. */
	if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}

	OCTEON_EVCNT_INC(sc, txdel);
}
729
/*
 * Return a received work-queue entry to the WQE pool and, if the
 * packet consumed packet-data buffers, the packet buffer to the PKT
 * pool.  Always returns 0.
 */
static inline int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = work[3] & PIP_WQE_WORD3_ADDR;
		/* Round down to the 2KB-aligned start of the buffer. */
		start_buffer = addr & ~(2048 - 1);

		octeon_fpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
751
/*
 * External-storage free callback for "dynamic short" receives (rxrs):
 * return the work-queue entry to the FPA and the mbuf to its pool
 * cache.  NOTE(review): presumably the packet data lives inside the
 * WQE here, so no PKT buffer is returned — confirm against the recv
 * path.
 */
static inline void
octeon_eth_buf_ext_free_m(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] holds the owning softc (debug builds only). */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
772
/*
 * External-storage free callback for mbufs whose data points into an
 * FPA packet buffer: return both the work-queue entry and the packet
 * buffer to their pools, then the mbuf to its pool cache.
 */
static inline void
octeon_eth_buf_ext_free_ext(struct mbuf *m, void *buf, size_t size,
    void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] holds the owning softc (debug builds only). */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	octeon_fpa_buf_put(octeon_eth_fb_pkt, buf);
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
795
/* ---- ifnet interfaces */

/*
 * ifnet ioctl handler.  Media ioctls are massaged for flow-control
 * settings and handed to ifmedia_ioctl(); everything else goes to
 * ether_ioctl().  octeon_eth_start() is kicked before returning to
 * drain anything queued meanwhile.
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW is shorthand for both pause directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		/* XXX: Flow control */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				octeon_gmx_set_filter(sc->sc_gmx_port);
			error = 0;
		}
		break;
	}
	octeon_eth_start(ifp);
	splx(s);

	return (error);
}
844
/* ---- send (output) */

/*
 * Build PKO command word 0 for a packet of len bytes in segs segments.
 * Gather mode (g=1) is selected when segs > 1; fau0 names the FAU
 * register updated on completion (see sc_fau_done).
 */
static inline uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return octeon_pko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		0, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}
861
/*
 * Build PKO command word 1 describing one buffer: size plus physical
 * address, with the gather FPA pool as the owning pool.
 */
static inline uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return octeon_pko_cmd_word1(
		0, 0,				/* i, back */
		FPA_GATHER_BUFFER_POOL,		/* pool */
		size, addr);			/* size, addr */
}
870
/*
 * Translate mbuf chain m0 into PKO gather entries in gbuf, one entry
 * per piece.  A piece crossing a 4KB page boundary is split in two
 * (XXX presumably because physical contiguity is only guaranteed
 * within a page).  Returns nonzero when more than 63 entries would be
 * needed; otherwise *rsegs receives the entry count.
 * NOTE(review): a piece spanning more than two pages would not be
 * split correctly — presumably cannot happen with standard mbuf
 * cluster sizes; confirm.
 */
static inline int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uintptr_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		/* Skip empty mbufs. */
		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX XXX XXX */
			/* First part: up to the page boundary. */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    kvtophys((vaddr_t)m->m_data));
			segs++;
			if (segs > 63) {
				/* Gather buffer holds at most 63 entries. */
				return 1;
			}
			/* XXX XXX XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* Remaining (or only) part of this mbuf. */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    kvtophys((vaddr_t)(m->m_data + rlen)));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
924
/*
 * Build the two PKO command words for mbuf chain m.  The gather
 * buffer is always filled; single-segment packets use link mode and
 * pass the data address directly in word 1.  Returns nonzero (after
 * logging) if the chain needs too many gather entries.
 */
static inline int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: there are a lot of number of segments"
		    " of transmission data", device_xname(sc->sc_dev));
		result = 1;
		goto done;
	}

	/*
	 * segs == 1 -> link mode (single continuous buffer)
	 *	WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1 -> gather mode (scatter-gather buffer)
	 *	WORD1[size] is number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs);
	pko_cmd_w1 = octeon_eth_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		kvtophys((vaddr_t)m->m_data) :
		MIPS_XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
961
/*
 * Append the two command words to the PKO command buffer and ring the
 * doorbell.  When the current command buffer fills up, a fresh buffer
 * is allocated from the CMD FPA pool and chained in via its physical
 * address in the last word.  Returns nonzero if no command buffer
 * could be allocated.
 */
static inline int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	/* sc_cmdptr.cmdptr is a physical address; map it for CPU stores. */
	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = octeon_fpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    device_xname(sc->sc_dev));
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		/* Last word of the old buffer chains to the new one. */
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	/* Tell the PKO two more command words are available. */
	octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
1003
1004 static inline int
1005 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1006 uint64_t *gbuf)
1007 {
1008 int result = 0, error;
1009 uint64_t pko_cmd_w0, pko_cmd_w1;
1010
1011 error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1012 if (error != 0) {
1013 /* already logging */
1014 OCTEON_EVCNT_INC(sc, txerrmkcmd);
1015 result = error;
1016 goto done;
1017 }
1018
1019 error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
1020 if (error != 0) {
1021 /* already logging */
1022 OCTEON_EVCNT_INC(sc, txerrcmd);
1023 result = error;
1024 }
1025
1026 done:
1027 return result;
1028 }
1029
/*
 * Transmit one mbuf: allocate a gather buffer from the SG FPA pool,
 * build and issue the PKO command, and enqueue the mbuf so it is
 * freed on hardware completion.  On failure the gather buffer is
 * returned to the pool and nonzero is returned; the caller frees the
 * mbuf (see octeon_eth_start()).
 */
static inline int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	OCTEON_EVCNT_INC(sc, tx);

	gaddr = octeon_fpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: can not allocate gather buffer from free pool allocator\n",
		    device_xname(sc->sc_dev));
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		result = 1;
		goto done;
	}
	OCTEON_EVCNT_INC(sc, txbufgbget);

	/* Map the physical gather buffer for CPU stores. */
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);

	OCTEON_ETH_KASSERT(gbuf != NULL);

	error = octeon_eth_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		octeon_fpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		OCTEON_EVCNT_INC(sc, txbufgbput);
		result = error;
		goto done;
	}

	/* Success: the mbuf and gather buffer are freed on completion. */
	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
1068
/*
 * if_start handler: drain the interface send queue into the PKO.
 *
 * NOTE(review): an iobdma prefetch of the send-queue free state is
 * kept in flight across the loop.  Every
 * octeon_eth_send_queue_flush_prefetch() must be balanced by exactly
 * one octeon_eth_send_queue_flush_fetch() before the next prefetch or
 * before returning; the control flow below is shaped around that
 * pairing, which is why the early returns look asymmetric.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	/* Nothing to do unless the interface is running and not active. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* XXX XXX XXX */
		/* Consume the iobdma result issued before this iteration. */
		octeon_eth_send_queue_flush_fetch(sc);

		/*
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			/*
			 * The outstanding prefetch was already consumed by
			 * the flush_fetch above, so a plain return is safe.
			 */
			return;
		}
		/* XXX XXX XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m);

		/* XXX XXX XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			/* mbuf was not queued by octeon_eth_send; drop it */
			IF_DROP(&ifp->if_snd);
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed in the transmission of the packet\n",
			    device_xname(sc->sc_dev));
			OCTEON_EVCNT_INC(sc, txerr);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX XXX XXX */

		/*
		 * send next iobdma request
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

	/*
	 * Don't schedule send-buffer-free callout every time - those buffers are freed
	 * by "free tick". This makes some packets like NFS slower.
	 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* Balance the outstanding prefetch before leaving. */
	octeon_eth_send_queue_flush_fetch(sc);
}
1162
/*
 * if_watchdog handler: the transmitter timed out.  Log it, reprogram
 * the whole datapath and restart output.
 */
static void
octeon_eth_watchdog(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));

	/* Full per-port reconfiguration; re-enables the GMX port too. */
	octeon_eth_configure(sc);

	/* Mark the interface usable again and clear the stuck timer. */
	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Kick the send queue. */
	octeon_eth_start(ifp);
}
1178
/*
 * if_init handler: bring the interface up.
 *
 * The first call performs one-time hardware setup (stop, configure,
 * enable PKO/IPD); subsequent calls merely re-enable the GMX port.
 * Always reprograms media and the RX filter and restarts the periodic
 * callouts.  Returns 0.
 */
static int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device */
		octeon_eth_configure(sc);

		octeon_pko_enable(sc->sc_pko);
		octeon_ipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		octeon_gmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	/* Program the receive address filter. */
	octeon_gmx_set_filter(sc->sc_gmx_port);

	/* Restart the statistics and buffer-reclaim ticks (1 s period). */
	callout_reset(&sc->sc_tick_misc_ch, hz, octeon_eth_tick_misc, sc);
	callout_reset(&sc->sc_tick_free_ch, hz, octeon_eth_tick_free, sc);

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);

	return 0;
}
1211
/*
 * if_stop handler: quiesce the interface.  Stops the periodic
 * callouts, takes the PHY down and disables the GMX port.  The
 * `disable' argument is accepted for the if_stop signature but unused
 * here.
 */
static void
octeon_eth_stop(struct ifnet *ifp, int disable)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_misc_ch);
	callout_stop(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	/* Mark the interface as down and cancel the watchdog timer. */
	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1228
1229 /* ---- misc */
1230
/* PKO command-buffer index mask.  NOTE(review): the 12-bit width is marked XXX in the original -- confirm against the PKO documentation. */
#define PKO_INDEX_MASK ((1ULL << 12/* XXX */) - 1)
1232
/*
 * Reset the GMX port's speed, flow-control and timing parameters.
 * Always returns 0.
 */
static int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	octeon_gmx_reset_speed(sc->sc_gmx_port);
	octeon_gmx_reset_flowctl(sc->sc_gmx_port);
	octeon_gmx_reset_timing(sc->sc_gmx_port);

	return 0;
}
1242
/*
 * Program the per-port datapath (GMX, PKO, PIP, statistics).  The GMX
 * port is kept disabled while reconfiguring.  The call order follows
 * the hardware bring-up sequence; do not reorder.  Always returns 0.
 */
static int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* Chip-wide (not per-port) setup; runs only once, see below. */
	octeon_eth_configure_common(sc);

	octeon_pko_port_config(sc->sc_pko);
	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_pip_port_config(sc->sc_pip);

	/* Make TX/RX statistics registers clear-on-read. */
	octeon_gmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	octeon_gmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	octeon_gmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1263
/*
 * One-time chip-wide configuration shared by all ports (IPD, PKO,
 * POW).  Subsequent calls are no-ops thanks to the `once' guard.
 * Always returns 0.
 */
static int
octeon_eth_configure_common(struct octeon_eth_softc *sc)
{
	static int once;

	if (once == 1)
		return 0;
	once = 1;

	octeon_ipd_config(sc->sc_ipd);
#ifdef OCTEON_ETH_IPD_RED
	/*
	 * NOTE(review): thresholds look like 1/4 and 1/8 of
	 * RECV_QUEUE_SIZE -- confirm pass/drop semantics against
	 * octeon_ipd_red().
	 */
	octeon_ipd_red(sc->sc_ipd, RECV_QUEUE_SIZE >> 2, RECV_QUEUE_SIZE >> 3);
#endif
	octeon_pko_config(sc->sc_pko);

	octeon_pow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);

	return 0;
}
1283
1284 /* ---- receive (input) */
1285
/*
 * Wrap a received work-queue entry in an mbuf without copying.
 *
 * Two layouts exist depending on WORD2[IP_BUFS]:
 *  - zero: "dynamic short" -- the packet data lives inside the WQE
 *    itself (96 bytes starting at work[4]);
 *  - nonzero: the data sits in a separate 2048-byte packet buffer
 *    whose address comes from WORD3.
 * In both cases the buffer is attached as external mbuf storage whose
 * free callback returns it to the FPA.  Returns 0 and stores the new
 * mbuf in *rm, or 1 if no mbuf header could be allocated (the caller
 * then frees the work entry itself).
 */
static inline int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(struct mbuf *, void *, size_t, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		/* Payload starts sc_ip_offset bytes into the area. */
		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
		/* Round down to the 2048-byte buffer boundary. */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* WORD1[LEN] (top 16 bits) is the packet length in bytes. */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m->m_pkthdr.rcvif = &sc->sc_ethercom.ec_if;
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1345
1346 static inline int
1347 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1348 {
1349 uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1350
1351 if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1352 return 0;
1353
1354 /* this error is harmless */
1355 if (opecode == PIP_OVER_ERR)
1356 return 0;
1357
1358 return 1;
1359 }
1360
1361 static inline int
1362 octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
1363 {
1364 if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
1365 return 1;
1366 return 0;
1367 }
1368
1369 static inline int
1370 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1371 {
1372 if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port)))
1373 return 1;
1374 return 0;
1375 }
1376
1377 static inline int
1378 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1379 {
1380 if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1381 if (ratecheck(&sc->sc_rate_recv_check_link_last,
1382 &sc->sc_rate_recv_check_link_cap))
1383 log(LOG_DEBUG,
1384 "%s: link is not up, the packet was dropped\n",
1385 device_xname(sc->sc_dev));
1386 OCTEON_EVCNT_INC(sc, rxerrlink);
1387 return 1;
1388 }
1389
1390 #if 0 /* XXX Performance tunig (Jumbo-frame is not supported yet!) */
1391 if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1392 /* XXX jumbo frame */
1393 if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1394 &sc->sc_rate_recv_check_jumbo_cap))
1395 log(LOG_DEBUG,
1396 "jumbo frame was received\n");
1397 OCTEON_EVCNT_INC(sc, rxerrjmb);
1398 return 1;
1399 }
1400 #endif
1401
1402 if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1403
1404 if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1405 PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1406 /* no logging */
1407 /* XXX inclement special error count */
1408 } else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1409 PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1410 /* not an erorr. it's because of overload */
1411 } else {
1412
1413 if (ratecheck(&sc->sc_rate_recv_check_code_last,
1414 &sc->sc_rate_recv_check_code_cap))
1415 log(LOG_WARNING,
1416 "%s: the reception error had occured, "
1417 "the packet was dropped (error code = %" PRId64 ")\n",
1418 device_xname(sc->sc_dev), word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1419 }
1420 OCTEON_EVCNT_INC(sc, rxerrcode);
1421 return 1;
1422 }
1423
1424 return 0;
1425 }
1426
/*
 * Receive one work-queue entry: validate it, wrap it in an mbuf and
 * feed it to the network stack.  Returns 0 on success, 1 when the
 * packet was dropped (the work entry is freed here in that case).
 *
 * The send-queue prefetch/fetch pairs sprinkled through this function
 * opportunistically reclaim transmit buffers while we are here anyway;
 * every prefetch is balanced by a fetch on all paths (the same
 * sc_soft_req_cnt condition guards both ends).
 */
static inline int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX XXX XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX XXX XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_ethercom.ec_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	/* Fill in the hardware checksum-offload results for the stack. */
	octeon_ipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* count input packet */
	ifp->if_ipackets++;

	/* XXX XXX XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}
	/* XXX XXX XXX */

	bpf_mtap(ifp, m);

	/* XXX XXX XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX XXX XXX */

	(*ifp->if_input)(ifp, m);

	return 0;

drop:
	/* Balance the prefetch issued at function entry, if any. */
	/* XXX XXX XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX XXX XXX */

	return result;
}
1505
1506 static void
1507 octeon_eth_recv_redir(struct ifnet *ifp, struct mbuf *m)
1508 {
1509 struct octeon_eth_softc *rsc = ifp->if_softc;
1510 struct octeon_eth_softc *sc = NULL;
1511 int i;
1512
1513 for (i = 0; i < 3 /* XXX */; i++) {
1514 if (rsc->sc_redir & (1 << i))
1515 sc = octeon_eth_gsc[i];
1516 }
1517
1518 if (sc == NULL) {
1519 m_freem(m);
1520 return;
1521 }
1522 octeon_eth_send_queue_flush_prefetch(sc);
1523
1524 octeon_eth_send_queue_flush_fetch(sc);
1525
1526 if (octeon_eth_send_queue_is_full(sc)) {
1527 m_freem(m);
1528 return;
1529 }
1530 if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
1531 octeon_eth_send_queue_flush(sc);
1532
1533 if (octeon_eth_send(sc, m)) {
1534 IF_DROP(&ifp->if_snd);
1535 m_freem(m);
1536 } else {
1537 sc->sc_soft_req_cnt++;
1538 }
1539
1540 if (sc->sc_flush)
1541 octeon_eth_send_queue_flush_sync(sc);
1542 }
1543
1544 static inline void
1545 octeon_eth_recv_intr(void *data, uint64_t *work)
1546 {
1547 struct octeon_eth_softc *sc;
1548 int port;
1549
1550 OCTEON_ETH_KASSERT(work != NULL);
1551
1552 port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1553
1554 OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1555
1556 sc = octeon_eth_gsc[port];
1557
1558 OCTEON_ETH_KASSERT(sc != NULL);
1559 OCTEON_ETH_KASSERT(port == sc->sc_port);
1560
1561 /* XXX process all work queue entries anyway */
1562
1563 (void)octeon_eth_recv(sc, work);
1564 }
1565
1566 /* ---- tick */
1567
1568 /*
1569 * octeon_eth_tick_free
1570 *
1571 * => garbage collect send gather buffer / mbuf
1572 * => called at softclock
1573 */
1574 static void
1575 octeon_eth_tick_free(void *arg)
1576 {
1577 struct octeon_eth_softc *sc = arg;
1578 int timo;
1579 int s;
1580
1581 s = splnet();
1582 /* XXX XXX XXX */
1583 if (sc->sc_soft_req_cnt > 0) {
1584 octeon_eth_send_queue_flush_prefetch(sc);
1585 octeon_eth_send_queue_flush_fetch(sc);
1586 octeon_eth_send_queue_flush(sc);
1587 octeon_eth_send_queue_flush_sync(sc);
1588 }
1589 /* XXX XXX XXX */
1590
1591 /* XXX XXX XXX */
1592 /* ??? */
1593 timo = hz - (100 * sc->sc_ext_callback_cnt);
1594 if (timo < 10)
1595 timo = 10;
1596 callout_schedule(&sc->sc_tick_free_ch, timo);
1597 /* XXX XXX XXX */
1598 splx(s);
1599 }
1600
/*
 * octeon_eth_tick_misc
 *
 * => collect statistics
 * => check link status
 * => called at softclock
 */
static void
octeon_eth_tick_misc(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp;
	int s;

	s = splnet();

	ifp = &sc->sc_ethercom.ec_if;

	/* Harvest GMX and PIP counters into the interface statistics. */
	octeon_gmx_stats(sc->sc_gmx_port);
	octeon_pip_stats(sc->sc_pip, ifp, sc->sc_port);
	/* Poll the PHY for link-state changes. */
	mii_tick(&sc->sc_mii);

	splx(s);

	/* Run again in one second. */
	callout_schedule(&sc->sc_tick_misc_ch, hz);
}
1627
1628 /* ---- odd nibble preamble workaround (software CRC processing) */
1629
1630 /* ---- sysctl */
1631
/* Handlers for the hw.cnmac sysctl subtree (defined below). */
static int octeon_eth_sysctl_verify(SYSCTLFN_ARGS);
static int octeon_eth_sysctl_pool(SYSCTLFN_ARGS);
static int octeon_eth_sysctl_rd(SYSCTLFN_ARGS);

/* Dynamically assigned sysctl node numbers, recorded at setup time. */
static int octeon_eth_sysctl_pkocmdw0n2_num;
static int octeon_eth_sysctl_pipdynrs_num;
static int octeon_eth_sysctl_redir_num;
static int octeon_eth_sysctl_pkt_pool_num;
static int octeon_eth_sysctl_wqe_pool_num;
static int octeon_eth_sysctl_cmd_pool_num;
static int octeon_eth_sysctl_sg_pool_num;
static int octeon_eth_sysctl_pktbuf_num;
1644
1645 /*
1646 * Set up sysctl(3) MIB, hw.cnmac.*.
1647 */
1648 SYSCTL_SETUP(sysctl_octeon_eth, "sysctl cnmac subtree setup")
1649 {
1650 int rc;
1651 int octeon_eth_sysctl_root_num;
1652 const struct sysctlnode *node;
1653
1654 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
1655 0, CTLTYPE_NODE, "hw", NULL,
1656 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
1657 goto err;
1658 }
1659
1660 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1661 0, CTLTYPE_NODE, "cnmac",
1662 SYSCTL_DESCR("cnmac interface controls"),
1663 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
1664 goto err;
1665 }
1666
1667 octeon_eth_sysctl_root_num = node->sysctl_num;
1668
1669 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1670 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1671 CTLTYPE_INT, "pko_cmd_w0_n2",
1672 SYSCTL_DESCR("PKO command WORD0 N2 bit"),
1673 octeon_eth_sysctl_verify, 0,
1674 &octeon_eth_param_pko_cmd_w0_n2,
1675 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1676 CTL_EOL)) != 0) {
1677 goto err;
1678 }
1679
1680 octeon_eth_sysctl_pkocmdw0n2_num = node->sysctl_num;
1681
1682 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1683 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1684 CTLTYPE_INT, "pip_dyn_rs",
1685 SYSCTL_DESCR("PIP dynamic short in WQE"),
1686 octeon_eth_sysctl_verify, 0,
1687 &octeon_eth_param_pip_dyn_rs,
1688 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1689 CTL_EOL)) != 0) {
1690 goto err;
1691 }
1692
1693 octeon_eth_sysctl_pipdynrs_num = node->sysctl_num;
1694
1695 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1696 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1697 CTLTYPE_INT, "redir",
1698 SYSCTL_DESCR("input port redirection"),
1699 octeon_eth_sysctl_verify, 0,
1700 &octeon_eth_param_redir,
1701 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1702 CTL_EOL)) != 0) {
1703 goto err;
1704 }
1705
1706 octeon_eth_sysctl_redir_num = node->sysctl_num;
1707
1708 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1709 CTLFLAG_PERMANENT,
1710 CTLTYPE_INT, "pkt_pool",
1711 SYSCTL_DESCR("packet pool available"),
1712 octeon_eth_sysctl_pool, 0, NULL,
1713 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1714 CTL_EOL)) != 0) {
1715 goto err;
1716 }
1717
1718 octeon_eth_sysctl_pkt_pool_num = node->sysctl_num;
1719
1720 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1721 CTLFLAG_PERMANENT,
1722 CTLTYPE_INT, "wqe_pool",
1723 SYSCTL_DESCR("wqe pool available"),
1724 octeon_eth_sysctl_pool, 0, NULL,
1725 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1726 CTL_EOL)) != 0) {
1727 goto err;
1728 }
1729
1730 octeon_eth_sysctl_wqe_pool_num = node->sysctl_num;
1731
1732 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1733 CTLFLAG_PERMANENT,
1734 CTLTYPE_INT, "cmd_pool",
1735 SYSCTL_DESCR("cmd pool available"),
1736 octeon_eth_sysctl_pool, 0, NULL,
1737 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1738 CTL_EOL)) != 0) {
1739 goto err;
1740 }
1741
1742 octeon_eth_sysctl_cmd_pool_num = node->sysctl_num;
1743
1744 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1745 CTLFLAG_PERMANENT,
1746 CTLTYPE_INT, "sg_pool",
1747 SYSCTL_DESCR("sg pool available"),
1748 octeon_eth_sysctl_pool, 0, NULL,
1749 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1750 CTL_EOL)) != 0) {
1751 goto err;
1752 }
1753
1754 octeon_eth_sysctl_sg_pool_num = node->sysctl_num;
1755
1756 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1757 CTLFLAG_PERMANENT | CTLFLAG_READONLY,
1758 CTLTYPE_INT, "pktbuf",
1759 SYSCTL_DESCR("input packet buffer size on POW"),
1760 octeon_eth_sysctl_rd, 0,
1761 &octeon_eth_param_pktbuf,
1762 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1763 CTL_EOL)) != 0) {
1764 goto err;
1765 }
1766
1767 octeon_eth_sysctl_pktbuf_num = node->sysctl_num;
1768
1769 return;
1770
1771 err:
1772 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
1773 }
1774
/*
 * sysctl handler for the read-write tuning knobs under hw.cnmac:
 * pko_cmd_w0_n2 and pip_dyn_rs accept 0/1; redir accepts three 3-bit
 * per-port redirection fields at a 4-bit stride.  Writing pip_dyn_rs
 * propagates to the PIP config of all three ports; writing redir also
 * flips promiscuous mode and the if_input hook of every port.
 * Returns EINVAL for out-of-range values or unknown nodes.
 */
static int
octeon_eth_sysctl_verify(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	struct octeon_eth_softc *sc;
	int i;
	int s;

	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (node.sysctl_num == octeon_eth_sysctl_pkocmdw0n2_num) {
		/* boolean knob */
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		return 0;
	}

	if (node.sysctl_num == octeon_eth_sysctl_pipdynrs_num) {
		/* boolean knob; apply to all three ports immediately */
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			sc = octeon_eth_gsc[i]; /* XXX */
			octeon_pip_prt_cfg_enable(sc->sc_pip, PIP_PRT_CFGN_DYN_RS, v);
		}
		splx(s);
		return 0;
	}

	if (node.sysctl_num == octeon_eth_sysctl_redir_num) {
		/* three 3-bit fields, one per port, 4 bits apart */
		if (v & ~((0x7 << (4 * 0)) | (0x7 << (4 * 1)) | (0x7 << (4 * 2))))
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			struct ifnet *ifp;

			sc = octeon_eth_gsc[i]; /* XXX */
			ifp = &sc->sc_ethercom.ec_if;

			sc->sc_redir = (octeon_eth_param_redir >> (4 * i)) & 0x7;
			if (sc->sc_redir == 0) {
				/* redirection off: restore normal input */
				if (ISSET(ifp->if_flags, IFF_PROMISC)) {
					CLR(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->if_input = ether_input;
			}
			else {
				/* redirection on: need promiscuous RX */
				if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
					SET(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->if_input = octeon_eth_recv_redir;
			}
		}
		splx(s);
		return 0;
	}

	return EINVAL;
}
1846
1847 static int
1848 octeon_eth_sysctl_pool(SYSCTLFN_ARGS)
1849 {
1850 int error, newval = 0;
1851 struct sysctlnode node;
1852 int s;
1853
1854 node = *rnode;
1855 node.sysctl_data = &newval;
1856 s = splnet();
1857 if (node.sysctl_num == octeon_eth_sysctl_pkt_pool_num) {
1858 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_PKT);
1859 } else if (node.sysctl_num == octeon_eth_sysctl_wqe_pool_num) {
1860 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_WQE);
1861 } else if (node.sysctl_num == octeon_eth_sysctl_cmd_pool_num) {
1862 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_CMD);
1863 } else if (node.sysctl_num == octeon_eth_sysctl_sg_pool_num) {
1864 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_SG);
1865 } else {
1866 splx(s);
1867 return EINVAL;
1868 }
1869 splx(s);
1870 if (error)
1871 return error;
1872 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1873 if (error || newp == NULL)
1874 return error;
1875
1876 return 0;
1877 }
1878
/*
 * Read-only sysctl handler for hw.cnmac.pktbuf: queries the FPA for
 * the current input packet buffer figure on POW, caches it in
 * octeon_eth_param_pktbuf and returns it to the caller.  Rejects
 * writes (newp != NULL) and unknown nodes (EINVAL).
 */
static int
octeon_eth_sysctl_rd(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	int s;

	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp != NULL)
		return error;

	if (node.sysctl_num == octeon_eth_sysctl_pktbuf_num) {
		uint64_t tmp;
		int n;

		s = splnet();
		tmp = octeon_fpa_query(0);
		n = (int)tmp;
		splx(s);
		*(int *)rnode->sysctl_data = n;
		octeon_eth_param_pktbuf = n;
		/*
		 * NOTE(review): oldp is dereferenced directly here even
		 * though sysctl_lookup() above already copied out the
		 * (stale) value; if oldp can be a userland pointer this
		 * is unsafe -- confirm against sysctl(9)/copyout(9).
		 */
		*(int *)oldp = n;
		return 0;
	}

	return EINVAL;
}
1909