/*	$NetBSD: if_cnmac.c,v 1.4 2016/07/11 16:15:35 matt Exp $	*/
2
3 #include <sys/cdefs.h>
4 #if 0
5 __KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.4 2016/07/11 16:15:35 matt Exp $");
6 #endif
7
8 #include "opt_octeon.h"
9
10 #ifdef OCTEON_ETH_DEBUG
11
12 #ifndef DIAGNOSTIC
13 #define DIAGNOSTIC
14 #endif
15
16 #ifndef DEBUG
17 #define DEBUG
18 #endif
19
20 #endif
21
22 /*
23 * If no free send buffer is available, free all the sent buffer and bail out.
24 */
25 #define OCTEON_ETH_SEND_QUEUE_CHECK
26
27 /* XXX XXX XXX XXX XXX XXX */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/pool.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/ioctl.h>
37 #include <sys/errno.h>
38 #include <sys/device.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/sysctl.h>
42 #include <sys/syslog.h>
43
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_ether.h>
48 #include <net/route.h>
49
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/in_var.h>
55 #include <netinet/ip.h>
56
57 #include <sys/bus.h>
58 #include <machine/intr.h>
59 #include <machine/endian.h>
60 #include <machine/locore.h>
61
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
64
65 #include <mips/cpuregs.h>
66
67 #include <mips/cavium/dev/octeon_asxreg.h>
68 #include <mips/cavium/dev/octeon_ciureg.h>
69 #include <mips/cavium/dev/octeon_npireg.h>
70 #include <mips/cavium/dev/octeon_gmxreg.h>
71 #include <mips/cavium/dev/octeon_ipdreg.h>
72 #include <mips/cavium/dev/octeon_pipreg.h>
73 #include <mips/cavium/dev/octeon_powreg.h>
74 #include <mips/cavium/dev/octeon_faureg.h>
75 #include <mips/cavium/dev/octeon_fpareg.h>
76 #include <mips/cavium/dev/octeon_bootbusreg.h>
77 #include <mips/cavium/include/iobusvar.h>
78 #include <mips/cavium/octeonvar.h>
79 #include <mips/cavium/dev/octeon_fpavar.h>
80 #include <mips/cavium/dev/octeon_gmxvar.h>
81 #include <mips/cavium/dev/octeon_fauvar.h>
82 #include <mips/cavium/dev/octeon_powvar.h>
83 #include <mips/cavium/dev/octeon_ipdvar.h>
84 #include <mips/cavium/dev/octeon_pipvar.h>
85 #include <mips/cavium/dev/octeon_pkovar.h>
86 #include <mips/cavium/dev/octeon_asxvar.h>
87 #include <mips/cavium/dev/octeon_smivar.h>
88 #include <mips/cavium/dev/if_cnmacvar.h>
89
90 #ifdef OCTEON_ETH_DEBUG
91 #define OCTEON_ETH_KASSERT(x) KASSERT(x)
92 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x)
93 #else
94 #define OCTEON_ETH_KASSERT(x)
95 #define OCTEON_ETH_KDASSERT(x)
96 #endif
97
98 /*
99 * Set the PKO to think command buffers are an odd length. This makes it so we
 * never have to divide a command across two buffers.
101 */
102 #define OCTEON_POOL_NWORDS_CMD \
103 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
104 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */
105
106 static void octeon_eth_buf_init(struct octeon_eth_softc *);
107
108 static int octeon_eth_match(device_t, struct cfdata *, void *);
109 static void octeon_eth_attach(device_t, device_t, void *);
110 static void octeon_eth_pip_init(struct octeon_eth_softc *);
111 static void octeon_eth_ipd_init(struct octeon_eth_softc *);
112 static void octeon_eth_pko_init(struct octeon_eth_softc *);
113 static void octeon_eth_asx_init(struct octeon_eth_softc *);
114 static void octeon_eth_smi_init(struct octeon_eth_softc *);
115
116 static void octeon_eth_board_mac_addr(uint8_t *, size_t, struct octeon_eth_softc *);
117
118 static int octeon_eth_mii_readreg(device_t, int, int);
119 static void octeon_eth_mii_writereg(device_t, int, int, int);
120 static void octeon_eth_mii_statchg(struct ifnet *);
121
122 static int octeon_eth_mediainit(struct octeon_eth_softc *);
123 static void octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
124 static int octeon_eth_mediachange(struct ifnet *);
125
126 static inline void octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
127 static inline void octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
128 static inline void octeon_eth_send_queue_flush(struct octeon_eth_softc *);
129 static inline void octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
130 static inline int octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
131 static inline void octeon_eth_send_queue_add(struct octeon_eth_softc *,
132 struct mbuf *, uint64_t *);
133 static inline void octeon_eth_send_queue_del(struct octeon_eth_softc *,
134 struct mbuf **, uint64_t **);
135 static inline int octeon_eth_buf_free_work(struct octeon_eth_softc *,
136 uint64_t *, uint64_t);
137 static inline void octeon_eth_buf_ext_free_m(struct mbuf *, void *, size_t, void *);
138 static inline void octeon_eth_buf_ext_free_ext(struct mbuf *, void *, size_t, void *);
139
140 static int octeon_eth_ioctl(struct ifnet *, u_long, void *);
141 static void octeon_eth_watchdog(struct ifnet *);
142 static int octeon_eth_init(struct ifnet *);
143 static void octeon_eth_stop(struct ifnet *, int);
144 static void octeon_eth_start(struct ifnet *);
145
146 static inline int octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
147 uint64_t);
148 static inline uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
149 static inline uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
150 int);
151 static inline int octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
152 struct mbuf *, uint64_t *, int *);
153 static inline int octeon_eth_send_makecmd(struct octeon_eth_softc *,
154 struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
155 static inline int octeon_eth_send_buf(struct octeon_eth_softc *,
156 struct mbuf *, uint64_t *);
157 static inline int octeon_eth_send(struct octeon_eth_softc *,
158 struct mbuf *);
159
160 static int octeon_eth_reset(struct octeon_eth_softc *);
161 static int octeon_eth_configure(struct octeon_eth_softc *);
162 static int octeon_eth_configure_common(struct octeon_eth_softc *);
163
164 static void octeon_eth_tick_free(void *arg);
165 static void octeon_eth_tick_misc(void *);
166
167 static inline int octeon_eth_recv_mbuf(struct octeon_eth_softc *,
168 uint64_t *, struct mbuf **);
169 static inline int octeon_eth_recv_check_code(struct octeon_eth_softc *,
170 uint64_t);
171 static inline int octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
172 uint64_t);
173 static inline int octeon_eth_recv_check_link(struct octeon_eth_softc *,
174 uint64_t);
175 static inline int octeon_eth_recv_check(struct octeon_eth_softc *,
176 uint64_t);
177 static inline int octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
178 static void octeon_eth_recv_redir(struct ifnet *, struct mbuf *);
179 static inline void octeon_eth_recv_intr(void *, uint64_t *);
180
181 /* device driver context */
182 static struct octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
183 static void *octeon_eth_pow_recv_ih;
184
185 /* sysctl'able parameters */
186 int octeon_eth_param_pko_cmd_w0_n2 = 1;
187 int octeon_eth_param_pip_dyn_rs = 1;
188 int octeon_eth_param_redir = 0;
189 int octeon_eth_param_pktbuf = 0;
190 int octeon_eth_param_rate = 0;
191 int octeon_eth_param_intr = 0;
192
193 CFATTACH_DECL_NEW(cnmac, sizeof(struct octeon_eth_softc),
194 octeon_eth_match, octeon_eth_attach, NULL, NULL);
195
196 #ifdef OCTEON_ETH_DEBUG
197
/*
 * Per-device event counters, attached at the end of attach via
 * OCTEON_EVCNT_ATTACH_EVCNTS().  Compiled in only under
 * OCTEON_ETH_DEBUG; OCTEON_EVCNT_INC() is a no-op otherwise.
 */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx, MISC, NULL, "rx"),
	_ENTRY(rxint, INTR, NULL, "rx intr"),
	_ENTRY(rxrs, MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc, MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput, MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc, MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput, MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode, MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix, MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb, MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink, MISC, NULL, "rx link error"),
	_ENTRY(rxerroff, MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort, MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble, MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc, MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress, MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp, MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok, MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx, MISC, NULL, "tx"),
	_ENTRY(txadd, MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc, MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget, MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc, MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget, MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput, MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel, MISC, NULL, "tx del"),
	_ENTRY(txerr, MISC, NULL, "tx error"),
	_ENTRY(txerrcmd, MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf, MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink, MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd, MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
234 #endif
235
236 /* ---- buffer management */
237
/*
 * Parameters of the four FPA buffer pools this driver creates
 * (packet data, work-queue entries, PKO command buffers, gather
 * buffers).  Filled in from the OCTEON_POOL_* constants in the
 * headers; instantiated once in octeon_eth_buf_init().
 */
static const struct octeon_eth_pool_param {
	int			poolno;		/* FPA pool number */
	size_t			size;		/* size of one buffer */
	size_t			nelems;		/* number of buffers */
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
/* Pool handles, indexed by FPA pool number; aliased below by use. */
struct octeon_fpa_buf *octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
255
256 static void
257 octeon_eth_buf_init(struct octeon_eth_softc *sc)
258 {
259 static int once;
260 int i;
261 const struct octeon_eth_pool_param *pp;
262 struct octeon_fpa_buf *fb;
263
264 if (once == 1)
265 return;
266 once = 1;
267
268 for (i = 0; i < (int)__arraycount(octeon_eth_pool_params); i++) {
269 pp = &octeon_eth_pool_params[i];
270 octeon_fpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
271 octeon_eth_pools[i] = fb;
272 }
273 }
274
275 /* ---- autoconf */
276
277 static int
278 octeon_eth_match(device_t parent, struct cfdata *match, void *aux)
279 {
280 struct octeon_gmx_attach_args *ga = aux;
281
282 if (strcmp(match->cf_name, ga->ga_name) != 0) {
283 return 0;
284 }
285 return 1;
286 }
287
/*
 * Autoconf attach: bring one GMX port up as an ethernet interface.
 *
 * Initializes the per-port hardware sub-blocks (PIP/IPD/PKO/ASX/SMI),
 * the FAU "tx done" counter, the MII/media layer and the ifnet, then
 * installs the POW receive interrupt handler (shared by all ports) on
 * the first attach.
 */
static void
octeon_eth_attach(device_t parent, device_t self, void *aux)
{
	struct octeon_eth_softc *sc = device_private(self);
	struct octeon_gmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_regt = ga->ga_regt;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;

	sc->sc_init_flag = 0;
	/*
	 * XXXUEBAYASI
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	/* CN30XX and earlier silicon needs the alignment/in-band quirks. */
	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_PRE_ALIGN);
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_RX_INBND);
	}

	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc);
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	/* Register ourselves in the global per-port softc table. */
	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	octeon_gmx_stats_init(sc->sc_gmx_port);

	callout_init(&sc->sc_tick_misc_ch, 0);
	callout_init(&sc->sc_tick_free_ch, 0);

	/* FAU register used by hardware to report completed transmits. */
	octeon_fau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	octeon_fau_op_set_8(&sc->sc_fau_done, 0);

	/* Per-port hardware sub-blocks. */
	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Let the GMX port code reach our sub-block handles. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
	/* XXX */
	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;

	/* XXX */
	sc->sc_pow = &octeon_pow_softc;

	octeon_eth_mediainit(sc);

	strncpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	ifp->if_init = octeon_eth_init;
	ifp->if_stop = octeon_eth_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX: not yet tx checksum */
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	octeon_gmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	octeon_gmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/* Rate-limit caps (1/sec) for the receive error reports.  XXX */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_short_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_preamble_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_crc_cap.tv_sec = 1;
#ifdef OCTEON_ETH_DEBUG
	sc->sc_rate_recv_fixup_odd_nibble_addr_cap.tv_sec = 1;
#endif
	/* XXX */

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* The POW receive handler is global; install it only once. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = octeon_pow_intr_establish(OCTEON_POW_GROUP_PIP,
		    IPL_NET, octeon_eth_recv_intr, NULL, NULL);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    device_xname(sc->sc_dev));
}
398
399 /* ---- submodules */
400
401 /* XXX */
402 static void
403 octeon_eth_pip_init(struct octeon_eth_softc *sc)
404 {
405 struct octeon_pip_attach_args pip_aa;
406
407 pip_aa.aa_port = sc->sc_port;
408 pip_aa.aa_regt = sc->sc_regt;
409 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
410 pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
411 pip_aa.aa_ip_offset = sc->sc_ip_offset;
412 octeon_pip_init(&pip_aa, &sc->sc_pip);
413 }
414
415 /* XXX */
416 static void
417 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
418 {
419 struct octeon_ipd_attach_args ipd_aa;
420
421 ipd_aa.aa_port = sc->sc_port;
422 ipd_aa.aa_regt = sc->sc_regt;
423 ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
424 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
425 octeon_ipd_init(&ipd_aa, &sc->sc_ipd);
426 }
427
428 /* XXX */
429 static void
430 octeon_eth_pko_init(struct octeon_eth_softc *sc)
431 {
432 struct octeon_pko_attach_args pko_aa;
433
434 pko_aa.aa_port = sc->sc_port;
435 pko_aa.aa_regt = sc->sc_regt;
436 pko_aa.aa_cmdptr = &sc->sc_cmdptr;
437 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
438 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
439 octeon_pko_init(&pko_aa, &sc->sc_pko);
440 }
441
442 /* XXX */
443 static void
444 octeon_eth_asx_init(struct octeon_eth_softc *sc)
445 {
446 struct octeon_asx_attach_args asx_aa;
447
448 asx_aa.aa_port = sc->sc_port;
449 asx_aa.aa_regt = sc->sc_regt;
450 octeon_asx_init(&asx_aa, &sc->sc_asx);
451 }
452
453 static void
454 octeon_eth_smi_init(struct octeon_eth_softc *sc)
455 {
456 struct octeon_smi_attach_args smi_aa;
457
458 smi_aa.aa_port = sc->sc_port;
459 smi_aa.aa_regt = sc->sc_regt;
460 octeon_smi_init(&smi_aa, &sc->sc_smi);
461 octeon_smi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
462 }
463
464 /* ---- XXX */
465
466 #define ADDR2UINT64(u, a) \
467 do { \
468 u = \
469 (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
470 ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
471 ((uint64_t)a[4] << 8) | ((uint64_t)a[5] << 0)); \
472 } while (0)
473 #define UINT642ADDR(a, u) \
474 do { \
475 a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
476 a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
477 a[4] = (uint8_t)((u) >> 8); a[5] = (uint8_t)((u) >> 0); \
478 } while (0)
479
480 static void
481 octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size, struct octeon_eth_softc *sc)
482 {
483 prop_dictionary_t dict;
484 prop_data_t ea;
485
486 dict = device_properties(sc->sc_dev);
487 KASSERT(dict != NULL);
488 ea = prop_dictionary_get(dict, "mac-address");
489 KASSERT(ea != NULL);
490 memcpy(enaddr, prop_data_data_nocopy(ea), size);
491 }
492
493 /* ---- media */
494
495 static int
496 octeon_eth_mii_readreg(device_t self, int phy_addr, int reg)
497 {
498 struct octeon_eth_softc *sc = device_private(self);
499
500 return octeon_smi_read(sc->sc_smi, phy_addr, reg);
501 }
502
503 static void
504 octeon_eth_mii_writereg(device_t self, int phy_addr, int reg, int value)
505 {
506 struct octeon_eth_softc *sc = device_private(self);
507
508 octeon_smi_write(sc->sc_smi, phy_addr, reg, value);
509 }
510
/*
 * MII status-change hook: quiesce PKO and the GMX port, reset the
 * port so the new link parameters take effect, then re-enable.
 * The filter is reloaded because the reset may clobber it.
 */
static void
octeon_eth_mii_statchg(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* Disable output before touching the port. */
	octeon_pko_port_enable(sc->sc_pko, 0);
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		octeon_gmx_set_filter(sc->sc_gmx_port);

	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
}
527
/*
 * Initialize the MII/ifmedia layer and probe the PHY whose address is
 * provided via the "phy-addr" device property.  If no PHY is found we
 * fall back to a fixed IFM_NONE media entry.  Always returns 0.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	prop_object_t phy;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
	KASSERT(phy != NULL);

	mii_attach(sc->sc_dev, &sc->sc_mii,
	    0xffffffff, prop_number_integer_value(phy),
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* XXX XXX XXX */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) {
		/* A PHY answered: default to autonegotiation.  XXX */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		/* XXX XXX XXX */
	} else {
		/* No PHY found: register and select a "none" medium.  XXX */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		/* XXX XXX XXX */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
		/* XXX XXX XXX */
	}
	/* XXX XXX XXX */

	return 0;
}
565
566 static void
567 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
568 {
569 struct octeon_eth_softc *sc = ifp->if_softc;
570
571 mii_pollstat(&sc->sc_mii);
572
573 ifmr->ifm_status = sc->sc_mii.mii_media_status;
574 ifmr->ifm_active = sc->sc_mii.mii_media_active;
575 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
576 sc->sc_gmx_port->sc_port_flowflags;
577 }
578
579 static int
580 octeon_eth_mediachange(struct ifnet *ifp)
581 {
582 struct octeon_eth_softc *sc = ifp->if_softc;
583
584 mii_mediachg(&sc->sc_mii);
585
586 return 0;
587 }
588
589 /* ---- send buffer garbage collection */
590
/*
 * Start an asynchronous fetch of the FAU "tx done" counter so the
 * value is already in flight when _flush_fetch() is called later.
 * Must be paired with octeon_eth_send_queue_flush_fetch().
 */
static inline void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
598
/*
 * Complete the counter fetch started by _flush_prefetch() and cache
 * the result in sc_hard_done_cnt.  The counter counts completed
 * transmits downward, so the value is always <= 0.
 */
static inline void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	/* Tolerate a fetch without a prior prefetch in non-debug builds. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
611
/*
 * Reclaim resources for packets the hardware has finished sending.
 * sc_hard_done_cnt (negative) tells how many completions are pending;
 * for each one, pop the oldest mbuf/gather-buffer pair off sc_sendq
 * and free both.  Finally credit the FAU counter back by the number
 * reclaimed; _flush_sync() later confirms the credit took effect.
 */
static inline void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		/* Return the gather buffer to its FPA pool. */
		octeon_fpa_buf_put(octeon_eth_fb_sg, gbuf);
		OCTEON_EVCNT_INC(sc, txbufgbput);

		m_freem(m);
	}

	/* Add back what we consumed; completion read deferred to sync. */
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}
636
/*
 * Finish a flush started by octeon_eth_send_queue_flush(): wait for
 * the FAU credit to complete and drop the reclaimed packets from the
 * soft request count.  No-op if nothing was flushed.
 */
static inline void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX XXX XXX */
	octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX XXX XXX */

	sc->sc_flush = 0;
}
653
/*
 * Return non-zero when no free send slot is available.  The number of
 * in-flight packets is the soft request count plus the (negative)
 * hardware-done count; at GATHER_QUEUE_SIZE - 1 we flush everything
 * already sent and tell the caller to back off.
 */
static inline int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
672
673 /*
674 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
675 * buffer. Other mbuf members may be used by m_freem(), so don't touch them!
676 */
677
/*
 * Overlay for an mbuf on the send queue: the union re-uses the storage
 * of m_nextpkt for the SIMPLEQ linkage and of m_paddr for the gather
 * buffer pointer (see the comment above).  The offsetof-based padding
 * keeps each overlaid field at exactly the offset of the mbuf field
 * it replaces.
 */
struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;
		struct {
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
693
/*
 * Append a transmitted mbuf and its gather buffer to the send queue
 * (via the _send_queue_entry overlay) so they can be freed once the
 * hardware reports completion.  Tracks outstanding external-storage
 * mbufs in sc_ext_callback_cnt.
 */
static inline void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;

	OCTEON_EVCNT_INC(sc, txadd);
}
708
/*
 * Pop the oldest entry off the send queue, returning the original
 * mbuf in *rm and its gather buffer in *rgbuf.  The queue must not
 * be empty.  Undoes the ext-callback accounting done by _queue_add().
 */
static inline void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}

	OCTEON_EVCNT_INC(sc, txdel);
}
729
/*
 * Return a received work-queue entry, and the packet buffer it points
 * at (if any), to their FPA pools.  Always returns 0.
 */
static inline int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = work[3] & PIP_WQE_WORD3_ADDR;
		/*
		 * Round down to the buffer start; 2048 is presumably the
		 * packet pool buffer size (OCTEON_POOL_SIZE_PKT) — confirm.
		 */
		start_buffer = addr & ~(2048 - 1);

		octeon_fpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
751
/*
 * External-storage free callback for mbufs whose data lives inside the
 * work-queue entry itself (dynamic-short receive): return the WQE to
 * its FPA pool and recycle the mbuf.  work[0] holds the softc pointer
 * (used for event counting under OCTEON_ETH_DEBUG only).
 */
static inline void
octeon_eth_buf_ext_free_m(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
772
/*
 * External-storage free callback for mbufs whose data lives in an FPA
 * packet buffer: return both the WQE and the packet buffer to their
 * pools and recycle the mbuf.  work[0] holds the softc pointer (used
 * for event counting under OCTEON_ETH_DEBUG only).
 */
static inline void
octeon_eth_buf_ext_free_ext(struct mbuf *m, void *buf, size_t size,
    void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	octeon_fpa_buf_put(octeon_eth_fb_pkt, buf);
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
795
796 /* ---- ifnet interfaces */
797
/*
 * ifnet ioctl handler.  Media ioctls get flow-control fixups applied
 * before being passed to ifmedia; everything else goes to ether_ioctl,
 * with ENETRESET translated into a multicast filter reload.  Kicks
 * the transmit path before returning.
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW shorthand enables both directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		/* XXX: Flow control */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				octeon_gmx_set_filter(sc->sc_gmx_port);
			error = 0;
		}
		break;
	}
	/* Restart output in case the state change unblocked the queue. */
	octeon_eth_start(ifp);
	splx(s);

	return (error);
}
844
845 /* ---- send (output) */
846
/*
 * Build PKO command WORD0 for a packet of `len' bytes in `segs'
 * segments.  fau0 names the FAU register the hardware updates on
 * completion (our "tx done" counter); gather mode (g=1) is selected
 * whenever there is more than one segment.  Field meanings are those
 * of octeon_pko_cmd_word0() — see the inline per-argument comments.
 */
static inline uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return octeon_pko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,		/* sz1 */
	    OCT_FAU_OP_SIZE_64,		/* sz0 */
	    1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
	    0,				/* le */
	    octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
	    1, 0,			/* q, r */
	    (segs == 1) ? 0 : 1,	/* g */
	    0, 0, 1,			/* ipoffp1, ii, df */
	    segs, (int)len);		/* segs, totalbytes */
}
861
/*
 * Build PKO command WORD1 (or a gather-list entry): `size' bytes at
 * physical address `addr'.  i=0/back=0; the pool field names the
 * gather buffer pool the hardware would free into.
 */
static inline uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return octeon_pko_cmd_word1(
	    0, 0,			/* i, back */
	    FPA_GATHER_BUFFER_POOL,	/* pool */
	    size, addr);		/* size, addr */
}
870
/*
 * Fill the gather buffer with one WORD1 descriptor per physically
 * contiguous piece of the mbuf chain.  An mbuf whose data crosses a
 * 4 KiB page boundary is split in two, presumably because kvtophys()
 * only guarantees contiguity within a page — confirm.  Returns 0 on
 * success (segment count in *rsegs) or 1 if more than 63 segments
 * would be needed.
 */
static inline int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uintptr_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* Data crosses a page boundary: emit the head part. */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    kvtophys((vaddr_t)m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX XXX XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* Emit the (remaining) part of this mbuf's data. */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    kvtophys((vaddr_t)(m->m_data + rlen)));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
924
925 static inline int
926 octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
927 uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
928 {
929 uint64_t pko_cmd_w0, pko_cmd_w1;
930 int segs;
931 int result = 0;
932
933 if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
934 log(LOG_WARNING, "%s: there are a lot of number of segments"
935 " of transmission data", device_xname(sc->sc_dev));
936 result = 1;
937 goto done;
938 }
939
940 /*
941 * segs == 1 -> link mode (single continuous buffer)
942 * WORD1[size] is number of bytes pointed by segment
943 *
944 * segs > 1 -> gather mode (scatter-gather buffer)
945 * WORD1[size] is number of segments
946 */
947 pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
948 0, m->m_pkthdr.len, segs);
949 if (segs == 1) {
950 pko_cmd_w1 = octeon_eth_send_makecmd_w1(
951 m->m_pkthdr.len, kvtophys((vaddr_t)m->m_data));
952 } else {
953 #ifdef __mips_n32
954 KASSERT(MIPS_KSEG0_P(gbuf));
955 pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
956 MIPS_KSEG0_TO_PHYS(gbuf));
957 #else
958 pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
959 MIPS_XKPHYS_TO_PHYS(gbuf));
960 #endif
961 }
962
963 *rpko_cmd_w0 = pko_cmd_w0;
964 *rpko_cmd_w1 = pko_cmd_w1;
965
966 done:
967 return result;
968 }
969
/*
 * Write a two-word PKO command into the current command buffer and
 * ring the doorbell.  When the two words we just wrote land on the
 * last usable slot, a fresh command buffer is allocated from the FPA
 * and chained in via the final word (the hardware follows that link);
 * otherwise we simply advance the index.  Returns non-zero if a new
 * command buffer was needed but could not be allocated.
 */
static inline int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

#ifdef __mips_n32
	KASSERT((sc->sc_cmdptr.cmdptr & ~MIPS_PHYS_MASK) == 0);
	cmdptr = (uint64_t *)MIPS_PHYS_TO_KSEG0(sc->sc_cmdptr.cmdptr);
#else
	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
#endif
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		/* Buffer exhausted: chain in a fresh command buffer. */
		buf = octeon_fpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    device_xname(sc->sc_dev));
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	/* Tell PKO that two more command words are available. */
	octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
1016
1017 static inline int
1018 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1019 uint64_t *gbuf)
1020 {
1021 int result = 0, error;
1022 uint64_t pko_cmd_w0, pko_cmd_w1;
1023
1024 error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1025 if (error != 0) {
1026 /* already logging */
1027 OCTEON_EVCNT_INC(sc, txerrmkcmd);
1028 result = error;
1029 goto done;
1030 }
1031
1032 error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
1033 if (error != 0) {
1034 /* already logging */
1035 OCTEON_EVCNT_INC(sc, txerrcmd);
1036 result = error;
1037 }
1038
1039 done:
1040 return result;
1041 }
1042
/*
 * Transmit one mbuf chain: allocate a gather buffer from the FPA,
 * hand the packet to octeon_eth_send_buf(), and on success park the
 * mbuf/gather pair on the send queue until the hardware reports
 * completion.  On failure the gather buffer is returned to its pool;
 * the caller still owns (and frees) the mbuf.  Returns 0 on success.
 */
static inline int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	OCTEON_EVCNT_INC(sc, tx);

	gaddr = octeon_fpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: can not allocate gather buffer from free pool allocator\n",
		    device_xname(sc->sc_dev));
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		result = 1;
		goto done;
	}
	OCTEON_EVCNT_INC(sc, txbufgbget);

	/* Map the physical gather buffer into a direct-mapped VA. */
#ifdef __mips_n32
	KASSERT((gaddr & ~MIPS_PHYS_MASK) == 0);
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_KSEG0(gaddr);
#else
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);
#endif

	OCTEON_ETH_KASSERT(gbuf != NULL);

	error = octeon_eth_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		octeon_fpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		OCTEON_EVCNT_INC(sc, txbufgbput);
		result = error;
		goto done;
	}

	/* Defer freeing until the hardware signals completion. */
	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
1086
/*
 * ifnet if_start handler: drain the output queue into the PKO.
 *
 * The send-queue "flush" helpers come in prefetch/fetch pairs: a
 * prefetch posts an asynchronous IOBDMA read of the free-counter, and
 * the matching fetch waits for its result.  Every exit path from this
 * function keeps those calls balanced — each prefetch is completed by
 * exactly one fetch before another prefetch is posted.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	/* Bail unless the interface is RUNNING and not OACTIVE. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		/* Peek first: the packet stays queued if we cannot send. */
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* XXX XXX XXX */
		/* Complete the prefetch posted before this iteration. */
		octeon_eth_send_queue_flush_fetch(sc);

		/*
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			/*
			 * prefetch/fetch are balanced at this point, so a
			 * bare return (skipping `last:`) is safe here.
			 */
			return;
		}
		/* XXX XXX XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m);

		/* XXX XXX XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			/* send failed; we still own the mbuf, drop it */
			IF_DROP(&ifp->if_snd);
			m_freem(m);
			log(LOG_WARNING,
			  "%s: failed in the transmission of the packet\n",
			  device_xname(sc->sc_dev));
			OCTEON_EVCNT_INC(sc, txerr);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX XXX XXX */

		/*
		 * send next iobdma request
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

/*
 * Don't schedule send-buffer-free callout every time - those buffers are freed
 * by "free tick". This makes some packets like NFS slower.
 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* Complete the outstanding prefetch before leaving. */
	octeon_eth_send_queue_flush_fetch(sc);
}
1180
1181 static void
1182 octeon_eth_watchdog(struct ifnet *ifp)
1183 {
1184 struct octeon_eth_softc *sc = ifp->if_softc;
1185
1186 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1187
1188 octeon_eth_configure(sc);
1189
1190 SET(ifp->if_flags, IFF_RUNNING);
1191 CLR(ifp->if_flags, IFF_OACTIVE);
1192 ifp->if_timer = 0;
1193
1194 octeon_eth_start(ifp);
1195 }
1196
1197 static int
1198 octeon_eth_init(struct ifnet *ifp)
1199 {
1200 struct octeon_eth_softc *sc = ifp->if_softc;
1201
1202 /* XXX don't disable commonly used parts!!! XXX */
1203 if (sc->sc_init_flag == 0) {
1204 /* Cancel any pending I/O. */
1205 octeon_eth_stop(ifp, 0);
1206
1207 /* Initialize the device */
1208 octeon_eth_configure(sc);
1209
1210 octeon_pko_enable(sc->sc_pko);
1211 octeon_ipd_enable(sc->sc_ipd);
1212
1213 sc->sc_init_flag = 1;
1214 } else {
1215 octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1216 }
1217 octeon_eth_mediachange(ifp);
1218
1219 octeon_gmx_set_filter(sc->sc_gmx_port);
1220
1221 callout_reset(&sc->sc_tick_misc_ch, hz, octeon_eth_tick_misc, sc);
1222 callout_reset(&sc->sc_tick_free_ch, hz, octeon_eth_tick_free, sc);
1223
1224 SET(ifp->if_flags, IFF_RUNNING);
1225 CLR(ifp->if_flags, IFF_OACTIVE);
1226
1227 return 0;
1228 }
1229
1230 static void
1231 octeon_eth_stop(struct ifnet *ifp, int disable)
1232 {
1233 struct octeon_eth_softc *sc = ifp->if_softc;
1234
1235 callout_stop(&sc->sc_tick_misc_ch);
1236 callout_stop(&sc->sc_tick_free_ch);
1237
1238 mii_down(&sc->sc_mii);
1239
1240 octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1241
1242 /* Mark the interface as down and cancel the watchdog timer. */
1243 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1244 ifp->if_timer = 0;
1245 }
1246
1247 /* ---- misc */
1248
1249 #define PKO_INDEX_MASK ((1ULL << 12/* XXX */) - 1)
1250
1251 static int
1252 octeon_eth_reset(struct octeon_eth_softc *sc)
1253 {
1254 octeon_gmx_reset_speed(sc->sc_gmx_port);
1255 octeon_gmx_reset_flowctl(sc->sc_gmx_port);
1256 octeon_gmx_reset_timing(sc->sc_gmx_port);
1257
1258 return 0;
1259 }
1260
1261 static int
1262 octeon_eth_configure(struct octeon_eth_softc *sc)
1263 {
1264 octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1265
1266 octeon_eth_reset(sc);
1267
1268 octeon_eth_configure_common(sc);
1269
1270 octeon_pko_port_config(sc->sc_pko);
1271 octeon_pko_port_enable(sc->sc_pko, 1);
1272 octeon_pip_port_config(sc->sc_pip);
1273
1274 octeon_gmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
1275 octeon_gmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);
1276
1277 octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1278
1279 return 0;
1280 }
1281
1282 static int
1283 octeon_eth_configure_common(struct octeon_eth_softc *sc)
1284 {
1285 static int once;
1286
1287 if (once == 1)
1288 return 0;
1289 once = 1;
1290
1291 octeon_ipd_config(sc->sc_ipd);
1292 #ifdef OCTEON_ETH_IPD_RED
1293 octeon_ipd_red(sc->sc_ipd, RECV_QUEUE_SIZE >> 2, RECV_QUEUE_SIZE >> 3);
1294 #endif
1295 octeon_pko_config(sc->sc_pko);
1296
1297 octeon_pow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);
1298
1299 return 0;
1300 }
1301
1302 /* ---- receive (input) */
1303
/*
 * Wrap a received POW work-queue entry in an mbuf without copying.
 *
 * The packet data is attached as external storage: either the small
 * "dynamic short" area inside the WQE itself, or the 2KB FPA packet
 * buffer the WQE points at.  The matching ext_free callback returns
 * the storage (and the WQE) to the FPA when the mbuf is freed.
 *
 * Returns 0 and stores the new mbuf in *rm on success; returns 1 if
 * no mbuf header could be allocated (the WQE is then untouched and
 * remains the caller's to free).
 */
static inline int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(struct mbuf *, void *, size_t, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];	/* contains the packet length */
	uint64_t word2 = work[2];	/* error/offload and buffer-count bits */
	uint64_t word3 = work[3];	/* physical address of the packet buffer */

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		/* packet data lives inline in the WQE, after the header words */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		/* packet data lives in a separate 2KB-aligned FPA buffer */
		vaddr_t addr;
		vaddr_t start_buffer;

#ifdef __mips_n32
		KASSERT((word3 & ~MIPS_PHYS_MASK) == 0);
		addr = MIPS_PHYS_TO_KSEG0(word3 & PIP_WQE_WORD3_ADDR);
#else
		addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
#endif
		/* round down to the buffer start for the ext_free callback */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* packet length sits in the top bits of WORD1 */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m_set_rcvif(m, &sc->sc_ethercom.ec_if);
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1368
1369 static inline int
1370 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1371 {
1372 uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1373
1374 if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1375 return 0;
1376
1377 /* this error is harmless */
1378 if (opecode == PIP_OVER_ERR)
1379 return 0;
1380
1381 return 1;
1382 }
1383
1384 static inline int
1385 octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
1386 {
1387 if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
1388 return 1;
1389 return 0;
1390 }
1391
1392 static inline int
1393 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1394 {
1395 if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port)))
1396 return 1;
1397 return 0;
1398 }
1399
1400 static inline int
1401 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1402 {
1403 if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1404 if (ratecheck(&sc->sc_rate_recv_check_link_last,
1405 &sc->sc_rate_recv_check_link_cap))
1406 log(LOG_DEBUG,
1407 "%s: link is not up, the packet was dropped\n",
1408 device_xname(sc->sc_dev));
1409 OCTEON_EVCNT_INC(sc, rxerrlink);
1410 return 1;
1411 }
1412
1413 #if 0 /* XXX Performance tunig (Jumbo-frame is not supported yet!) */
1414 if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1415 /* XXX jumbo frame */
1416 if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1417 &sc->sc_rate_recv_check_jumbo_cap))
1418 log(LOG_DEBUG,
1419 "jumbo frame was received\n");
1420 OCTEON_EVCNT_INC(sc, rxerrjmb);
1421 return 1;
1422 }
1423 #endif
1424
1425 if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1426
1427 if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1428 PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1429 /* no logging */
1430 /* XXX inclement special error count */
1431 } else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1432 PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1433 /* not an erorr. it's because of overload */
1434 } else {
1435
1436 if (ratecheck(&sc->sc_rate_recv_check_code_last,
1437 &sc->sc_rate_recv_check_code_cap))
1438 log(LOG_WARNING,
1439 "%s: the reception error had occured, "
1440 "the packet was dropped (error code = %" PRId64 ")\n",
1441 device_xname(sc->sc_dev), word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1442 }
1443 OCTEON_EVCNT_INC(sc, rxerrcode);
1444 return 1;
1445 }
1446
1447 return 0;
1448 }
1449
/*
 * Receive one POW work-queue entry: validate it, wrap the packet in
 * an mbuf and hand it to the network stack.
 *
 * Returns 0 when the packet was delivered; 1 when it was dropped (the
 * work entry and its packet buffer have then already been returned to
 * the FPA pools by octeon_eth_buf_free_work()).
 *
 * The send-queue prefetch/fetch pairs below opportunistically reclaim
 * transmitted buffers while we are in the interrupt path anyway; they
 * are kept balanced on every exit path, including `drop`.
 */
static inline int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX XXX XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX XXX XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_ethercom.ec_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	/* link down or hardware receive error: drop */
	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* mbuf header allocation failed: drop */
	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	/* set checksum-offload flags from the hardware's verdict */
	octeon_ipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* count input packet */
	ifp->if_ipackets++;

	/* XXX XXX XXX */
	/* complete the prefetch above and reclaim sent buffers */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}
	/* XXX XXX XXX */

	bpf_mtap(ifp, m);

	/* XXX XXX XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX XXX XXX */

	if_percpuq_enqueue(ifp->if_percpuq, m);

	return 0;

drop:
	/* XXX XXX XXX */
	/* balance the prefetch posted at function entry */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX XXX XXX */

	return result;
}
1528
1529 static void
1530 octeon_eth_recv_redir(struct ifnet *ifp, struct mbuf *m)
1531 {
1532 struct octeon_eth_softc *rsc = ifp->if_softc;
1533 struct octeon_eth_softc *sc = NULL;
1534 int i;
1535
1536 for (i = 0; i < 3 /* XXX */; i++) {
1537 if (rsc->sc_redir & (1 << i))
1538 sc = octeon_eth_gsc[i];
1539 }
1540
1541 if (sc == NULL) {
1542 m_freem(m);
1543 return;
1544 }
1545 octeon_eth_send_queue_flush_prefetch(sc);
1546
1547 octeon_eth_send_queue_flush_fetch(sc);
1548
1549 if (octeon_eth_send_queue_is_full(sc)) {
1550 m_freem(m);
1551 return;
1552 }
1553 if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
1554 octeon_eth_send_queue_flush(sc);
1555
1556 if (octeon_eth_send(sc, m)) {
1557 IF_DROP(&ifp->if_snd);
1558 m_freem(m);
1559 } else {
1560 sc->sc_soft_req_cnt++;
1561 }
1562
1563 if (sc->sc_flush)
1564 octeon_eth_send_queue_flush_sync(sc);
1565 }
1566
1567 static inline void
1568 octeon_eth_recv_intr(void *data, uint64_t *work)
1569 {
1570 struct octeon_eth_softc *sc;
1571 int port;
1572
1573 OCTEON_ETH_KASSERT(work != NULL);
1574
1575 port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1576
1577 OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1578
1579 sc = octeon_eth_gsc[port];
1580
1581 OCTEON_ETH_KASSERT(sc != NULL);
1582 OCTEON_ETH_KASSERT(port == sc->sc_port);
1583
1584 /* XXX process all work queue entries anyway */
1585
1586 (void)octeon_eth_recv(sc, work);
1587 }
1588
1589 /* ---- tick */
1590
1591 /*
1592 * octeon_eth_tick_free
1593 *
1594 * => garbage collect send gather buffer / mbuf
1595 * => called at softclock
1596 */
1597 static void
1598 octeon_eth_tick_free(void *arg)
1599 {
1600 struct octeon_eth_softc *sc = arg;
1601 int timo;
1602 int s;
1603
1604 s = splnet();
1605 /* XXX XXX XXX */
1606 if (sc->sc_soft_req_cnt > 0) {
1607 octeon_eth_send_queue_flush_prefetch(sc);
1608 octeon_eth_send_queue_flush_fetch(sc);
1609 octeon_eth_send_queue_flush(sc);
1610 octeon_eth_send_queue_flush_sync(sc);
1611 }
1612 /* XXX XXX XXX */
1613
1614 /* XXX XXX XXX */
1615 /* ??? */
1616 timo = hz - (100 * sc->sc_ext_callback_cnt);
1617 if (timo < 10)
1618 timo = 10;
1619 callout_schedule(&sc->sc_tick_free_ch, timo);
1620 /* XXX XXX XXX */
1621 splx(s);
1622 }
1623
1624 /*
1625 * octeon_eth_tick_misc
1626 *
1627 * => collect statistics
1628 * => check link status
1629 * => called at softclock
1630 */
1631 static void
1632 octeon_eth_tick_misc(void *arg)
1633 {
1634 struct octeon_eth_softc *sc = arg;
1635 struct ifnet *ifp;
1636 int s;
1637
1638 s = splnet();
1639
1640 ifp = &sc->sc_ethercom.ec_if;
1641
1642 octeon_gmx_stats(sc->sc_gmx_port);
1643 octeon_pip_stats(sc->sc_pip, ifp, sc->sc_port);
1644 mii_tick(&sc->sc_mii);
1645
1646 splx(s);
1647
1648 callout_schedule(&sc->sc_tick_misc_ch, hz);
1649 }
1650
1651 /* ---- odd nibble preamble workaround (software CRC processing) */
1652
1653 /* ---- sysctl */
1654
/* sysctl handler prototypes (hw.cnmac.* subtree) */
static int	octeon_eth_sysctl_verify(SYSCTLFN_ARGS);
static int	octeon_eth_sysctl_pool(SYSCTLFN_ARGS);
static int	octeon_eth_sysctl_rd(SYSCTLFN_ARGS);

/*
 * Dynamically assigned sysctl node numbers, recorded at creation time
 * so the shared handlers can tell which node they were invoked for.
 */
static int	octeon_eth_sysctl_pkocmdw0n2_num;
static int	octeon_eth_sysctl_pipdynrs_num;
static int	octeon_eth_sysctl_redir_num;
static int	octeon_eth_sysctl_pkt_pool_num;
static int	octeon_eth_sysctl_wqe_pool_num;
static int	octeon_eth_sysctl_cmd_pool_num;
static int	octeon_eth_sysctl_sg_pool_num;
static int	octeon_eth_sysctl_pktbuf_num;
1667
1668 /*
1669 * Set up sysctl(3) MIB, hw.cnmac.*.
1670 */
1671 SYSCTL_SETUP(sysctl_octeon_eth, "sysctl cnmac subtree setup")
1672 {
1673 int rc;
1674 int octeon_eth_sysctl_root_num;
1675 const struct sysctlnode *node;
1676
1677 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
1678 0, CTLTYPE_NODE, "hw", NULL,
1679 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
1680 goto err;
1681 }
1682
1683 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1684 0, CTLTYPE_NODE, "cnmac",
1685 SYSCTL_DESCR("cnmac interface controls"),
1686 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
1687 goto err;
1688 }
1689
1690 octeon_eth_sysctl_root_num = node->sysctl_num;
1691
1692 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1693 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1694 CTLTYPE_INT, "pko_cmd_w0_n2",
1695 SYSCTL_DESCR("PKO command WORD0 N2 bit"),
1696 octeon_eth_sysctl_verify, 0,
1697 &octeon_eth_param_pko_cmd_w0_n2,
1698 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1699 CTL_EOL)) != 0) {
1700 goto err;
1701 }
1702
1703 octeon_eth_sysctl_pkocmdw0n2_num = node->sysctl_num;
1704
1705 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1706 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1707 CTLTYPE_INT, "pip_dyn_rs",
1708 SYSCTL_DESCR("PIP dynamic short in WQE"),
1709 octeon_eth_sysctl_verify, 0,
1710 &octeon_eth_param_pip_dyn_rs,
1711 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1712 CTL_EOL)) != 0) {
1713 goto err;
1714 }
1715
1716 octeon_eth_sysctl_pipdynrs_num = node->sysctl_num;
1717
1718 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1719 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1720 CTLTYPE_INT, "redir",
1721 SYSCTL_DESCR("input port redirection"),
1722 octeon_eth_sysctl_verify, 0,
1723 &octeon_eth_param_redir,
1724 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1725 CTL_EOL)) != 0) {
1726 goto err;
1727 }
1728
1729 octeon_eth_sysctl_redir_num = node->sysctl_num;
1730
1731 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1732 CTLFLAG_PERMANENT,
1733 CTLTYPE_INT, "pkt_pool",
1734 SYSCTL_DESCR("packet pool available"),
1735 octeon_eth_sysctl_pool, 0, NULL,
1736 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1737 CTL_EOL)) != 0) {
1738 goto err;
1739 }
1740
1741 octeon_eth_sysctl_pkt_pool_num = node->sysctl_num;
1742
1743 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1744 CTLFLAG_PERMANENT,
1745 CTLTYPE_INT, "wqe_pool",
1746 SYSCTL_DESCR("wqe pool available"),
1747 octeon_eth_sysctl_pool, 0, NULL,
1748 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1749 CTL_EOL)) != 0) {
1750 goto err;
1751 }
1752
1753 octeon_eth_sysctl_wqe_pool_num = node->sysctl_num;
1754
1755 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1756 CTLFLAG_PERMANENT,
1757 CTLTYPE_INT, "cmd_pool",
1758 SYSCTL_DESCR("cmd pool available"),
1759 octeon_eth_sysctl_pool, 0, NULL,
1760 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1761 CTL_EOL)) != 0) {
1762 goto err;
1763 }
1764
1765 octeon_eth_sysctl_cmd_pool_num = node->sysctl_num;
1766
1767 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1768 CTLFLAG_PERMANENT,
1769 CTLTYPE_INT, "sg_pool",
1770 SYSCTL_DESCR("sg pool available"),
1771 octeon_eth_sysctl_pool, 0, NULL,
1772 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1773 CTL_EOL)) != 0) {
1774 goto err;
1775 }
1776
1777 octeon_eth_sysctl_sg_pool_num = node->sysctl_num;
1778
1779 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1780 CTLFLAG_PERMANENT | CTLFLAG_READONLY,
1781 CTLTYPE_INT, "pktbuf",
1782 SYSCTL_DESCR("input packet buffer size on POW"),
1783 octeon_eth_sysctl_rd, 0,
1784 &octeon_eth_param_pktbuf,
1785 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1786 CTL_EOL)) != 0) {
1787 goto err;
1788 }
1789
1790 octeon_eth_sysctl_pktbuf_num = node->sysctl_num;
1791
1792 return;
1793
1794 err:
1795 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
1796 }
1797
/*
 * Handler for the read-write hw.cnmac.* tunables.  Validates the new
 * value, stores it in the backing global, and pushes the change to
 * the hardware where needed.  Dispatches on the node number recorded
 * at creation time.
 */
static int
octeon_eth_sysctl_verify(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	struct octeon_eth_softc *sc;
	int i;
	int s;

	/* run the lookup against a copy so a bad value is never stored */
	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* boolean: no hardware update needed, just store it */
	if (node.sysctl_num == octeon_eth_sysctl_pkocmdw0n2_num) {
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		return 0;
	}

	/* boolean: propagate DYN_RS to the PIP config of every port */
	if (node.sysctl_num == octeon_eth_sysctl_pipdynrs_num) {
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			sc = octeon_eth_gsc[i]; /* XXX */
			octeon_pip_prt_cfg_enable(sc->sc_pip, PIP_PRT_CFGN_DYN_RS, v);
		}
		splx(s);
		return 0;
	}

	/*
	 * redirection bitmask: one 3-bit field per port.  A non-zero
	 * field forces the port promiscuous and reroutes its input
	 * through octeon_eth_recv_redir(); zero restores ether_input.
	 */
	if (node.sysctl_num == octeon_eth_sysctl_redir_num) {
		if (v & ~((0x7 << (4 * 0)) | (0x7 << (4 * 1)) | (0x7 << (4 * 2))))
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			struct ifnet *ifp;

			sc = octeon_eth_gsc[i]; /* XXX */
			ifp = &sc->sc_ethercom.ec_if;

			/* sysctl_data was just set to v above, so this reads v */
			sc->sc_redir = (octeon_eth_param_redir >> (4 * i)) & 0x7;
			if (sc->sc_redir == 0) {
				if (ISSET(ifp->if_flags, IFF_PROMISC)) {
					CLR(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = ether_input;
			}
			else {
				if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
					SET(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = octeon_eth_recv_redir;
			}
		}
		splx(s);
		return 0;
	}

	return EINVAL;
}
1869
1870 static int
1871 octeon_eth_sysctl_pool(SYSCTLFN_ARGS)
1872 {
1873 int error, newval = 0;
1874 struct sysctlnode node;
1875 int s;
1876
1877 node = *rnode;
1878 node.sysctl_data = &newval;
1879 s = splnet();
1880 if (node.sysctl_num == octeon_eth_sysctl_pkt_pool_num) {
1881 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_PKT);
1882 } else if (node.sysctl_num == octeon_eth_sysctl_wqe_pool_num) {
1883 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_WQE);
1884 } else if (node.sysctl_num == octeon_eth_sysctl_cmd_pool_num) {
1885 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_CMD);
1886 } else if (node.sysctl_num == octeon_eth_sysctl_sg_pool_num) {
1887 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_SG);
1888 } else {
1889 splx(s);
1890 return EINVAL;
1891 }
1892 splx(s);
1893 if (error)
1894 return error;
1895 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1896 if (error || newp == NULL)
1897 return error;
1898
1899 return 0;
1900 }
1901
1902 static int
1903 octeon_eth_sysctl_rd(SYSCTLFN_ARGS)
1904 {
1905 int error, v;
1906 struct sysctlnode node;
1907 int s;
1908
1909 node = *rnode;
1910 v = *(int *)rnode->sysctl_data;
1911 node.sysctl_data = &v;
1912 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1913 if (error || newp != NULL)
1914 return error;
1915
1916 if (node.sysctl_num == octeon_eth_sysctl_pktbuf_num) {
1917 uint64_t tmp;
1918 int n;
1919
1920 s = splnet();
1921 tmp = octeon_fpa_query(0);
1922 n = (int)tmp;
1923 splx(s);
1924 *(int *)rnode->sysctl_data = n;
1925 octeon_eth_param_pktbuf = n;
1926 *(int *)oldp = n;
1927 return 0;
1928 }
1929
1930 return EINVAL;
1931 }
1932