if_cnmac.c revision 1.12 1 /* $NetBSD: if_cnmac.c,v 1.12 2019/04/26 06:33:33 msaitoh Exp $ */
2
3 #include <sys/cdefs.h>
4 #if 0
5 __KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.12 2019/04/26 06:33:33 msaitoh Exp $");
6 #endif
7
8 #include "opt_octeon.h"
9
10 #ifdef OCTEON_ETH_DEBUG
11
12 #ifndef DIAGNOSTIC
13 #define DIAGNOSTIC
14 #endif
15
16 #ifndef DEBUG
17 #define DEBUG
18 #endif
19
20 #endif
21
22 /*
 * If no free send buffer is available, free all the sent buffers and bail out.
24 */
25 #define OCTEON_ETH_SEND_QUEUE_CHECK
26
27 /* XXX XXX XXX XXX XXX XXX */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/pool.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/ioctl.h>
37 #include <sys/errno.h>
38 #include <sys/device.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/sysctl.h>
42 #include <sys/syslog.h>
43
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_ether.h>
48 #include <net/route.h>
49 #include <net/bpf.h>
50
51 #include <netinet/in.h>
52 #include <netinet/in_systm.h>
53 #include <netinet/in_var.h>
54 #include <netinet/ip.h>
55
56 #include <sys/bus.h>
57 #include <machine/intr.h>
58 #include <machine/endian.h>
59 #include <machine/locore.h>
60
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63
64 #include <mips/cpuregs.h>
65
66 #include <mips/cavium/dev/octeon_asxreg.h>
67 #include <mips/cavium/dev/octeon_ciureg.h>
68 #include <mips/cavium/dev/octeon_npireg.h>
69 #include <mips/cavium/dev/octeon_gmxreg.h>
70 #include <mips/cavium/dev/octeon_ipdreg.h>
71 #include <mips/cavium/dev/octeon_pipreg.h>
72 #include <mips/cavium/dev/octeon_powreg.h>
73 #include <mips/cavium/dev/octeon_faureg.h>
74 #include <mips/cavium/dev/octeon_fpareg.h>
75 #include <mips/cavium/dev/octeon_bootbusreg.h>
76 #include <mips/cavium/include/iobusvar.h>
77 #include <mips/cavium/octeonvar.h>
78 #include <mips/cavium/dev/octeon_fpavar.h>
79 #include <mips/cavium/dev/octeon_gmxvar.h>
80 #include <mips/cavium/dev/octeon_fauvar.h>
81 #include <mips/cavium/dev/octeon_powvar.h>
82 #include <mips/cavium/dev/octeon_ipdvar.h>
83 #include <mips/cavium/dev/octeon_pipvar.h>
84 #include <mips/cavium/dev/octeon_pkovar.h>
85 #include <mips/cavium/dev/octeon_asxvar.h>
86 #include <mips/cavium/dev/octeon_smivar.h>
87 #include <mips/cavium/dev/if_cnmacvar.h>
88
89 #ifdef OCTEON_ETH_DEBUG
90 #define OCTEON_ETH_KASSERT(x) KASSERT(x)
91 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x)
92 #else
93 #define OCTEON_ETH_KASSERT(x)
94 #define OCTEON_ETH_KDASSERT(x)
95 #endif
96
97 /*
98 * Set the PKO to think command buffers are an odd length. This makes it so we
 * never have to divide a command across two buffers.
100 */
101 #define OCTEON_POOL_NWORDS_CMD \
102 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
103 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */
104
105 static void octeon_eth_buf_init(struct octeon_eth_softc *);
106
107 static int octeon_eth_match(device_t, struct cfdata *, void *);
108 static void octeon_eth_attach(device_t, device_t, void *);
109 static void octeon_eth_pip_init(struct octeon_eth_softc *);
110 static void octeon_eth_ipd_init(struct octeon_eth_softc *);
111 static void octeon_eth_pko_init(struct octeon_eth_softc *);
112 static void octeon_eth_asx_init(struct octeon_eth_softc *);
113 static void octeon_eth_smi_init(struct octeon_eth_softc *);
114
115 static void octeon_eth_board_mac_addr(uint8_t *, size_t,
116 struct octeon_eth_softc *);
117
118 static int octeon_eth_mii_readreg(device_t, int, int, uint16_t *);
119 static int octeon_eth_mii_writereg(device_t, int, int, uint16_t);
120 static void octeon_eth_mii_statchg(struct ifnet *);
121
122 static int octeon_eth_mediainit(struct octeon_eth_softc *);
123 static void octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
124 static int octeon_eth_mediachange(struct ifnet *);
125
126 static inline void octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
127 static inline void octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
128 static inline void octeon_eth_send_queue_flush(struct octeon_eth_softc *);
129 static inline void octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
130 static inline int octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
131 static inline void octeon_eth_send_queue_add(struct octeon_eth_softc *,
132 struct mbuf *, uint64_t *);
133 static inline void octeon_eth_send_queue_del(struct octeon_eth_softc *,
134 struct mbuf **, uint64_t **);
135 static inline int octeon_eth_buf_free_work(struct octeon_eth_softc *,
136 uint64_t *, uint64_t);
137 static inline void octeon_eth_buf_ext_free_m(struct mbuf *, void *, size_t,
138 void *);
139 static inline void octeon_eth_buf_ext_free_ext(struct mbuf *, void *, size_t,
140 void *);
141
142 static int octeon_eth_ioctl(struct ifnet *, u_long, void *);
143 static void octeon_eth_watchdog(struct ifnet *);
144 static int octeon_eth_init(struct ifnet *);
145 static void octeon_eth_stop(struct ifnet *, int);
146 static void octeon_eth_start(struct ifnet *);
147
148 static inline int octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
149 uint64_t, int *);
150 static inline uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
151 static inline uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
152 int);
153 static inline int octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
154 struct mbuf *, uint64_t *, int *);
155 static inline int octeon_eth_send_makecmd(struct octeon_eth_softc *,
156 struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
157 static inline int octeon_eth_send_buf(struct octeon_eth_softc *,
158 struct mbuf *, uint64_t *, int *);
159 static inline int octeon_eth_send(struct octeon_eth_softc *,
160 struct mbuf *, int *);
161
162 static int octeon_eth_reset(struct octeon_eth_softc *);
163 static int octeon_eth_configure(struct octeon_eth_softc *);
164 static int octeon_eth_configure_common(struct octeon_eth_softc *);
165
166 static void octeon_eth_tick_free(void *);
167 static void octeon_eth_tick_misc(void *);
168
169 static inline int octeon_eth_recv_mbuf(struct octeon_eth_softc *,
170 uint64_t *, struct mbuf **);
171 static inline int octeon_eth_recv_check_code(struct octeon_eth_softc *,
172 uint64_t);
173 static inline int octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
174 uint64_t);
175 static inline int octeon_eth_recv_check_link(struct octeon_eth_softc *,
176 uint64_t);
177 static inline int octeon_eth_recv_check(struct octeon_eth_softc *,
178 uint64_t);
179 static inline int octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
180 static void octeon_eth_recv_redir(struct ifnet *, struct mbuf *);
181 static inline void octeon_eth_recv_intr(void *, uint64_t *);
182
/* Device driver context */
/* Per-port softc table, indexed by GMX port number; filled in at attach. */
static struct octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
/* POW receive interrupt handle, established once and shared by all ports. */
static void *octeon_eth_pow_recv_ih;

/* sysctl'able parameters */
int octeon_eth_param_pko_cmd_w0_n2 = 1;	/* "n2" field of PKO command word 0 */
int octeon_eth_param_pip_dyn_rs = 1;
int octeon_eth_param_redir = 0;
int octeon_eth_param_pktbuf = 0;
int octeon_eth_param_rate = 0;
int octeon_eth_param_intr = 0;

/* autoconf(9) glue: match/attach only, no detach support. */
CFATTACH_DECL_NEW(cnmac, sizeof(struct octeon_eth_softc),
    octeon_eth_match, octeon_eth_attach, NULL, NULL);
197
#ifdef OCTEON_ETH_DEBUG

/*
 * Event counters (debug builds only).  Each _ENTRY expands to an
 * OCTEON_EVCNT_ENTRY for the like-named member of struct
 * octeon_eth_softc; the table is attached per device in
 * octeon_eth_attach() via OCTEON_EVCNT_ATTACH_EVCNTS().
 */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx, MISC, NULL, "rx"),
	_ENTRY(rxint, INTR, NULL, "rx intr"),
	_ENTRY(rxrs, MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc, MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput, MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc, MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput, MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode, MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix, MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb, MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink, MISC, NULL, "rx link error"),
	_ENTRY(rxerroff, MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort, MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble, MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc, MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress, MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp, MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok, MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx, MISC, NULL, "tx"),
	_ENTRY(txadd, MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc, MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget, MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc, MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget, MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput, MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel, MISC, NULL, "tx del"),
	_ENTRY(txerr, MISC, NULL, "tx error"),
	_ENTRY(txerrcmd, MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf, MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink, MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd, MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
#endif
237
238 /* ---- buffer management */
239
/*
 * FPA pool parameters: hardware pool number, element size and element
 * count for each pool used by this driver (packet data, work queue
 * entries, PKO command buffers, scatter/gather lists).
 */
static const struct octeon_eth_pool_param {
	int poolno;
	size_t size;
	size_t nelems;
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
struct octeon_fpa_buf *octeon_eth_pools[8/* XXX */];
/* Convenience aliases for the individual pools. */
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
257
258 static void
259 octeon_eth_buf_init(struct octeon_eth_softc *sc)
260 {
261 static int once;
262 int i;
263 const struct octeon_eth_pool_param *pp;
264 struct octeon_fpa_buf *fb;
265
266 if (once == 1)
267 return;
268 once = 1;
269
270 for (i = 0; i < (int)__arraycount(octeon_eth_pool_params); i++) {
271 pp = &octeon_eth_pool_params[i];
272 octeon_fpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
273 octeon_eth_pools[i] = fb;
274 }
275 }
276
277 /* ---- autoconf */
278
279 static int
280 octeon_eth_match(device_t parent, struct cfdata *match, void *aux)
281 {
282 struct octeon_gmx_attach_args *ga = aux;
283
284 if (strcmp(match->cf_name, ga->ga_name) != 0) {
285 return 0;
286 }
287 return 1;
288 }
289
/*
 * autoconf(9) attach: record port identity from the GMX parent,
 * initialize all hardware submodules (PIP/IPD/PKO/ASX/SMI), set up the
 * MII/media layer, and attach the network interface.
 */
static void
octeon_eth_attach(device_t parent, device_t self, void *aux)
{
	struct octeon_eth_softc *sc = device_private(self);
	struct octeon_gmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	/* Copy port identity and register tag from the GMX parent. */
	sc->sc_dev = self;
	sc->sc_regt = ga->ga_regt;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;

	sc->sc_init_flag = 0;
	/*
	 * XXXUEBAYASI
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	/* CN30XX-class (and earlier) implementations need workarounds. */
	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_PRE_ALIGN);
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_RX_INBND);
	}

	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc);
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	octeon_gmx_stats_init(sc->sc_gmx_port);

	callout_init(&sc->sc_tick_misc_ch, 0);
	callout_init(&sc->sc_tick_free_ch, 0);

	/* FAU register used to track hardware-completed transmissions. */
	octeon_fau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	octeon_fau_op_set_8(&sc->sc_fau_done, 0);

	/* Bring up the per-port input/output submodules. */
	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Cross-link the GMX port with our submodule contexts. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
	/* XXX */
	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;

	/* XXX */
	sc->sc_pow = &octeon_pow_softc;

	octeon_eth_mediainit(sc);

	/* Fill in and attach the network interface. */
	strncpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	ifp->if_init = octeon_eth_init;
	ifp->if_stop = octeon_eth_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX: not yet tx checksum */
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	octeon_gmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	octeon_gmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/* Rate-limit caps (1 second) for receive diagnostics. XXX */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_short_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_preamble_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_crc_cap.tv_sec = 1;
#ifdef OCTEON_ETH_DEBUG
	sc->sc_rate_recv_fixup_odd_nibble_addr_cap.tv_sec = 1;
#endif
	/* XXX */

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* The POW receive interrupt is established once, for all ports. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih
		    = octeon_pow_intr_establish(OCTEON_POW_GROUP_PIP,
			IPL_NET, octeon_eth_recv_intr, NULL, NULL);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    device_xname(sc->sc_dev));
}
404
405 /* ---- submodules */
406
407 /* XXX */
408 static void
409 octeon_eth_pip_init(struct octeon_eth_softc *sc)
410 {
411 struct octeon_pip_attach_args pip_aa;
412
413 pip_aa.aa_port = sc->sc_port;
414 pip_aa.aa_regt = sc->sc_regt;
415 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
416 pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
417 pip_aa.aa_ip_offset = sc->sc_ip_offset;
418 octeon_pip_init(&pip_aa, &sc->sc_pip);
419 }
420
421 /* XXX */
422 static void
423 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
424 {
425 struct octeon_ipd_attach_args ipd_aa;
426
427 ipd_aa.aa_port = sc->sc_port;
428 ipd_aa.aa_regt = sc->sc_regt;
429 ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
430 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
431 octeon_ipd_init(&ipd_aa, &sc->sc_ipd);
432 }
433
434 /* XXX */
435 static void
436 octeon_eth_pko_init(struct octeon_eth_softc *sc)
437 {
438 struct octeon_pko_attach_args pko_aa;
439
440 pko_aa.aa_port = sc->sc_port;
441 pko_aa.aa_regt = sc->sc_regt;
442 pko_aa.aa_cmdptr = &sc->sc_cmdptr;
443 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
444 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
445 octeon_pko_init(&pko_aa, &sc->sc_pko);
446 }
447
448 /* XXX */
449 static void
450 octeon_eth_asx_init(struct octeon_eth_softc *sc)
451 {
452 struct octeon_asx_attach_args asx_aa;
453
454 asx_aa.aa_port = sc->sc_port;
455 asx_aa.aa_regt = sc->sc_regt;
456 octeon_asx_init(&asx_aa, &sc->sc_asx);
457 }
458
459 static void
460 octeon_eth_smi_init(struct octeon_eth_softc *sc)
461 {
462 struct octeon_smi_attach_args smi_aa;
463
464 smi_aa.aa_port = sc->sc_port;
465 smi_aa.aa_regt = sc->sc_regt;
466 octeon_smi_init(&smi_aa, &sc->sc_smi);
467 octeon_smi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
468 }
469
470 /* ---- XXX */
471
/* Pack a 6-byte Ethernet address into the low 48 bits of a uint64_t. */
#define ADDR2UINT64(u, a) \
	do { \
		u = \
		    (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
		     ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
		     ((uint64_t)a[4] << 8) | ((uint64_t)a[5] << 0)); \
	} while (0)
/* Unpack the low 48 bits of a uint64_t into a 6-byte Ethernet address. */
#define UINT642ADDR(a, u) \
	do { \
		a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
		a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
		a[4] = (uint8_t)((u) >> 8); a[5] = (uint8_t)((u) >> 0); \
	} while (0)
485
486 static void
487 octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size,
488 struct octeon_eth_softc *sc)
489 {
490 prop_dictionary_t dict;
491 prop_data_t ea;
492
493 dict = device_properties(sc->sc_dev);
494 KASSERT(dict != NULL);
495 ea = prop_dictionary_get(dict, "mac-address");
496 KASSERT(ea != NULL);
497 memcpy(enaddr, prop_data_data_nocopy(ea), size);
498 }
499
500 /* ---- media */
501
502 static int
503 octeon_eth_mii_readreg(device_t self, int phy_addr, int reg, uint16_t *val)
504 {
505 struct octeon_eth_softc *sc = device_private(self);
506
507 return octeon_smi_read(sc->sc_smi, phy_addr, reg, val);
508 }
509
510 static int
511 octeon_eth_mii_writereg(device_t self, int phy_addr, int reg, uint16_t val)
512 {
513 struct octeon_eth_softc *sc = device_private(self);
514
515 return octeon_smi_write(sc->sc_smi, phy_addr, reg, val);
516 }
517
/*
 * MII status-change callback: quiesce output on both PKO and GMX,
 * reset the port for the new link parameters, reload the RX filter if
 * the interface is running, then re-enable output.  The disable/enable
 * ordering around the reset is deliberate.
 */
static void
octeon_eth_mii_statchg(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	octeon_pko_port_enable(sc->sc_pko, 0);
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		octeon_gmx_set_filter(sc->sc_gmx_port);

	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
}
534
/*
 * Set up the MII layer and attach the PHY.  The PHY address comes from
 * the "phy-addr" device property.  If a PHY is found, default to
 * autoselect; otherwise advertise (and select) "none".
 * Always returns 0.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	prop_object_t phy;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
	KASSERT(phy != NULL);

	mii_attach(sc->sc_dev, &sc->sc_mii,
	    0xffffffff, prop_number_integer_value(phy),
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) {
		/* A PHY answered: let autonegotiation pick the media. */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	} else {
		/* No PHY found: register a "none" media as the only choice. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	}

	return 0;
}
572
573 static void
574 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
575 {
576 struct octeon_eth_softc *sc = ifp->if_softc;
577
578 mii_pollstat(&sc->sc_mii);
579
580 ifmr->ifm_status = sc->sc_mii.mii_media_status;
581 ifmr->ifm_active = sc->sc_mii.mii_media_active;
582 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
583 sc->sc_gmx_port->sc_port_flowflags;
584 }
585
586 static int
587 octeon_eth_mediachange(struct ifnet *ifp)
588 {
589 struct octeon_eth_softc *sc = ifp->if_softc;
590
591 mii_mediachg(&sc->sc_mii);
592
593 return 0;
594 }
595
596 /* ---- send buffer garbage collection */
597
/*
 * Issue an asynchronous fetch (add of 0) on the TX-done FAU register so
 * its value can be collected later by
 * octeon_eth_send_queue_flush_fetch() without waiting here.
 */
static inline void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
605
/*
 * Collect the FAU value requested by
 * octeon_eth_send_queue_flush_prefetch() into sc_hard_done_cnt.
 * The counter is never positive; see octeon_eth_send_queue_flush().
 */
static inline void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	/* Tolerate a fetch without a prior prefetch in non-debug builds. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
618
/*
 * Release resources for hardware-completed transmissions.
 * sc_hard_done_cnt (fetched beforehand) is non-positive; its magnitude
 * is the number of completed sends.  For each one, return the gather
 * buffer to the FPA and free the mbuf chain, then credit the FAU
 * register by the number processed.  The actual FAU read is deferred to
 * octeon_eth_send_queue_flush_sync() via sc_flush.
 */
static inline void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		octeon_fpa_buf_put(octeon_eth_fb_sg, gbuf);
		OCTEON_EVCNT_INC(sc, txbufgbput);

		m_freem(m);

		/* Room was freed, so output may proceed again. */
		CLR(ifp->if_flags, IFF_OACTIVE);
	}

	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}
646
/*
 * Complete a flush started by octeon_eth_send_queue_flush(): consume
 * the pending FAU operation and charge the flushed entries against the
 * software request counter.
 */
static inline void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX XXX XXX */
	octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX XXX XXX */

	sc->sc_flush = 0;
}
663
/*
 * Return nonzero when the send queue cannot take another packet
 * (outstanding soft requests plus unreaped completions fill the gather
 * queue).  In that case, reclaim completed sends before reporting full.
 * Always returns 0 when OCTEON_ETH_SEND_QUEUE_CHECK is not defined.
 */
static inline int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* sc_hard_done_cnt is non-positive, so this subtracts completions. */
	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
682
683 /*
684 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
685 * buffer. Other mbuf members may be used by m_freem(), so don't touch them!
686 */
687
struct _send_queue_entry {
	union {
		/* The entry is the mbuf itself, reinterpreted. */
		struct mbuf _sqe_s_mbuf;
		struct {
			/* Pad so the entry overlays m_nextpkt exactly. */
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			/* Pad so the gather pointer overlays m_paddr. */
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define _sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define _sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
703
/*
 * Append an mbuf and its gather buffer to the software send queue.
 * The mbuf's m_nextpkt/m_paddr fields serve as queue linkage; see
 * struct _send_queue_entry above.
 */
static inline void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	/* Count queued mbufs that carry an external free callback. */
	if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;

	OCTEON_EVCNT_INC(sc, txadd);
}
718
/*
 * Remove the oldest entry from the software send queue, returning the
 * mbuf in *rm and its gather buffer in *rgbuf.  The queue must not be
 * empty.
 */
static inline void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	/* Keep the external-callback count in step with queue_add(). */
	if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}

	OCTEON_EVCNT_INC(sc, txdel);
}
739
/*
 * Return a received work queue entry (and the packet buffer it points
 * to, if word2 indicates one) to their FPA pools.  Always returns 0.
 */
static inline int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = work[3] & PIP_WQE_WORD3_ADDR;
		/*
		 * NOTE(review): 2048 is presumably the PKT pool buffer
		 * size/alignment — confirm against OCTEON_POOL_SIZE_PKT.
		 */
		start_buffer = addr & ~(2048 - 1);

		octeon_fpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
761
/*
 * External-free callback for "dynamic short" receive mbufs whose data
 * lives inside the work queue entry: return the WQE to the FPA and the
 * mbuf to its pool cache.  'arg' is the WQE pointer.
 */
static inline void
octeon_eth_buf_ext_free_m(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] carries the owning softc in debug builds. */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
782
/*
 * External-free callback for receive mbufs backed by an FPA packet
 * buffer: return both the WQE ('arg') and the packet buffer ('buf') to
 * their pools, then the mbuf to its pool cache.
 */
static inline void
octeon_eth_buf_ext_free_ext(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] carries the owning softc in debug builds. */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	octeon_fpa_buf_put(octeon_eth_fb_pkt, buf);
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
804
805 /* ---- ifnet interfaces */
806
/*
 * ioctl handler.  Media ioctls get flow-control fixups before being
 * handed to ifmedia; everything else goes through ether_ioctl(), with
 * ENETRESET mapped to a hardware RX filter reload.  Runs at splnet.
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW implies pause in both directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		/* XXX: Flow control */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				octeon_gmx_set_filter(sc->sc_gmx_port);
			error = 0;
		}
		break;
	}
	/* Kick the transmitter in case the ioctl unblocked output. */
	octeon_eth_start(ifp);
	splx(s);

	return error;
}
853
854 /* ---- send (output) */
855
/*
 * Build PKO command word 0 for a packet of 'len' bytes in 'segs'
 * segments.  Gather mode (g=1) is selected whenever there is more than
 * one segment; fau0/fau1 name the FAU registers updated on completion.
 */
static inline uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return octeon_pko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,		/* sz1 */
	    OCT_FAU_OP_SIZE_64,		/* sz0 */
	    1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
	    0,				/* le */
	    octeon_eth_param_pko_cmd_w0_n2, /* n2 */
	    1, 0,			/* q, r */
	    (segs == 1) ? 0 : 1,	/* g */
	    0, 0, 1,			/* ipoffp1, ii, df */
	    segs, (int)len);		/* segs, totalbytes */
}
870
/*
 * Build PKO command word 1: physical address and size of the data
 * buffer (link mode) or of the gather list (gather mode).  Buffers are
 * freed back to the gather buffer pool by hardware when applicable.
 */
static inline uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return octeon_pko_cmd_word1(
	    0, 0,			/* i, back */
	    FPA_GATHER_BUFFER_POOL,	/* pool */
	    size, addr);		/* size, addr */
}
879
/*
 * Fill the gather list 'gbuf' with one PKO word-1 entry per piece of
 * each mbuf in the chain, splitting any segment that crosses a page
 * boundary (presumably because physical contiguity is only guaranteed
 * within one page — the XXX markers flag this).  The segment count is
 * returned via *rsegs.  Returns nonzero if more than 63 entries would
 * be required.
 */
static inline int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uintptr_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		/* Skip empty mbufs entirely. */
		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/* Offset of the data within its page. */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* Crosses a page boundary: emit the first part. */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    kvtophys((vaddr_t)m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX XXX XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* Emit the (remaining) part of this mbuf. */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    kvtophys((vaddr_t)(m->m_data + rlen)));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
931
/*
 * Build both PKO command words for mbuf chain 'm'.  Returns nonzero
 * (after logging) when the gather list would exceed the segment limit;
 * on success the words are returned via *rpko_cmd_w0/*rpko_cmd_w1.
 */
static inline int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: there are a lot of number of segments"
		    " of transmission data", device_xname(sc->sc_dev));
		result = 1;
		goto done;
	}

	/*
	 * segs == 1 -> link mode (single continuous buffer)
	 *	WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1 -> gather mode (scatter-gather buffer)
	 *	WORD1[size] is number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs);
	if (segs == 1) {
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(
		    m->m_pkthdr.len, kvtophys((vaddr_t)m->m_data));
	} else {
#ifdef __mips_n32
		KASSERT(MIPS_KSEG0_P(gbuf));
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
		    MIPS_KSEG0_TO_PHYS(gbuf));
#else
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
		    MIPS_XKPHYS_TO_PHYS(gbuf));
#endif
	}

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
976
/*
 * Copy a two-word PKO command into the current command buffer and
 * advance the write index, chaining in a fresh command buffer from the
 * FPA when the current one fills up.  '*pwdc' accumulates the number of
 * words to announce later via the PKO doorbell.
 * Returns nonzero if a new command buffer could not be allocated.
 */
static inline int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1, int *pwdc)
{
	uint64_t *cmdptr;
	int result = 0;

	/* Translate the command buffer's physical address to a VA. */
#ifdef __mips_n32
	KASSERT((sc->sc_cmdptr.cmdptr & ~MIPS_PHYS_MASK) == 0);
	cmdptr = (uint64_t *)MIPS_PHYS_TO_KSEG0(sc->sc_cmdptr.cmdptr);
#else
	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
#endif
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		/*
		 * Buffer full: the last word links to a freshly allocated
		 * command buffer, which becomes the current one.
		 */
		paddr_t buf;

		buf = octeon_fpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    device_xname(sc->sc_dev));
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	*pwdc += 2;

done:
	return result;
}
1023
1024 static inline int
1025 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1026 uint64_t *gbuf, int *pwdc)
1027 {
1028 int result = 0, error;
1029 uint64_t pko_cmd_w0, pko_cmd_w1;
1030
1031 error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1032 if (error != 0) {
1033 /* Already logging */
1034 OCTEON_EVCNT_INC(sc, txerrmkcmd);
1035 result = error;
1036 goto done;
1037 }
1038
1039 error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1, pwdc);
1040 if (error != 0) {
1041 /* Already logging */
1042 OCTEON_EVCNT_INC(sc, txerrcmd);
1043 result = error;
1044 }
1045
1046 done:
1047 return result;
1048 }
1049
/*
 * Transmit one mbuf chain: allocate a gather buffer, build and enqueue
 * the PKO command, and record the mbuf on the software send queue until
 * the hardware reports completion.  Returns nonzero on failure, in
 * which case the caller still owns 'm'.
 */
static inline int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m, int *pwdc)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	OCTEON_EVCNT_INC(sc, tx);

	gaddr = octeon_fpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING, "%s: can not allocate gather buffer from "
		    "free pool allocator\n", device_xname(sc->sc_dev));
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		result = 1;
		goto done;
	}
	OCTEON_EVCNT_INC(sc, txbufgbget);

	/* Translate the gather buffer's physical address to a VA. */
#ifdef __mips_n32
	KASSERT((gaddr & ~MIPS_PHYS_MASK) == 0);
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_KSEG0(gaddr);
#else
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);
#endif

	OCTEON_ETH_KASSERT(gbuf != NULL);

	error = octeon_eth_send_buf(sc, m, gbuf, pwdc);
	if (error != 0) {
		/* Already logging; return the unused gather buffer. */
		octeon_fpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		OCTEON_EVCNT_INC(sc, txbufgbput);
		result = error;
		goto done;
	}

	/* Keep the mbuf until the hardware is done with it. */
	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
1092
/*
 * Transmit start routine (ifnet if_start hook).
 *
 * Drains the interface send queue into the PKO.  Doorbell words are
 * accumulated in `wdc` and flushed with a single doorbell write when
 * the queue is drained (or when bailing out on a full send queue).
 *
 * Every send_queue_flush_prefetch() (iobdma request) issued here is
 * paired with exactly one send_queue_flush_fetch() before any exit
 * path: either at the top of the next loop iteration or at `last'.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int wdc = 0;	/* doorbell word count accumulated this call */

	/*
	 * Performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	/* Not running, or output already active: just consume the prefetch. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port))) {
		/* Link down: dequeue and drop everything queued. */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		/* Peek first; only dequeue once we know we can send. */
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* XXX XXX XXX */
		/* Consume the iobdma request issued before this iteration. */
		octeon_eth_send_queue_flush_fetch(sc);

		/*
		 * If no free send buffer is available, free all the sent
		 * buffer and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			SET(ifp->if_flags, IFF_OACTIVE);
			/* Flush accumulated commands before returning. */
			if (wdc > 0)
				octeon_pko_op_doorbell_write(sc->sc_port,
				    sc->sc_port, wdc);
			return;
		}
		/* XXX XXX XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* XXX XXX XXX */
		/* Reclaim completed transmissions above the threshold. */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m, &wdc)) {
			/* Send failed; the mbuf was not consumed. */
			IF_DROP(&ifp->if_snd);
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed in the transmission of the packet\n",
			    device_xname(sc->sc_dev));
			OCTEON_EVCNT_INC(sc, txerr);
		} else
			sc->sc_soft_req_cnt++;

		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX XXX XXX */

		/* Send next iobdma request */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

	/* Ring the doorbell once for everything queued above. */
	if (wdc > 0)
		octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);

	/*
	 * Don't schedule send-buffer-free callout every time - those buffers are freed
	 * by "free tick". This makes some packets like NFS slower.
	 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* Consume the outstanding iobdma request issued above. */
	octeon_eth_send_queue_flush_fetch(sc);
}
1192
1193 static void
1194 octeon_eth_watchdog(struct ifnet *ifp)
1195 {
1196 struct octeon_eth_softc *sc = ifp->if_softc;
1197
1198 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1199
1200 octeon_eth_configure(sc);
1201
1202 SET(ifp->if_flags, IFF_RUNNING);
1203 CLR(ifp->if_flags, IFF_OACTIVE);
1204 ifp->if_timer = 0;
1205
1206 octeon_eth_start(ifp);
1207 }
1208
1209 static int
1210 octeon_eth_init(struct ifnet *ifp)
1211 {
1212 struct octeon_eth_softc *sc = ifp->if_softc;
1213
1214 /* XXX don't disable commonly used parts!!! XXX */
1215 if (sc->sc_init_flag == 0) {
1216 /* Cancel any pending I/O. */
1217 octeon_eth_stop(ifp, 0);
1218
1219 /* Initialize the device */
1220 octeon_eth_configure(sc);
1221
1222 octeon_pko_enable(sc->sc_pko);
1223 octeon_ipd_enable(sc->sc_ipd);
1224
1225 sc->sc_init_flag = 1;
1226 } else {
1227 octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1228 }
1229 octeon_eth_mediachange(ifp);
1230
1231 octeon_gmx_set_filter(sc->sc_gmx_port);
1232
1233 callout_reset(&sc->sc_tick_misc_ch, hz, octeon_eth_tick_misc, sc);
1234 callout_reset(&sc->sc_tick_free_ch, hz, octeon_eth_tick_free, sc);
1235
1236 SET(ifp->if_flags, IFF_RUNNING);
1237 CLR(ifp->if_flags, IFF_OACTIVE);
1238
1239 return 0;
1240 }
1241
1242 static void
1243 octeon_eth_stop(struct ifnet *ifp, int disable)
1244 {
1245 struct octeon_eth_softc *sc = ifp->if_softc;
1246
1247 callout_stop(&sc->sc_tick_misc_ch);
1248 callout_stop(&sc->sc_tick_free_ch);
1249
1250 mii_down(&sc->sc_mii);
1251
1252 octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1253
1254 /* Mark the interface as down and cancel the watchdog timer. */
1255 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1256 ifp->if_timer = 0;
1257 }
1258
1259 /* ---- misc */
1260
1261 #define PKO_INDEX_MASK ((1ULL << 12/* XXX */) - 1)
1262
/*
 * Reset the GMX port parameters for this port: speed, flow control
 * and timing.  Always returns 0.
 */
static int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	octeon_gmx_reset_speed(sc->sc_gmx_port);
	octeon_gmx_reset_flowctl(sc->sc_gmx_port);
	octeon_gmx_reset_timing(sc->sc_gmx_port);

	return 0;
}
1272
/*
 * (Re)configure the full datapath for this port: GMX link parameters,
 * one-time system-wide IPD/PKO/POW setup, per-port PKO and PIP
 * configuration, and read-to-clear statistics mode.  The GMX port is
 * kept disabled for the duration of the reconfiguration.  Always
 * returns 0.
 */
static int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	/* Quiesce the port while reprogramming. */
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* System-wide setup; only the first caller does any work. */
	octeon_eth_configure_common(sc);

	octeon_pko_port_config(sc->sc_pko);
	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_pip_port_config(sc->sc_pip);

	/* Make TX/RX statistics registers clear on read. */
	octeon_gmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	octeon_gmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	octeon_gmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1293
1294 static int
1295 octeon_eth_configure_common(struct octeon_eth_softc *sc)
1296 {
1297 static int once;
1298
1299 if (once == 1)
1300 return 0;
1301 once = 1;
1302
1303 octeon_ipd_config(sc->sc_ipd);
1304 #ifdef OCTEON_ETH_IPD_RED
1305 octeon_ipd_red(sc->sc_ipd, RECV_QUEUE_SIZE >> 2, RECV_QUEUE_SIZE >> 3);
1306 #endif
1307 octeon_pko_config(sc->sc_pko);
1308
1309 octeon_pow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);
1310
1311 return 0;
1312 }
1313
1314 /* ---- receive (input) */
1315
/*
 * Wrap a received POW work queue entry in an mbuf, without copying.
 *
 * Two receive layouts are handled:
 *  - "dynamic short": WORD2 reports zero packet buffers, so the packet
 *    data lives inside the work queue entry itself (from work[4] on);
 *  - normal: WORD3 carries the physical address of the packet data in
 *    a separate 2048-byte packet buffer.
 * In both cases the backing storage is attached as external mbuf
 * storage with a driver free callback; the WQE pointer is passed as
 * the callback argument so the buffer can be returned to the FPA.
 *
 * Returns 0 with *rm set on success; 1 if no mbuf header could be
 * allocated (the work entry is left untouched in that case).
 */
static inline int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(struct mbuf *, void *, size_t, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		/* Packet data starts sc_ip_offset bytes into the WQE area. */
		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		/* Map the packet buffer physical address from WORD3. */
#ifdef __mips_n32
		KASSERT((word3 & ~MIPS_PHYS_MASK) == 0);
		addr = MIPS_PHYS_TO_KSEG0(word3 & PIP_WQE_WORD3_ADDR);
#else
		addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
#endif
		/* Round down to the start of the 2048-byte packet buffer. */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* Embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* Packet length is in the top bits of WORD1. */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m_set_rcvif(m, &sc->sc_ethercom.ec_if);

	/* Not readonly buffer */
	m->m_flags |= M_EXT_RW;

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1379
1380 static inline int
1381 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1382 {
1383 uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1384
1385 if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1386 return 0;
1387
1388 /* This error is harmless */
1389 if (opecode == PIP_OVER_ERR)
1390 return 0;
1391
1392 return 1;
1393 }
1394
1395 static inline int
1396 octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
1397 {
1398 if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
1399 return 1;
1400 return 0;
1401 }
1402
1403 static inline int
1404 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1405 {
1406 if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port)))
1407 return 1;
1408 return 0;
1409 }
1410
1411 static inline int
1412 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1413 {
1414 if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1415 if (ratecheck(&sc->sc_rate_recv_check_link_last,
1416 &sc->sc_rate_recv_check_link_cap))
1417 log(LOG_DEBUG,
1418 "%s: link is not up, the packet was dropped\n",
1419 device_xname(sc->sc_dev));
1420 OCTEON_EVCNT_INC(sc, rxerrlink);
1421 return 1;
1422 }
1423
1424 #if 0 /* XXX Performance tunig (Jumbo-frame is not supported yet!) */
1425 if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1426 /* XXX jumbo frame */
1427 if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1428 &sc->sc_rate_recv_check_jumbo_cap))
1429 log(LOG_DEBUG,
1430 "jumbo frame was received\n");
1431 OCTEON_EVCNT_INC(sc, rxerrjmb);
1432 return 1;
1433 }
1434 #endif
1435
1436 if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1437
1438 if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1439 PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1440 /* No logging */
1441 /* XXX inclement special error count */
1442 } else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1443 PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1444 /* Not an erorr. it's because of overload */
1445 } else {
1446
1447 if (ratecheck(&sc->sc_rate_recv_check_code_last,
1448 &sc->sc_rate_recv_check_code_cap))
1449 log(LOG_WARNING,
1450 "%s: reception error, packet dropped "
1451 "(error code = %" PRId64 ")\n",
1452 device_xname(sc->sc_dev), word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1453 }
1454 OCTEON_EVCNT_INC(sc, rxerrcode);
1455 return 1;
1456 }
1457
1458 return 0;
1459 }
1460
/*
 * Process one received work queue entry: validate it, wrap it in an
 * mbuf and pass it up the stack.  On any failure the hardware buffers
 * are returned to the FPA and 1 is returned; 0 on success.
 *
 * An iobdma prefetch for the send-queue reclaim may be issued at entry
 * (threshold-gated); every path out of this function consumes it with
 * the matching _fetch under the same threshold condition.
 */
static inline int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX XXX XXX */
	/*
	 * Performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX XXX XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_ethercom.ec_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	/* Drop packets that fail validation (link down / error code). */
	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* Drop if no mbuf header could be allocated. */
	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	/* Fill in checksum-offload flags from WORD2. */
	octeon_ipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX XXX XXX */
	/* Consume the prefetch from above and reclaim sent buffers. */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX XXX XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX XXX XXX */

	/* Hand the packet to the network stack. */
	if_percpuq_enqueue(ifp->if_percpuq, m);

	return 0;

drop:
	/* XXX XXX XXX */
	/* Consume the prefetch issued at entry before bailing out. */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX XXX XXX */

	return result;
}
1533
/*
 * Input redirection hook (installed as ifp->_if_input via the
 * hw.cnmac.redir sysctl): instead of passing a received packet up the
 * stack, retransmit it out another cnmac port.
 *
 * The destination is the highest-numbered port whose bit is set in
 * the receiving port's sc_redir mask (the loop deliberately keeps the
 * last match).  The packet is dropped if no port is selected or the
 * destination send queue is full.
 */
static void
octeon_eth_recv_redir(struct ifnet *ifp, struct mbuf *m)
{
	struct octeon_eth_softc *rsc = ifp->if_softc;
	struct octeon_eth_softc *sc = NULL;
	int i, wdc = 0;

	/* Pick the (last) destination port from the redirect mask. */
	for (i = 0; i < 3 /* XXX */; i++) {
		if (rsc->sc_redir & (1 << i))
			sc = octeon_eth_gsc[i];
	}

	if (sc == NULL) {
		m_freem(m);
		return;
	}
	/* Issue and immediately consume the send-queue iobdma request. */
	octeon_eth_send_queue_flush_prefetch(sc);

	octeon_eth_send_queue_flush_fetch(sc);

	if (octeon_eth_send_queue_is_full(sc)) {
		m_freem(m);
		return;
	}
	/* Reclaim completed transmissions above the threshold. */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
		octeon_eth_send_queue_flush(sc);

	if (octeon_eth_send(sc, m, &wdc)) {
		/* NOTE(review): drop is counted against the RECEIVING
		 * ifp's send queue, not the destination's — confirm
		 * this is intentional. */
		IF_DROP(&ifp->if_snd);
		m_freem(m);
	} else {
		/* Ring the destination port's doorbell immediately. */
		octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);
		sc->sc_soft_req_cnt++;
	}

	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
}
1572
1573 static inline void
1574 octeon_eth_recv_intr(void *data, uint64_t *work)
1575 {
1576 struct octeon_eth_softc *sc;
1577 int port;
1578
1579 OCTEON_ETH_KASSERT(work != NULL);
1580
1581 port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1582
1583 OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1584
1585 sc = octeon_eth_gsc[port];
1586
1587 OCTEON_ETH_KASSERT(sc != NULL);
1588 OCTEON_ETH_KASSERT(port == sc->sc_port);
1589
1590 /* XXX process all work queue entries anyway */
1591
1592 (void)octeon_eth_recv(sc, work);
1593 }
1594
1595 /* ---- tick */
1596
1597 /*
1598 * octeon_eth_tick_free
1599 *
1600 * => garbage collect send gather buffer / mbuf
1601 * => called at softclock
1602 */
1603 static void
1604 octeon_eth_tick_free(void *arg)
1605 {
1606 struct octeon_eth_softc *sc = arg;
1607 int timo;
1608 int s;
1609
1610 s = splnet();
1611 /* XXX XXX XXX */
1612 if (sc->sc_soft_req_cnt > 0) {
1613 octeon_eth_send_queue_flush_prefetch(sc);
1614 octeon_eth_send_queue_flush_fetch(sc);
1615 octeon_eth_send_queue_flush(sc);
1616 octeon_eth_send_queue_flush_sync(sc);
1617 }
1618 /* XXX XXX XXX */
1619
1620 /* XXX XXX XXX */
1621 /* ??? */
1622 timo = hz - (100 * sc->sc_ext_callback_cnt);
1623 if (timo < 10)
1624 timo = 10;
1625 callout_schedule(&sc->sc_tick_free_ch, timo);
1626 /* XXX XXX XXX */
1627 splx(s);
1628 }
1629
1630 /*
1631 * octeon_eth_tick_misc
1632 *
1633 * => collect statistics
1634 * => check link status
1635 * => called at softclock
1636 */
1637 static void
1638 octeon_eth_tick_misc(void *arg)
1639 {
1640 struct octeon_eth_softc *sc = arg;
1641 struct ifnet *ifp;
1642 int s;
1643
1644 s = splnet();
1645
1646 ifp = &sc->sc_ethercom.ec_if;
1647
1648 octeon_gmx_stats(sc->sc_gmx_port);
1649 octeon_pip_stats(sc->sc_pip, ifp, sc->sc_port);
1650 mii_tick(&sc->sc_mii);
1651
1652 splx(s);
1653
1654 callout_schedule(&sc->sc_tick_misc_ch, hz);
1655 }
1656
1657 /* ---- Odd nibble preamble workaround (software CRC processing) */
1658
1659 /* ---- sysctl */
1660
1661 static int octeon_eth_sysctl_verify(SYSCTLFN_ARGS);
1662 static int octeon_eth_sysctl_pool(SYSCTLFN_ARGS);
1663 static int octeon_eth_sysctl_rd(SYSCTLFN_ARGS);
1664
1665 static int octeon_eth_sysctl_pkocmdw0n2_num;
1666 static int octeon_eth_sysctl_pipdynrs_num;
1667 static int octeon_eth_sysctl_redir_num;
1668 static int octeon_eth_sysctl_pkt_pool_num;
1669 static int octeon_eth_sysctl_wqe_pool_num;
1670 static int octeon_eth_sysctl_cmd_pool_num;
1671 static int octeon_eth_sysctl_sg_pool_num;
1672 static int octeon_eth_sysctl_pktbuf_num;
1673
1674 /*
1675 * Set up sysctl(3) MIB, hw.cnmac.*.
1676 */
1677 SYSCTL_SETUP(sysctl_octeon_eth, "sysctl cnmac subtree setup")
1678 {
1679 int rc;
1680 int octeon_eth_sysctl_root_num;
1681 const struct sysctlnode *node;
1682
1683 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
1684 0, CTLTYPE_NODE, "hw", NULL,
1685 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
1686 goto err;
1687 }
1688
1689 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1690 0, CTLTYPE_NODE, "cnmac",
1691 SYSCTL_DESCR("cnmac interface controls"),
1692 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
1693 goto err;
1694 }
1695
1696 octeon_eth_sysctl_root_num = node->sysctl_num;
1697
1698 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1699 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1700 CTLTYPE_INT, "pko_cmd_w0_n2",
1701 SYSCTL_DESCR("PKO command WORD0 N2 bit"),
1702 octeon_eth_sysctl_verify, 0,
1703 &octeon_eth_param_pko_cmd_w0_n2,
1704 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1705 CTL_EOL)) != 0) {
1706 goto err;
1707 }
1708
1709 octeon_eth_sysctl_pkocmdw0n2_num = node->sysctl_num;
1710
1711 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1712 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1713 CTLTYPE_INT, "pip_dyn_rs",
1714 SYSCTL_DESCR("PIP dynamic short in WQE"),
1715 octeon_eth_sysctl_verify, 0,
1716 &octeon_eth_param_pip_dyn_rs,
1717 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1718 CTL_EOL)) != 0) {
1719 goto err;
1720 }
1721
1722 octeon_eth_sysctl_pipdynrs_num = node->sysctl_num;
1723
1724 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1725 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1726 CTLTYPE_INT, "redir",
1727 SYSCTL_DESCR("input port redirection"),
1728 octeon_eth_sysctl_verify, 0,
1729 &octeon_eth_param_redir,
1730 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1731 CTL_EOL)) != 0) {
1732 goto err;
1733 }
1734
1735 octeon_eth_sysctl_redir_num = node->sysctl_num;
1736
1737 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1738 CTLFLAG_PERMANENT,
1739 CTLTYPE_INT, "pkt_pool",
1740 SYSCTL_DESCR("packet pool available"),
1741 octeon_eth_sysctl_pool, 0, NULL,
1742 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1743 CTL_EOL)) != 0) {
1744 goto err;
1745 }
1746
1747 octeon_eth_sysctl_pkt_pool_num = node->sysctl_num;
1748
1749 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1750 CTLFLAG_PERMANENT,
1751 CTLTYPE_INT, "wqe_pool",
1752 SYSCTL_DESCR("wqe pool available"),
1753 octeon_eth_sysctl_pool, 0, NULL,
1754 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1755 CTL_EOL)) != 0) {
1756 goto err;
1757 }
1758
1759 octeon_eth_sysctl_wqe_pool_num = node->sysctl_num;
1760
1761 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1762 CTLFLAG_PERMANENT,
1763 CTLTYPE_INT, "cmd_pool",
1764 SYSCTL_DESCR("cmd pool available"),
1765 octeon_eth_sysctl_pool, 0, NULL,
1766 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1767 CTL_EOL)) != 0) {
1768 goto err;
1769 }
1770
1771 octeon_eth_sysctl_cmd_pool_num = node->sysctl_num;
1772
1773 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1774 CTLFLAG_PERMANENT,
1775 CTLTYPE_INT, "sg_pool",
1776 SYSCTL_DESCR("sg pool available"),
1777 octeon_eth_sysctl_pool, 0, NULL,
1778 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1779 CTL_EOL)) != 0) {
1780 goto err;
1781 }
1782
1783 octeon_eth_sysctl_sg_pool_num = node->sysctl_num;
1784
1785 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1786 CTLFLAG_PERMANENT | CTLFLAG_READONLY,
1787 CTLTYPE_INT, "pktbuf",
1788 SYSCTL_DESCR("input packet buffer size on POW"),
1789 octeon_eth_sysctl_rd, 0,
1790 &octeon_eth_param_pktbuf,
1791 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1792 CTL_EOL)) != 0) {
1793 goto err;
1794 }
1795
1796 octeon_eth_sysctl_pktbuf_num = node->sysctl_num;
1797
1798 return;
1799
1800 err:
1801 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
1802 }
1803
/*
 * Sysctl handler for the read-write hw.cnmac.* knobs
 * (pko_cmd_w0_n2, pip_dyn_rs, redir).  Validates the new value,
 * stores it into the backing parameter variable, and pushes the
 * change out to the hardware / ifnet state where required:
 *  - pko_cmd_w0_n2, pip_dyn_rs: boolean (0 or 1);
 *  - pip_dyn_rs additionally reprograms PIP on every port;
 *  - redir: a 3-bit destination mask per port packed 4 bits apart;
 *    setting a non-zero mask forces the port promiscuous and
 *    installs the redirect input hook, clearing it restores
 *    ether_input.
 */
static int
octeon_eth_sysctl_verify(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	struct octeon_eth_softc *sc;
	int i;
	int s;

	/* Copy the node and let sysctl_lookup run against a local value. */
	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (node.sysctl_num == octeon_eth_sysctl_pkocmdw0n2_num) {
		/* Boolean knob; no hardware update needed here. */
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		return 0;
	}

	if (node.sysctl_num == octeon_eth_sysctl_pipdynrs_num) {
		/* Boolean knob; propagate to PIP on every port. */
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			sc = octeon_eth_gsc[i];	/* XXX */
			octeon_pip_prt_cfg_enable(sc->sc_pip,
			    PIP_PRT_CFGN_DYN_RS, v);
		}
		splx(s);
		return 0;
	}

	if (node.sysctl_num == octeon_eth_sysctl_redir_num) {
		/* Only bits 0-2 of each per-port nibble may be set. */
		if (v & ~((0x7 << (4 * 0)) | (0x7 << (4 * 1)) | (0x7 << (4 * 2))))
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			struct ifnet *ifp;

			sc = octeon_eth_gsc[i];	/* XXX */
			ifp = &sc->sc_ethercom.ec_if;

			/* Per-port 3-bit destination mask. */
			sc->sc_redir
			    = (octeon_eth_param_redir >> (4 * i)) & 0x7;
			if (sc->sc_redir == 0) {
				/* Redirection off: normal input path. */
				if (ISSET(ifp->if_flags, IFF_PROMISC)) {
					CLR(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = ether_input;
			}
			else {
				/* Redirection on: promiscuous + hook. */
				if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
					SET(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = octeon_eth_recv_redir;
			}
		}
		splx(s);
		return 0;
	}

	return EINVAL;
}
1877
1878 static int
1879 octeon_eth_sysctl_pool(SYSCTLFN_ARGS)
1880 {
1881 int error, newval = 0;
1882 struct sysctlnode node;
1883 int s;
1884
1885 node = *rnode;
1886 node.sysctl_data = &newval;
1887 s = splnet();
1888 if (node.sysctl_num == octeon_eth_sysctl_pkt_pool_num) {
1889 error = octeon_fpa_available_fpa_pool(&newval,
1890 OCTEON_POOL_NO_PKT);
1891 } else if (node.sysctl_num == octeon_eth_sysctl_wqe_pool_num) {
1892 error = octeon_fpa_available_fpa_pool(&newval,
1893 OCTEON_POOL_NO_WQE);
1894 } else if (node.sysctl_num == octeon_eth_sysctl_cmd_pool_num) {
1895 error = octeon_fpa_available_fpa_pool(&newval,
1896 OCTEON_POOL_NO_CMD);
1897 } else if (node.sysctl_num == octeon_eth_sysctl_sg_pool_num) {
1898 error = octeon_fpa_available_fpa_pool(&newval,
1899 OCTEON_POOL_NO_SG);
1900 } else {
1901 splx(s);
1902 return EINVAL;
1903 }
1904 splx(s);
1905 if (error)
1906 return error;
1907 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1908 if (error || newp == NULL)
1909 return error;
1910
1911 return 0;
1912 }
1913
/*
 * Sysctl handler for the read-only hw.cnmac.pktbuf gauge: query the
 * FPA for the current value, cache it in octeon_eth_param_pktbuf and
 * return it to the caller.  Write attempts are rejected by
 * sysctl_lookup (the node is CTLFLAG_READONLY).
 */
static int
octeon_eth_sysctl_rd(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	int s;

	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	/* Runs the generic copy-out of the (stale) cached value first. */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp != NULL)
		return error;

	if (node.sysctl_num == octeon_eth_sysctl_pktbuf_num) {
		uint64_t tmp;
		int n;

		s = splnet();
		tmp = octeon_fpa_query(0);
		n = (int)tmp;
		splx(s);
		*(int *)rnode->sysctl_data = n;
		octeon_eth_param_pktbuf = n;
		/*
		 * NOTE(review): stores the fresh value straight through
		 * oldp after sysctl_lookup() has already copied out the
		 * cached one; oldp is not checked for NULL here and may
		 * not be a kernel address — confirm against sysctl(9)
		 * handler conventions.
		 */
		*(int *)oldp = n;
		return 0;
	}

	return EINVAL;
}
1944