if_cnmac.c revision 1.17 1 /* $NetBSD: if_cnmac.c,v 1.17 2020/02/18 15:00:42 thorpej Exp $ */
2
3 #include <sys/cdefs.h>
4 #if 0
5 __KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.17 2020/02/18 15:00:42 thorpej Exp $");
6 #endif
7
8 #include "opt_octeon.h"
9
10 #ifdef OCTEON_ETH_DEBUG
11
12 #ifndef DIAGNOSTIC
13 #define DIAGNOSTIC
14 #endif
15
16 #ifndef DEBUG
17 #define DEBUG
18 #endif
19
20 #endif
21
22 /*
23 * If no free send buffer is available, free all the sent buffers and bail out.
24 */
25 #define OCTEON_ETH_SEND_QUEUE_CHECK
26
27 /* XXX XXX XXX XXX XXX XXX */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/pool.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/ioctl.h>
37 #include <sys/errno.h>
38 #include <sys/device.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/sysctl.h>
42 #include <sys/syslog.h>
43
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_ether.h>
48 #include <net/route.h>
49 #include <net/bpf.h>
50
51 #include <netinet/in.h>
52 #include <netinet/in_systm.h>
53 #include <netinet/in_var.h>
54 #include <netinet/ip.h>
55
56 #include <sys/bus.h>
57 #include <machine/intr.h>
58 #include <machine/endian.h>
59 #include <machine/locore.h>
60
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63
64 #include <mips/cpuregs.h>
65
66 #include <mips/cavium/dev/octeon_asxreg.h>
67 #include <mips/cavium/dev/octeon_ciureg.h>
68 #include <mips/cavium/dev/octeon_npireg.h>
69 #include <mips/cavium/dev/octeon_gmxreg.h>
70 #include <mips/cavium/dev/octeon_ipdreg.h>
71 #include <mips/cavium/dev/octeon_pipreg.h>
72 #include <mips/cavium/dev/octeon_powreg.h>
73 #include <mips/cavium/dev/octeon_faureg.h>
74 #include <mips/cavium/dev/octeon_fpareg.h>
75 #include <mips/cavium/dev/octeon_bootbusreg.h>
76 #include <mips/cavium/include/iobusvar.h>
77 #include <mips/cavium/octeonvar.h>
78 #include <mips/cavium/dev/octeon_fpavar.h>
79 #include <mips/cavium/dev/octeon_gmxvar.h>
80 #include <mips/cavium/dev/octeon_fauvar.h>
81 #include <mips/cavium/dev/octeon_powvar.h>
82 #include <mips/cavium/dev/octeon_ipdvar.h>
83 #include <mips/cavium/dev/octeon_pipvar.h>
84 #include <mips/cavium/dev/octeon_pkovar.h>
85 #include <mips/cavium/dev/octeon_asxvar.h>
86 #include <mips/cavium/dev/octeon_smivar.h>
87 #include <mips/cavium/dev/if_cnmacvar.h>
88
89 #ifdef OCTEON_ETH_DEBUG
90 #define OCTEON_ETH_KASSERT(x) KASSERT(x)
91 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x)
92 #else
93 #define OCTEON_ETH_KASSERT(x)
94 #define OCTEON_ETH_KDASSERT(x)
95 #endif
96
97 /*
98 * Set the PKO to think command buffers are an odd length. This makes it so we
 * never have to divide a command across two buffers.
100 */
101 #define OCTEON_POOL_NWORDS_CMD \
102 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
103 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */
104
105 static void octeon_eth_buf_init(struct octeon_eth_softc *);
106
107 static int octeon_eth_match(device_t, struct cfdata *, void *);
108 static void octeon_eth_attach(device_t, device_t, void *);
109 static void octeon_eth_pip_init(struct octeon_eth_softc *);
110 static void octeon_eth_ipd_init(struct octeon_eth_softc *);
111 static void octeon_eth_pko_init(struct octeon_eth_softc *);
112 static void octeon_eth_asx_init(struct octeon_eth_softc *);
113 static void octeon_eth_smi_init(struct octeon_eth_softc *);
114
115 static void octeon_eth_board_mac_addr(uint8_t *, size_t,
116 struct octeon_eth_softc *);
117
118 static int octeon_eth_mii_readreg(device_t, int, int, uint16_t *);
119 static int octeon_eth_mii_writereg(device_t, int, int, uint16_t);
120 static void octeon_eth_mii_statchg(struct ifnet *);
121
122 static int octeon_eth_mediainit(struct octeon_eth_softc *);
123 static void octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
124
125 static inline void octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
126 static inline void octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
127 static inline void octeon_eth_send_queue_flush(struct octeon_eth_softc *);
128 static inline void octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
129 static inline int octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
130 static inline void octeon_eth_send_queue_add(struct octeon_eth_softc *,
131 struct mbuf *, uint64_t *);
132 static inline void octeon_eth_send_queue_del(struct octeon_eth_softc *,
133 struct mbuf **, uint64_t **);
134 static inline int octeon_eth_buf_free_work(struct octeon_eth_softc *,
135 uint64_t *, uint64_t);
136 static inline void octeon_eth_buf_ext_free_m(struct mbuf *, void *, size_t,
137 void *);
138 static inline void octeon_eth_buf_ext_free_ext(struct mbuf *, void *, size_t,
139 void *);
140
141 static int octeon_eth_ioctl(struct ifnet *, u_long, void *);
142 static void octeon_eth_watchdog(struct ifnet *);
143 static int octeon_eth_init(struct ifnet *);
144 static void octeon_eth_stop(struct ifnet *, int);
145 static void octeon_eth_start(struct ifnet *);
146
147 static inline int octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
148 uint64_t, int *);
149 static inline uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
150 static inline uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
151 int);
152 static inline int octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
153 struct mbuf *, uint64_t *, int *);
154 static inline int octeon_eth_send_makecmd(struct octeon_eth_softc *,
155 struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
156 static inline int octeon_eth_send_buf(struct octeon_eth_softc *,
157 struct mbuf *, uint64_t *, int *);
158 static inline int octeon_eth_send(struct octeon_eth_softc *,
159 struct mbuf *, int *);
160
161 static int octeon_eth_reset(struct octeon_eth_softc *);
162 static int octeon_eth_configure(struct octeon_eth_softc *);
163 static int octeon_eth_configure_common(struct octeon_eth_softc *);
164
165 static void octeon_eth_tick_free(void *);
166 static void octeon_eth_tick_misc(void *);
167
168 static inline int octeon_eth_recv_mbuf(struct octeon_eth_softc *,
169 uint64_t *, struct mbuf **);
170 static inline int octeon_eth_recv_check_code(struct octeon_eth_softc *,
171 uint64_t);
172 static inline int octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
173 uint64_t);
174 static inline int octeon_eth_recv_check_link(struct octeon_eth_softc *,
175 uint64_t);
176 static inline int octeon_eth_recv_check(struct octeon_eth_softc *,
177 uint64_t);
178 static inline int octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
179 static void octeon_eth_recv_redir(struct ifnet *, struct mbuf *);
180 static inline void octeon_eth_recv_intr(void *, uint64_t *);
181
/* Device driver context */
/* Per-port softc table, filled in at attach; indexed by GMX port number. */
static struct octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
/* Handle of the single, shared POW receive interrupt (established once). */
static void *octeon_eth_pow_recv_ih;

/* sysctl'able parameters */
int octeon_eth_param_pko_cmd_w0_n2 = 1;	/* value of the PKO cmd word0 "n2" field */
int octeon_eth_param_pip_dyn_rs = 1;	/* enable PIP dynamic-short receive */
int octeon_eth_param_redir = 0;		/* packet redirection (see recv_redir) */
int octeon_eth_param_pktbuf = 0;
int octeon_eth_param_rate = 0;
int octeon_eth_param_intr = 0;

CFATTACH_DECL_NEW(cnmac, sizeof(struct octeon_eth_softc),
    octeon_eth_match, octeon_eth_attach, NULL, NULL);
196
197 #ifdef OCTEON_ETH_DEBUG
198
/*
 * Per-softc event counters, attached by OCTEON_EVCNT_ATTACH_EVCNTS() at
 * the end of octeon_eth_attach().  Only compiled under OCTEON_ETH_DEBUG.
 */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx, MISC, NULL, "rx"),
	_ENTRY(rxint, INTR, NULL, "rx intr"),
	_ENTRY(rxrs, MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc, MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput, MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc, MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput, MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode, MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix, MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb, MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink, MISC, NULL, "rx link error"),
	_ENTRY(rxerroff, MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort, MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble, MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc, MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress, MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp, MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok, MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx, MISC, NULL, "tx"),
	_ENTRY(txadd, MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc, MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget, MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc, MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget, MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput, MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel, MISC, NULL, "tx del"),
	_ENTRY(txerr, MISC, NULL, "tx error"),
	_ENTRY(txerrcmd, MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf, MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink, MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd, MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
235 #endif
236
237 /* ---- buffer management */
238
/*
 * FPA pool descriptors (pool number, element size, element count) for
 * the four pools the driver uses; consumed by octeon_eth_buf_init().
 */
static const struct octeon_eth_pool_param {
	int poolno;		/* hardware FPA pool number */
	size_t size;		/* element size, bytes */
	size_t nelems;		/* number of elements to seed */
} octeon_eth_pool_params[] = {
#define	_ENTRY(x) { OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
/*
 * Pool handles filled by octeon_eth_buf_init(); indexed by table position.
 * NOTE(review): the _fb_* macros index by OCTEON_POOL_NO_*, so those
 * constants are assumed to match the table order above — confirm.
 */
struct octeon_fpa_buf *octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg octeon_eth_pools[OCTEON_POOL_NO_SG]
256
257 static void
258 octeon_eth_buf_init(struct octeon_eth_softc *sc)
259 {
260 static int once;
261 int i;
262 const struct octeon_eth_pool_param *pp;
263 struct octeon_fpa_buf *fb;
264
265 if (once == 1)
266 return;
267 once = 1;
268
269 for (i = 0; i < (int)__arraycount(octeon_eth_pool_params); i++) {
270 pp = &octeon_eth_pool_params[i];
271 octeon_fpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
272 octeon_eth_pools[i] = fb;
273 }
274 }
275
276 /* ---- autoconf */
277
278 static int
279 octeon_eth_match(device_t parent, struct cfdata *match, void *aux)
280 {
281 struct octeon_gmx_attach_args *ga = aux;
282
283 if (strcmp(match->cf_name, ga->ga_name) != 0) {
284 return 0;
285 }
286 return 1;
287 }
288
/*
 * Autoconf attach: bring up one GMX port as an Ethernet interface.
 *
 * Copies per-port parameters out of the GMX attach args, initializes the
 * on-chip packet-path sub-blocks (PIP/IPD/PKO/ASX/SMI), records the softc
 * for the shared POW interrupt, and attaches the ifnet/MII/ether layers.
 * The statement order follows hardware dependencies; do not rearrange.
 */
static void
octeon_eth_attach(device_t parent, device_t self, void *aux)
{
	struct octeon_eth_softc *sc = device_private(self);
	struct octeon_gmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_regt = ga->ga_regt;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;

	sc->sc_init_flag = 0;
	/*
	 * XXXUEBAYASI
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	/* CN30XX-class (and earlier) silicon needs software workarounds. */
	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_PRE_ALIGN);
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_RX_INBND);
	}

	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc);
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	/* Make this softc reachable from the shared POW receive interrupt. */
	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	octeon_gmx_stats_init(sc->sc_gmx_port);

	callout_init(&sc->sc_tick_misc_ch, 0);
	callout_init(&sc->sc_tick_free_ch, 0);

	/* FAU register used to account for completed PKO send commands. */
	octeon_fau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	octeon_fau_op_set_8(&sc->sc_fau_done, 0);

	/* Per-port init of the on-chip packet-path sub-blocks. */
	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Cross-link the GMX port context with our sub-block handles. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
	/* XXX */
	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;

	/* XXX */
	sc->sc_pow = &octeon_pow_softc;

	octeon_eth_mediainit(sc);

	strncpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	ifp->if_init = octeon_eth_init;
	ifp->if_stop = octeon_eth_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX: not yet tx checksum */
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	octeon_gmx_set_mac_addr(sc->sc_gmx_port, enaddr);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	octeon_gmx_set_filter(sc->sc_gmx_port);

	/* XXX */
	/* One-second rate caps for the various receive error/log paths. */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_short_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_preamble_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_crc_cap.tv_sec = 1;
#ifdef OCTEON_ETH_DEBUG
	sc->sc_rate_recv_fixup_odd_nibble_addr_cap.tv_sec = 1;
#endif
	/* XXX */

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* The POW receive interrupt is shared; establish it only once. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih
		    = octeon_pow_intr_establish(OCTEON_POW_GROUP_PIP,
			IPL_NET, octeon_eth_recv_intr, NULL, NULL);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    device_xname(sc->sc_dev));
}
403
404 /* ---- submodules */
405
406 /* XXX */
407 static void
408 octeon_eth_pip_init(struct octeon_eth_softc *sc)
409 {
410 struct octeon_pip_attach_args pip_aa;
411
412 pip_aa.aa_port = sc->sc_port;
413 pip_aa.aa_regt = sc->sc_regt;
414 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
415 pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
416 pip_aa.aa_ip_offset = sc->sc_ip_offset;
417 octeon_pip_init(&pip_aa, &sc->sc_pip);
418 }
419
420 /* XXX */
421 static void
422 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
423 {
424 struct octeon_ipd_attach_args ipd_aa;
425
426 ipd_aa.aa_port = sc->sc_port;
427 ipd_aa.aa_regt = sc->sc_regt;
428 ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
429 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
430 octeon_ipd_init(&ipd_aa, &sc->sc_ipd);
431 }
432
433 /* XXX */
434 static void
435 octeon_eth_pko_init(struct octeon_eth_softc *sc)
436 {
437 struct octeon_pko_attach_args pko_aa;
438
439 pko_aa.aa_port = sc->sc_port;
440 pko_aa.aa_regt = sc->sc_regt;
441 pko_aa.aa_cmdptr = &sc->sc_cmdptr;
442 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
443 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
444 octeon_pko_init(&pko_aa, &sc->sc_pko);
445 }
446
447 /* XXX */
448 static void
449 octeon_eth_asx_init(struct octeon_eth_softc *sc)
450 {
451 struct octeon_asx_attach_args asx_aa;
452
453 asx_aa.aa_port = sc->sc_port;
454 asx_aa.aa_regt = sc->sc_regt;
455 octeon_asx_init(&asx_aa, &sc->sc_asx);
456 }
457
458 static void
459 octeon_eth_smi_init(struct octeon_eth_softc *sc)
460 {
461 struct octeon_smi_attach_args smi_aa;
462
463 smi_aa.aa_port = sc->sc_port;
464 smi_aa.aa_regt = sc->sc_regt;
465 octeon_smi_init(&smi_aa, &sc->sc_smi);
466 octeon_smi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
467 }
468
469 /* ---- XXX */
470
/*
 * Convert between a 6-byte Ethernet address array and the 48-bit value
 * packed into the low bits of a uint64_t (byte 0 is most significant).
 */
#define ADDR2UINT64(u, a) \
	do { \
		u = \
		    (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
		     ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
		     ((uint64_t)a[4] << 8) | ((uint64_t)a[5] << 0)); \
	} while (0)
#define UINT642ADDR(a, u) \
	do { \
		a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
		a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
		a[4] = (uint8_t)((u) >> 8); a[5] = (uint8_t)((u) >> 0); \
	} while (0)
484
485 static void
486 octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size,
487 struct octeon_eth_softc *sc)
488 {
489 prop_dictionary_t dict;
490 prop_data_t ea;
491
492 dict = device_properties(sc->sc_dev);
493 KASSERT(dict != NULL);
494 ea = prop_dictionary_get(dict, "mac-address");
495 KASSERT(ea != NULL);
496 memcpy(enaddr, prop_data_data_nocopy(ea), size);
497 }
498
499 /* ---- media */
500
501 static int
502 octeon_eth_mii_readreg(device_t self, int phy_addr, int reg, uint16_t *val)
503 {
504 struct octeon_eth_softc *sc = device_private(self);
505
506 return octeon_smi_read(sc->sc_smi, phy_addr, reg, val);
507 }
508
509 static int
510 octeon_eth_mii_writereg(device_t self, int phy_addr, int reg, uint16_t val)
511 {
512 struct octeon_eth_softc *sc = device_private(self);
513
514 return octeon_smi_write(sc->sc_smi, phy_addr, reg, val);
515 }
516
/*
 * MII status-change callback: quiesce PKO and the GMX port, reset the
 * MAC, reload the RX filter if the interface is running, then re-enable.
 * The disable/reset/enable ordering is required by the hardware.
 */
static void
octeon_eth_mii_statchg(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	octeon_pko_port_enable(sc->sc_pko, 0);
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		octeon_gmx_set_filter(sc->sc_gmx_port);

	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
}
533
/*
 * Set up the MII/ifmedia layer: register the read/write/statchg
 * callbacks, attach the PHY found at the "phy-addr" device property,
 * and select a default medium (AUTO when a PHY answered, otherwise a
 * placeholder NONE entry).  Always returns 0.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	prop_object_t phy;

	mii->mii_ifp = ifp;
	mii->mii_readreg = octeon_eth_mii_readreg;
	mii->mii_writereg = octeon_eth_mii_writereg;
	mii->mii_statchg = octeon_eth_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;

	/* Initialize ifmedia structures. */
	ifmedia_init(&mii->mii_media, 0, ether_mediachange,
	    octeon_eth_mediastatus);

	/* PHY address is supplied per-port by the board glue. */
	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
	KASSERT(phy != NULL);

	mii_attach(sc->sc_dev, mii, 0xffffffff, prop_number_integer_value(phy),
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* XXX XXX XXX */
	if (LIST_FIRST(&mii->mii_phys) != NULL) {
		/* XXX XXX XXX */
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
		/* XXX XXX XXX */
	} else {
		/* XXX XXX XXX */
		/* No PHY found: expose a "none" medium so ioctls still work. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		/* XXX XXX XXX */
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
		/* XXX XXX XXX */
	}
	/* XXX XXX XXX */

	return 0;
}
574
575 static void
576 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
577 {
578 struct octeon_eth_softc *sc = ifp->if_softc;
579
580 mii_pollstat(&sc->sc_mii);
581
582 ifmr->ifm_status = sc->sc_mii.mii_media_status;
583 ifmr->ifm_active = sc->sc_mii.mii_media_active;
584 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
585 sc->sc_gmx_port->sc_port_flowflags;
586 }
587
588 /* ---- send buffer garbage collection */
589
/*
 * Start an asynchronous fetch of the FAU "done" counter so its value can
 * be collected later without stalling; must be paired with a subsequent
 * octeon_eth_send_queue_flush_fetch().  sc_prefetch tracks the pairing.
 */
static inline void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
597
/*
 * Collect the FAU counter value requested by the matching
 * octeon_eth_send_queue_flush_prefetch() into sc_hard_done_cnt (always
 * <= 0; its magnitude is the number of hardware-completed commands).
 */
static inline void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	/* Non-DEBUG builds silently tolerate a fetch with no prefetch. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
610
611 static inline void
612 octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
613 {
614 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
615 const int64_t sent_count = sc->sc_hard_done_cnt;
616 int i;
617
618 OCTEON_ETH_KASSERT(sc->sc_flush == 0);
619 OCTEON_ETH_KASSERT(sent_count <= 0);
620
621 for (i = 0; i < 0 - sent_count; i++) {
622 struct mbuf *m;
623 uint64_t *gbuf;
624
625 octeon_eth_send_queue_del(sc, &m, &gbuf);
626
627 octeon_fpa_buf_put(octeon_eth_fb_sg, gbuf);
628 OCTEON_EVCNT_INC(sc, txbufgbput);
629
630 m_freem(m);
631
632 CLR(ifp->if_flags, IFF_OACTIVE);
633 }
634
635 octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, i);
636 sc->sc_flush = i;
637 }
638
/*
 * Complete a flush started by octeon_eth_send_queue_flush(): consume the
 * FAU add issued there and retire the flushed requests from the software
 * request counter.  No-op when nothing was flushed.
 */
static inline void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX XXX XXX */
	octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX XXX XXX */

	sc->sc_flush = 0;
}
655
/*
 * Report whether the software send queue has reached its capacity.
 * When it has, reclaim everything the hardware already finished and
 * return 1 so the caller backs off; otherwise (or when the check is
 * compiled out) return 0.
 */
static inline int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	const int64_t inflight =
	    sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(inflight == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
674
675 /*
676 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
677 * buffer. Other mbuf members may be used by m_freem(), so don't touch them!
678 */
679
/*
 * View of an mbuf used as a send-queue node: the SIMPLEQ link is laid
 * over m_nextpkt and the gather-buffer pointer over m_paddr, via padded
 * union members.  The padding arrays position each overlay field at the
 * exact offset of the corresponding mbuf member.
 */
struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;	/* the mbuf itself */
		struct {
			/* link overlays m_nextpkt */
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			/* gather-buffer pointer overlays m_paddr */
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define _sqe_entry _sqe_u._sqe_s_entry._sqe_s_entry_entry
#define _sqe_gbuf _sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
695
696 static inline void
697 octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
698 uint64_t *gbuf)
699 {
700 struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;
701
702 sqe->_sqe_gbuf = gbuf;
703 SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);
704
705 if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
706 sc->sc_ext_callback_cnt++;
707
708 OCTEON_EVCNT_INC(sc, txadd);
709 }
710
711 static inline void
712 octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
713 uint64_t **rgbuf)
714 {
715 struct _send_queue_entry *sqe;
716
717 sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
718 OCTEON_ETH_KASSERT(sqe != NULL);
719 SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);
720
721 *rm = (void *)sqe;
722 *rgbuf = sqe->_sqe_gbuf;
723
724 if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
725 sc->sc_ext_callback_cnt--;
726 OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
727 }
728
729 OCTEON_EVCNT_INC(sc, txdel);
730 }
731
/*
 * Return the receive resources behind a work queue entry to their FPA
 * pools: the packet data buffer (when WORD2 indicates packet buffers are
 * attached) and then the WQE itself.  Always returns 0.
 */
static inline int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = work[3] & PIP_WQE_WORD3_ADDR;
		/*
		 * NOTE(review): rounds down to a 2 KiB boundary — assumes
		 * 2048-byte packet buffers; confirm against
		 * OCTEON_POOL_SIZE_PKT.
		 */
		start_buffer = addr & ~(2048 - 1);

		octeon_fpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
753
/*
 * External-storage free callback used when the packet data lives inside
 * the WQE itself: return the WQE to its FPA pool and the mbuf to the
 * mbuf cache.  Runs under splnet().
 */
static inline void
octeon_eth_buf_ext_free_m(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/*
	 * work[0] carries the owning softc — presumably stored by the
	 * receive path; verify against octeon_eth_recv_mbuf().  `sc' is
	 * only declared here, so OCTEON_EVCNT_INC must compile away in
	 * non-DEBUG builds.
	 */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
774
/*
 * External-storage free callback used when the packet data lives in a
 * separate FPA packet buffer: return both the WQE and the packet buffer
 * to their pools, then the mbuf to the mbuf cache.  Runs under splnet().
 */
static inline void
octeon_eth_buf_ext_free_ext(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* See octeon_eth_buf_ext_free_m() for the work[0]/sc convention. */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	octeon_fpa_buf_put(octeon_eth_fb_pkt, buf);
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
796
797 /* ---- ifnet interfaces */
798
/*
 * ifnet ioctl handler, bracketed by splnet().  Only SIOCSIFMEDIA needs
 * driver-specific handling (flow-control flag massaging); everything
 * else is delegated to ether_ioctl(), refreshing the hardware RX filter
 * when the multicast list changed.
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW is shorthand for both pause directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				octeon_gmx_set_filter(sc->sc_gmx_port);
			error = 0;
		}
		break;
	}
	/* Kick the transmit path in case packets queued while we ran. */
	octeon_eth_start(ifp);
	splx(s);

	return error;
}
842
843 /* ---- send (output) */
844
/*
 * Build PKO command word 0 for a transmit command.
 *
 * fau0/fau1: FAU register numbers the PKO updates on completion (used by
 * the sc_fau_done accounting); len: total packet length in bytes; segs:
 * number of gather segments — a single segment selects direct (non-
 * gather) mode via the "g" bit.
 */
static inline uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return octeon_pko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,			/* sz1 */
	    OCT_FAU_OP_SIZE_64,			/* sz0 */
	    1, fau1, 1, fau0,			/* s1, reg1, s0, reg0 */
	    0,					/* le */
	    octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
	    1, 0,				/* q, r */
	    (segs == 1) ? 0 : 1,		/* g */
	    0, 0, 1,				/* ipoffp1, ii, df */
	    segs, (int)len);			/* segs, totalbytes */
}
859
/*
 * Build PKO command (or gather-list) word 1: a physical buffer address
 * and size, tagged with the FPA gather pool the buffer belongs to.
 */
static inline uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return octeon_pko_cmd_word1(
	    0, 0,			/* i, back */
	    FPA_GATHER_BUFFER_POOL,	/* pool */
	    size, addr);		/* size, addr */
}
868
/*
 * Translate mbuf chain m0 into PKO gather word-1 descriptors in gbuf,
 * one per physically contiguous run.  An mbuf whose data crosses a
 * 4 KiB page boundary is emitted as two descriptors (virtual contiguity
 * does not imply physical contiguity across a page boundary).  Returns
 * 1 when more than 63 descriptors would be needed, otherwise 0 with the
 * descriptor count stored in *rsegs.
 */
static inline int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uintptr_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		/* Zero-length mbufs contribute no descriptor. */
		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		    == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/* Aligned 4k */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX XXX XXX */
			/* First part: up to the end of the current page. */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    kvtophys((vaddr_t)m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX XXX XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* Remainder (or the whole mbuf when it fits in one page). */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    kvtophys((vaddr_t)(m->m_data + rlen)));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
920
/*
 * Build the two PKO command words for mbuf chain m.  Gather descriptors
 * are written to gbuf; the finished words are returned through
 * *rpko_cmd_w0 / *rpko_cmd_w1.  Returns nonzero (with a log message)
 * when the chain needs more segments than fit in a gather buffer.
 */
static inline int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: there are a lot of number of segments"
		    " of transmission data", device_xname(sc->sc_dev));
		result = 1;
		goto done;
	}

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs);
	if (segs == 1) {
		/* Direct mode: WORD1 points straight at the packet data. */
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(
		    m->m_pkthdr.len, kvtophys((vaddr_t)m->m_data));
	} else {
		/* Gather mode: WORD1 points at the gather descriptor list. */
#ifdef __mips_n32
		KASSERT(MIPS_KSEG0_P(gbuf));
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
		    MIPS_KSEG0_TO_PHYS(gbuf));
#else
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
		    MIPS_XKPHYS_TO_PHYS(gbuf));
#endif
	}

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
965
/*
 * Append a two-word PKO command to the current command buffer.
 *
 * The command buffer is an FPA (CMD pool) buffer addressed by
 * sc_cmdptr.  The last word of each buffer is reserved as a link: when
 * the write would reach it, a fresh buffer is allocated and its physical
 * address stored there, chaining the buffers.  *pwdc is incremented by
 * the words written so the caller can ring the PKO doorbell once for a
 * whole batch.  Returns nonzero if no new command buffer was available.
 */
static inline int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1, int *pwdc)
{
	uint64_t *cmdptr;
	int result = 0;

	/* Map the physical command-buffer address to a usable KVA. */
#ifdef __mips_n32
	KASSERT((sc->sc_cmdptr.cmdptr & ~MIPS_PHYS_MASK) == 0);
	cmdptr = (uint64_t *)MIPS_PHYS_TO_KSEG0(sc->sc_cmdptr.cmdptr);
#else
	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
#endif
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		/* Buffer exhausted: chain a fresh one via the link word. */
		buf = octeon_fpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    device_xname(sc->sc_dev));
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	*pwdc += 2;

done:
	return result;
}
1012
1013 static inline int
1014 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1015 uint64_t *gbuf, int *pwdc)
1016 {
1017 int result = 0, error;
1018 uint64_t pko_cmd_w0, pko_cmd_w1;
1019
1020 error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1021 if (error != 0) {
1022 /* Already logging */
1023 OCTEON_EVCNT_INC(sc, txerrmkcmd);
1024 result = error;
1025 goto done;
1026 }
1027
1028 error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1, pwdc);
1029 if (error != 0) {
1030 /* Already logging */
1031 OCTEON_EVCNT_INC(sc, txerrcmd);
1032 result = error;
1033 }
1034
1035 done:
1036 return result;
1037 }
1038
/*
 * Transmit one mbuf chain: allocate a gather buffer from the SG pool,
 * build and enqueue the PKO command, and on success hand (m, gbuf) to
 * the software send queue for later reclamation.  On failure the gather
 * buffer is returned to its pool and nonzero is returned; the caller
 * keeps ownership of (and must free) the mbuf.
 */
static inline int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m, int *pwdc)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	OCTEON_EVCNT_INC(sc, tx);

	gaddr = octeon_fpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING, "%s: can not allocate gather buffer from "
		    "free pool allocator\n", device_xname(sc->sc_dev));
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		result = 1;
		goto done;
	}
	OCTEON_EVCNT_INC(sc, txbufgbget);

	/* Map the physical gather buffer to a directly usable KVA. */
#ifdef __mips_n32
	KASSERT((gaddr & ~MIPS_PHYS_MASK) == 0);
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_KSEG0(gaddr);
#else
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);
#endif

	OCTEON_ETH_KASSERT(gbuf != NULL);

	error = octeon_eth_send_buf(sc, m, gbuf, pwdc);
	if (error != 0) {
		/* Already logging */
		octeon_fpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		OCTEON_EVCNT_INC(sc, txbufgbput);
		result = error;
		goto done;
	}

	/* Ownership of m and gbuf passes to the send queue here. */
	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
1081
/*
 * ifnet if_start handler: drain the interface send queue into the PKO.
 *
 * The PKO doorbell is written at most once per call with the
 * accumulated word count "wdc", rather than once per packet.
 *
 * NOTE(review): each octeon_eth_send_queue_flush_prefetch() posts an
 * iobdma request that must be completed by a matching
 * octeon_eth_send_queue_flush_fetch() before the next prefetch is
 * issued; all exit paths below preserve that pairing — do not reorder.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int wdc = 0;		/* PKO command words queued, for doorbell */

	/*
	 * Performance tuning
	 * pre-send iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port))) {
		/* Link is down: dequeue and drop everything queued. */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* Complete the iobdma prefetch issued before this iteration. */
		octeon_eth_send_queue_flush_fetch(sc);

		/*
		 * If no free send buffer is available, free all the sent
		 * buffers and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			SET(ifp->if_flags, IFF_OACTIVE);
			/* Ring the doorbell for commands queued so far. */
			if (wdc > 0)
				octeon_pko_op_doorbell_write(sc->sc_port,
				    sc->sc_port, wdc);
			return;
		}
		/* XXX XXX XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Reap completed transmissions before queueing more. */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m, &wdc)) {
			IF_DROP(&ifp->if_snd);
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed in the transmission of the packet\n",
			    device_xname(sc->sc_dev));
			OCTEON_EVCNT_INC(sc, txerr);
		} else
			sc->sc_soft_req_cnt++;

		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX XXX XXX */

		/* Send next iobdma request */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

	if (wdc > 0)
		octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);

	/*
	 * Don't schedule send-buffer-free callout every time - those buffers are freed
	 * by "free tick". This makes some packets like NFS slower.
	 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* Complete the outstanding iobdma prefetch before returning. */
	octeon_eth_send_queue_flush_fetch(sc);
}
1181
1182 static void
1183 octeon_eth_watchdog(struct ifnet *ifp)
1184 {
1185 struct octeon_eth_softc *sc = ifp->if_softc;
1186
1187 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1188
1189 octeon_eth_configure(sc);
1190
1191 SET(ifp->if_flags, IFF_RUNNING);
1192 CLR(ifp->if_flags, IFF_OACTIVE);
1193 ifp->if_timer = 0;
1194
1195 octeon_eth_start(ifp);
1196 }
1197
1198 static int
1199 octeon_eth_init(struct ifnet *ifp)
1200 {
1201 struct octeon_eth_softc *sc = ifp->if_softc;
1202
1203 /* XXX don't disable commonly used parts!!! XXX */
1204 if (sc->sc_init_flag == 0) {
1205 /* Cancel any pending I/O. */
1206 octeon_eth_stop(ifp, 0);
1207
1208 /* Initialize the device */
1209 octeon_eth_configure(sc);
1210
1211 octeon_pko_enable(sc->sc_pko);
1212 octeon_ipd_enable(sc->sc_ipd);
1213
1214 sc->sc_init_flag = 1;
1215 } else {
1216 octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1217 }
1218 mii_ifmedia_change(&sc->sc_mii);
1219
1220 octeon_gmx_set_filter(sc->sc_gmx_port);
1221
1222 callout_reset(&sc->sc_tick_misc_ch, hz, octeon_eth_tick_misc, sc);
1223 callout_reset(&sc->sc_tick_free_ch, hz, octeon_eth_tick_free, sc);
1224
1225 SET(ifp->if_flags, IFF_RUNNING);
1226 CLR(ifp->if_flags, IFF_OACTIVE);
1227
1228 return 0;
1229 }
1230
1231 static void
1232 octeon_eth_stop(struct ifnet *ifp, int disable)
1233 {
1234 struct octeon_eth_softc *sc = ifp->if_softc;
1235
1236 callout_stop(&sc->sc_tick_misc_ch);
1237 callout_stop(&sc->sc_tick_free_ch);
1238
1239 mii_down(&sc->sc_mii);
1240
1241 octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1242
1243 /* Mark the interface as down and cancel the watchdog timer. */
1244 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1245 ifp->if_timer = 0;
1246 }
1247
1248 /* ---- misc */
1249
1250 #define PKO_INDEX_MASK ((1ULL << 12/* XXX */) - 1)
1251
/*
 * Reprogram the GMX port speed, flow control and timing parameters.
 * Always returns 0.
 */
static int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	octeon_gmx_reset_speed(sc->sc_gmx_port);
	octeon_gmx_reset_flowctl(sc->sc_gmx_port);
	octeon_gmx_reset_timing(sc->sc_gmx_port);

	return 0;
}
1261
/*
 * Full (re)configuration of one port: GMX reset plus PKO/PIP port
 * setup, with the GMX port disabled for the duration.  Always
 * returns 0.
 */
static int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	/* Quiesce the port while reprogramming it. */
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* Chip-wide IPD/PKO/POW setup (a no-op after the first call). */
	octeon_eth_configure_common(sc);

	octeon_pko_port_config(sc->sc_pko);
	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_pip_port_config(sc->sc_pip);

	/* Make the GMX statistics registers clear-on-read. */
	octeon_gmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	octeon_gmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	octeon_gmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1282
/*
 * One-time initialization of the chip-wide (not per-port) blocks:
 * IPD, PKO and POW.  Guarded by "once" so only the first caller does
 * the work.  Always returns 0.
 */
static int
octeon_eth_configure_common(struct octeon_eth_softc *sc)
{
	static int once;

	if (once == 1)
		return 0;
	once = 1;

	octeon_ipd_config(sc->sc_ipd);
#ifdef OCTEON_ETH_IPD_RED
	/* Random Early Drop thresholds for the input queue. */
	octeon_ipd_red(sc->sc_ipd, RECV_QUEUE_SIZE >> 2, RECV_QUEUE_SIZE >> 3);
#endif
	octeon_pko_config(sc->sc_pko);

	octeon_pow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);

	return 0;
}
1302
1303 /* ---- receive (input) */
1304
/*
 * Build an mbuf around a received packet described by the POW work
 * queue entry "work".
 *
 * Two layouts are handled:
 *  - "dynamic short": WORD2 IP_BUFS == 0 means the packet data is
 *    embedded in the work entry itself; the 96-byte area at work[4]
 *    becomes the external storage.
 *  - normal: WORD3 points at a separate 2KB packet buffer.
 *
 * In both cases the storage is attached to the mbuf as external
 * storage and released through the ext_free callback, so on success
 * the caller must not touch "work" again.
 *
 * Returns 0 with the new mbuf in *rm, or 1 if mbuf allocation failed
 * (caller keeps ownership of "work").
 */
static inline int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(struct mbuf *, void *, size_t, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short: data lives inside the work entry. */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

#ifdef __mips_n32
		KASSERT((word3 & ~MIPS_PHYS_MASK) == 0);
		addr = MIPS_PHYS_TO_KSEG0(word3 & PIP_WQE_WORD3_ADDR);
#else
		addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
#endif
		/* Packet buffers are 2KB-aligned; recover the buffer start. */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* Embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* Packet length occupies the top bits (48..) of WORD1. */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m_set_rcvif(m, &sc->sc_ethercom.ec_if);

	/* Not readonly buffer */
	m->m_flags |= M_EXT_RW;

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1368
1369 static inline int
1370 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1371 {
1372 uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1373
1374 if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1375 return 0;
1376
1377 /* This error is harmless */
1378 if (opecode == PIP_OVER_ERR)
1379 return 0;
1380
1381 return 1;
1382 }
1383
1384 static inline int
1385 octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
1386 {
1387 if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
1388 return 1;
1389 return 0;
1390 }
1391
1392 static inline int
1393 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1394 {
1395 if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port)))
1396 return 1;
1397 return 0;
1398 }
1399
1400 static inline int
1401 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1402 {
1403 if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1404 if (ratecheck(&sc->sc_rate_recv_check_link_last,
1405 &sc->sc_rate_recv_check_link_cap))
1406 log(LOG_DEBUG,
1407 "%s: link is not up, the packet was dropped\n",
1408 device_xname(sc->sc_dev));
1409 OCTEON_EVCNT_INC(sc, rxerrlink);
1410 return 1;
1411 }
1412
1413 #if 0 /* XXX Performance tuning (Jumbo-frame is not supported yet!) */
1414 if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1415 /* XXX jumbo frame */
1416 if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1417 &sc->sc_rate_recv_check_jumbo_cap))
1418 log(LOG_DEBUG,
1419 "jumbo frame was received\n");
1420 OCTEON_EVCNT_INC(sc, rxerrjmb);
1421 return 1;
1422 }
1423 #endif
1424
1425 if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1426
1427 if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1428 PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1429 /* No logging */
1430 /* XXX increment special error count */
1431 } else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1432 PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1433 /* Not an error, it's because of overload */
1434 } else {
1435
1436 if (ratecheck(&sc->sc_rate_recv_check_code_last,
1437 &sc->sc_rate_recv_check_code_cap))
1438 log(LOG_WARNING,
1439 "%s: reception error, packet dropped "
1440 "(error code = %" PRId64 ")\n",
1441 device_xname(sc->sc_dev), word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1442 }
1443 OCTEON_EVCNT_INC(sc, rxerrcode);
1444 return 1;
1445 }
1446
1447 return 0;
1448 }
1449
/*
 * Process one received work queue entry: validate it, wrap it in an
 * mbuf, and hand it to the network stack.
 *
 * On success the entry's storage passes to the mbuf's external
 * storage (released by the ext_free callback); on error the entry
 * and its packet buffer are returned to the FPA pools here.
 *
 * While in the packet path this also opportunistically reaps the
 * transmit queue — the iobdma prefetch issued at the top MUST be
 * completed by a matching fetch on every exit path (see the "drop"
 * label).
 *
 * Returns 0 on success, 1 when the packet was dropped.
 */
static inline int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX XXX XXX */
	/*
	 * Performance tuning
	 * pre-send iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX XXX XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_ethercom.ec_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		if_statinc(ifp, if_ierrors);
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		if_statinc(ifp, if_ierrors);
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	octeon_ipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* Complete the prefetch from above and reap finished sends. */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX XXX XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX XXX XXX */

	if_percpuq_enqueue(ifp->if_percpuq, m);

	return 0;

drop:
	/* Complete the iobdma prefetch issued above before returning. */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX XXX XXX */

	return result;
}
1522
1523 static void
1524 octeon_eth_recv_redir(struct ifnet *ifp, struct mbuf *m)
1525 {
1526 struct octeon_eth_softc *rsc = ifp->if_softc;
1527 struct octeon_eth_softc *sc = NULL;
1528 int i, wdc = 0;
1529
1530 for (i = 0; i < 3 /* XXX */; i++) {
1531 if (rsc->sc_redir & (1 << i))
1532 sc = octeon_eth_gsc[i];
1533 }
1534
1535 if (sc == NULL) {
1536 m_freem(m);
1537 return;
1538 }
1539 octeon_eth_send_queue_flush_prefetch(sc);
1540
1541 octeon_eth_send_queue_flush_fetch(sc);
1542
1543 if (octeon_eth_send_queue_is_full(sc)) {
1544 m_freem(m);
1545 return;
1546 }
1547 if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
1548 octeon_eth_send_queue_flush(sc);
1549
1550 if (octeon_eth_send(sc, m, &wdc)) {
1551 IF_DROP(&ifp->if_snd);
1552 m_freem(m);
1553 } else {
1554 octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);
1555 sc->sc_soft_req_cnt++;
1556 }
1557
1558 if (sc->sc_flush)
1559 octeon_eth_send_queue_flush_sync(sc);
1560 }
1561
1562 static inline void
1563 octeon_eth_recv_intr(void *data, uint64_t *work)
1564 {
1565 struct octeon_eth_softc *sc;
1566 int port;
1567
1568 OCTEON_ETH_KASSERT(work != NULL);
1569
1570 port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1571
1572 OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1573
1574 sc = octeon_eth_gsc[port];
1575
1576 OCTEON_ETH_KASSERT(sc != NULL);
1577 OCTEON_ETH_KASSERT(port == sc->sc_port);
1578
1579 /* XXX process all work queue entries anyway */
1580
1581 (void)octeon_eth_recv(sc, work);
1582 }
1583
1584 /* ---- tick */
1585
1586 /*
1587 * octeon_eth_tick_free
1588 *
1589 * => garbage collect send gather buffer / mbuf
1590 * => called at softclock
1591 */
1592 static void
1593 octeon_eth_tick_free(void *arg)
1594 {
1595 struct octeon_eth_softc *sc = arg;
1596 int timo;
1597 int s;
1598
1599 s = splnet();
1600 /* XXX XXX XXX */
1601 if (sc->sc_soft_req_cnt > 0) {
1602 octeon_eth_send_queue_flush_prefetch(sc);
1603 octeon_eth_send_queue_flush_fetch(sc);
1604 octeon_eth_send_queue_flush(sc);
1605 octeon_eth_send_queue_flush_sync(sc);
1606 }
1607 /* XXX XXX XXX */
1608
1609 /* XXX XXX XXX */
1610 /* ??? */
1611 timo = hz - (100 * sc->sc_ext_callback_cnt);
1612 if (timo < 10)
1613 timo = 10;
1614 callout_schedule(&sc->sc_tick_free_ch, timo);
1615 /* XXX XXX XXX */
1616 splx(s);
1617 }
1618
1619 /*
1620 * octeon_eth_tick_misc
1621 *
1622 * => collect statistics
1623 * => check link status
1624 * => called at softclock
1625 */
1626 static void
1627 octeon_eth_tick_misc(void *arg)
1628 {
1629 struct octeon_eth_softc *sc = arg;
1630 struct ifnet *ifp;
1631 int s;
1632
1633 s = splnet();
1634
1635 ifp = &sc->sc_ethercom.ec_if;
1636
1637 octeon_gmx_stats(sc->sc_gmx_port);
1638 octeon_pip_stats(sc->sc_pip, ifp, sc->sc_port);
1639 mii_tick(&sc->sc_mii);
1640
1641 splx(s);
1642
1643 callout_schedule(&sc->sc_tick_misc_ch, hz);
1644 }
1645
1646 /* ---- Odd nibble preamble workaround (software CRC processing) */
1647
1648 /* ---- sysctl */
1649
1650 static int octeon_eth_sysctl_verify(SYSCTLFN_ARGS);
1651 static int octeon_eth_sysctl_pool(SYSCTLFN_ARGS);
1652 static int octeon_eth_sysctl_rd(SYSCTLFN_ARGS);
1653
1654 static int octeon_eth_sysctl_pkocmdw0n2_num;
1655 static int octeon_eth_sysctl_pipdynrs_num;
1656 static int octeon_eth_sysctl_redir_num;
1657 static int octeon_eth_sysctl_pkt_pool_num;
1658 static int octeon_eth_sysctl_wqe_pool_num;
1659 static int octeon_eth_sysctl_cmd_pool_num;
1660 static int octeon_eth_sysctl_sg_pool_num;
1661 static int octeon_eth_sysctl_pktbuf_num;
1662
1663 /*
1664 * Set up sysctl(3) MIB, hw.cnmac.*.
1665 */
1666 SYSCTL_SETUP(sysctl_octeon_eth, "sysctl cnmac subtree setup")
1667 {
1668 int rc;
1669 int octeon_eth_sysctl_root_num;
1670 const struct sysctlnode *node;
1671
1672 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
1673 0, CTLTYPE_NODE, "hw", NULL,
1674 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
1675 goto err;
1676 }
1677
1678 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1679 0, CTLTYPE_NODE, "cnmac",
1680 SYSCTL_DESCR("cnmac interface controls"),
1681 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
1682 goto err;
1683 }
1684
1685 octeon_eth_sysctl_root_num = node->sysctl_num;
1686
1687 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1688 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1689 CTLTYPE_INT, "pko_cmd_w0_n2",
1690 SYSCTL_DESCR("PKO command WORD0 N2 bit"),
1691 octeon_eth_sysctl_verify, 0,
1692 &octeon_eth_param_pko_cmd_w0_n2,
1693 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1694 CTL_EOL)) != 0) {
1695 goto err;
1696 }
1697
1698 octeon_eth_sysctl_pkocmdw0n2_num = node->sysctl_num;
1699
1700 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1701 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1702 CTLTYPE_INT, "pip_dyn_rs",
1703 SYSCTL_DESCR("PIP dynamic short in WQE"),
1704 octeon_eth_sysctl_verify, 0,
1705 &octeon_eth_param_pip_dyn_rs,
1706 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1707 CTL_EOL)) != 0) {
1708 goto err;
1709 }
1710
1711 octeon_eth_sysctl_pipdynrs_num = node->sysctl_num;
1712
1713 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1714 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1715 CTLTYPE_INT, "redir",
1716 SYSCTL_DESCR("input port redirection"),
1717 octeon_eth_sysctl_verify, 0,
1718 &octeon_eth_param_redir,
1719 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1720 CTL_EOL)) != 0) {
1721 goto err;
1722 }
1723
1724 octeon_eth_sysctl_redir_num = node->sysctl_num;
1725
1726 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1727 CTLFLAG_PERMANENT,
1728 CTLTYPE_INT, "pkt_pool",
1729 SYSCTL_DESCR("packet pool available"),
1730 octeon_eth_sysctl_pool, 0, NULL,
1731 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1732 CTL_EOL)) != 0) {
1733 goto err;
1734 }
1735
1736 octeon_eth_sysctl_pkt_pool_num = node->sysctl_num;
1737
1738 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1739 CTLFLAG_PERMANENT,
1740 CTLTYPE_INT, "wqe_pool",
1741 SYSCTL_DESCR("wqe pool available"),
1742 octeon_eth_sysctl_pool, 0, NULL,
1743 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1744 CTL_EOL)) != 0) {
1745 goto err;
1746 }
1747
1748 octeon_eth_sysctl_wqe_pool_num = node->sysctl_num;
1749
1750 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1751 CTLFLAG_PERMANENT,
1752 CTLTYPE_INT, "cmd_pool",
1753 SYSCTL_DESCR("cmd pool available"),
1754 octeon_eth_sysctl_pool, 0, NULL,
1755 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1756 CTL_EOL)) != 0) {
1757 goto err;
1758 }
1759
1760 octeon_eth_sysctl_cmd_pool_num = node->sysctl_num;
1761
1762 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1763 CTLFLAG_PERMANENT,
1764 CTLTYPE_INT, "sg_pool",
1765 SYSCTL_DESCR("sg pool available"),
1766 octeon_eth_sysctl_pool, 0, NULL,
1767 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1768 CTL_EOL)) != 0) {
1769 goto err;
1770 }
1771
1772 octeon_eth_sysctl_sg_pool_num = node->sysctl_num;
1773
1774 if ((rc = sysctl_createv(clog, 0, NULL, &node,
1775 CTLFLAG_PERMANENT | CTLFLAG_READONLY,
1776 CTLTYPE_INT, "pktbuf",
1777 SYSCTL_DESCR("input packet buffer size on POW"),
1778 octeon_eth_sysctl_rd, 0,
1779 &octeon_eth_param_pktbuf,
1780 0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1781 CTL_EOL)) != 0) {
1782 goto err;
1783 }
1784
1785 octeon_eth_sysctl_pktbuf_num = node->sysctl_num;
1786
1787 return;
1788
1789 err:
1790 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
1791 }
1792
/*
 * sysctl handler for the writable hw.cnmac.* knobs
 * (pko_cmd_w0_n2, pip_dyn_rs, redir).
 *
 * Validates the proposed value, commits it to the backing variable,
 * and — for the knobs that affect hardware — applies it to every
 * port at splnet.
 */
static int
octeon_eth_sysctl_verify(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	struct octeon_eth_softc *sc;
	int i;
	int s;

	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Read-only access or lookup failure: nothing more to do. */
	if (error || newp == NULL)
		return error;

	/* pko_cmd_w0_n2: boolean; no immediate hardware update needed. */
	if (node.sysctl_num == octeon_eth_sysctl_pkocmdw0n2_num) {
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		return 0;
	}

	/* pip_dyn_rs: boolean; pushed to the PIP config of every port. */
	if (node.sysctl_num == octeon_eth_sysctl_pipdynrs_num) {
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			sc = octeon_eth_gsc[i]; /* XXX */
			octeon_pip_prt_cfg_enable(sc->sc_pip,
			    PIP_PRT_CFGN_DYN_RS, v);
		}
		splx(s);
		return 0;
	}

	/* redir: a 3-bit output-port mask per input port, 4 bits apart. */
	if (node.sysctl_num == octeon_eth_sysctl_redir_num) {
		if (v & ~((0x7 << (4 * 0)) | (0x7 << (4 * 1)) | (0x7 << (4 * 2))))
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			struct ifnet *ifp;

			sc = octeon_eth_gsc[i]; /* XXX */
			ifp = &sc->sc_ethercom.ec_if;

			sc->sc_redir
			    = (octeon_eth_param_redir >> (4 * i)) & 0x7;
			if (sc->sc_redir == 0) {
				/* Redirection off: restore normal input. */
				if (ISSET(ifp->if_flags, IFF_PROMISC)) {
					CLR(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = ether_input;
			}
			else {
				/* Redirection on: force promiscuous input. */
				if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
					SET(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = octeon_eth_recv_redir;
			}
		}
		splx(s);
		return 0;
	}

	return EINVAL;
}
1866
1867 static int
1868 octeon_eth_sysctl_pool(SYSCTLFN_ARGS)
1869 {
1870 int error, newval = 0;
1871 struct sysctlnode node;
1872 int s;
1873
1874 node = *rnode;
1875 node.sysctl_data = &newval;
1876 s = splnet();
1877 if (node.sysctl_num == octeon_eth_sysctl_pkt_pool_num) {
1878 error = octeon_fpa_available_fpa_pool(&newval,
1879 OCTEON_POOL_NO_PKT);
1880 } else if (node.sysctl_num == octeon_eth_sysctl_wqe_pool_num) {
1881 error = octeon_fpa_available_fpa_pool(&newval,
1882 OCTEON_POOL_NO_WQE);
1883 } else if (node.sysctl_num == octeon_eth_sysctl_cmd_pool_num) {
1884 error = octeon_fpa_available_fpa_pool(&newval,
1885 OCTEON_POOL_NO_CMD);
1886 } else if (node.sysctl_num == octeon_eth_sysctl_sg_pool_num) {
1887 error = octeon_fpa_available_fpa_pool(&newval,
1888 OCTEON_POOL_NO_SG);
1889 } else {
1890 splx(s);
1891 return EINVAL;
1892 }
1893 splx(s);
1894 if (error)
1895 return error;
1896 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1897 if (error || newp == NULL)
1898 return error;
1899
1900 return 0;
1901 }
1902
1903 static int
1904 octeon_eth_sysctl_rd(SYSCTLFN_ARGS)
1905 {
1906 int error, v;
1907 struct sysctlnode node;
1908 int s;
1909
1910 node = *rnode;
1911 v = *(int *)rnode->sysctl_data;
1912 node.sysctl_data = &v;
1913 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1914 if (error || newp != NULL)
1915 return error;
1916
1917 if (node.sysctl_num == octeon_eth_sysctl_pktbuf_num) {
1918 uint64_t tmp;
1919 int n;
1920
1921 s = splnet();
1922 tmp = octeon_fpa_query(0);
1923 n = (int)tmp;
1924 splx(s);
1925 *(int *)rnode->sysctl_data = n;
1926 octeon_eth_param_pktbuf = n;
1927 *(int *)oldp = n;
1928 return 0;
1929 }
1930
1931 return EINVAL;
1932 }
1933