if_cnmac.c revision 1.9 1 /* $NetBSD: if_cnmac.c,v 1.9 2018/06/26 06:47:58 msaitoh Exp $ */
2
3 #include <sys/cdefs.h>
4 #if 0
5 __KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.9 2018/06/26 06:47:58 msaitoh Exp $");
6 #endif
7
8 #include "opt_octeon.h"
9
10 #ifdef OCTEON_ETH_DEBUG
11
12 #ifndef DIAGNOSTIC
13 #define DIAGNOSTIC
14 #endif
15
16 #ifndef DEBUG
17 #define DEBUG
18 #endif
19
20 #endif
21
22 /*
23 * If no free send buffer is available, free all the sent buffer and bail out.
24 */
25 #define OCTEON_ETH_SEND_QUEUE_CHECK
26
27 /* XXX XXX XXX XXX XXX XXX */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/pool.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/ioctl.h>
37 #include <sys/errno.h>
38 #include <sys/device.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/sysctl.h>
42 #include <sys/syslog.h>
43
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_ether.h>
48 #include <net/route.h>
49
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/in_var.h>
55 #include <netinet/ip.h>
56
57 #include <sys/bus.h>
58 #include <machine/intr.h>
59 #include <machine/endian.h>
60 #include <machine/locore.h>
61
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
64
65 #include <mips/cpuregs.h>
66
67 #include <mips/cavium/dev/octeon_asxreg.h>
68 #include <mips/cavium/dev/octeon_ciureg.h>
69 #include <mips/cavium/dev/octeon_npireg.h>
70 #include <mips/cavium/dev/octeon_gmxreg.h>
71 #include <mips/cavium/dev/octeon_ipdreg.h>
72 #include <mips/cavium/dev/octeon_pipreg.h>
73 #include <mips/cavium/dev/octeon_powreg.h>
74 #include <mips/cavium/dev/octeon_faureg.h>
75 #include <mips/cavium/dev/octeon_fpareg.h>
76 #include <mips/cavium/dev/octeon_bootbusreg.h>
77 #include <mips/cavium/include/iobusvar.h>
78 #include <mips/cavium/octeonvar.h>
79 #include <mips/cavium/dev/octeon_fpavar.h>
80 #include <mips/cavium/dev/octeon_gmxvar.h>
81 #include <mips/cavium/dev/octeon_fauvar.h>
82 #include <mips/cavium/dev/octeon_powvar.h>
83 #include <mips/cavium/dev/octeon_ipdvar.h>
84 #include <mips/cavium/dev/octeon_pipvar.h>
85 #include <mips/cavium/dev/octeon_pkovar.h>
86 #include <mips/cavium/dev/octeon_asxvar.h>
87 #include <mips/cavium/dev/octeon_smivar.h>
88 #include <mips/cavium/dev/if_cnmacvar.h>
89
90 #ifdef OCTEON_ETH_DEBUG
91 #define OCTEON_ETH_KASSERT(x) KASSERT(x)
92 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x)
93 #else
94 #define OCTEON_ETH_KASSERT(x)
95 #define OCTEON_ETH_KDASSERT(x)
96 #endif
97
98 /*
99 * Set the PKO to think command buffers are an odd length. This makes it so we
 * never have to divide a command across two buffers.
101 */
102 #define OCTEON_POOL_NWORDS_CMD \
103 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
104 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */
105
106 static void octeon_eth_buf_init(struct octeon_eth_softc *);
107
108 static int octeon_eth_match(device_t, struct cfdata *, void *);
109 static void octeon_eth_attach(device_t, device_t, void *);
110 static void octeon_eth_pip_init(struct octeon_eth_softc *);
111 static void octeon_eth_ipd_init(struct octeon_eth_softc *);
112 static void octeon_eth_pko_init(struct octeon_eth_softc *);
113 static void octeon_eth_asx_init(struct octeon_eth_softc *);
114 static void octeon_eth_smi_init(struct octeon_eth_softc *);
115
116 static void octeon_eth_board_mac_addr(uint8_t *, size_t, struct octeon_eth_softc *);
117
118 static int octeon_eth_mii_readreg(device_t, int, int);
119 static void octeon_eth_mii_writereg(device_t, int, int, int);
120 static void octeon_eth_mii_statchg(struct ifnet *);
121
122 static int octeon_eth_mediainit(struct octeon_eth_softc *);
123 static void octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
124 static int octeon_eth_mediachange(struct ifnet *);
125
126 static inline void octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
127 static inline void octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
128 static inline void octeon_eth_send_queue_flush(struct octeon_eth_softc *);
129 static inline void octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
130 static inline int octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
131 static inline void octeon_eth_send_queue_add(struct octeon_eth_softc *,
132 struct mbuf *, uint64_t *);
133 static inline void octeon_eth_send_queue_del(struct octeon_eth_softc *,
134 struct mbuf **, uint64_t **);
135 static inline int octeon_eth_buf_free_work(struct octeon_eth_softc *,
136 uint64_t *, uint64_t);
137 static inline void octeon_eth_buf_ext_free_m(struct mbuf *, void *, size_t, void *);
138 static inline void octeon_eth_buf_ext_free_ext(struct mbuf *, void *, size_t, void *);
139
140 static int octeon_eth_ioctl(struct ifnet *, u_long, void *);
141 static void octeon_eth_watchdog(struct ifnet *);
142 static int octeon_eth_init(struct ifnet *);
143 static void octeon_eth_stop(struct ifnet *, int);
144 static void octeon_eth_start(struct ifnet *);
145
146 static inline int octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
147 uint64_t, int *);
148 static inline uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
149 static inline uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
150 int);
151 static inline int octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
152 struct mbuf *, uint64_t *, int *);
153 static inline int octeon_eth_send_makecmd(struct octeon_eth_softc *,
154 struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
155 static inline int octeon_eth_send_buf(struct octeon_eth_softc *,
156 struct mbuf *, uint64_t *, int *);
157 static inline int octeon_eth_send(struct octeon_eth_softc *,
158 struct mbuf *, int *);
159
160 static int octeon_eth_reset(struct octeon_eth_softc *);
161 static int octeon_eth_configure(struct octeon_eth_softc *);
162 static int octeon_eth_configure_common(struct octeon_eth_softc *);
163
164 static void octeon_eth_tick_free(void *arg);
165 static void octeon_eth_tick_misc(void *);
166
167 static inline int octeon_eth_recv_mbuf(struct octeon_eth_softc *,
168 uint64_t *, struct mbuf **);
169 static inline int octeon_eth_recv_check_code(struct octeon_eth_softc *,
170 uint64_t);
171 static inline int octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
172 uint64_t);
173 static inline int octeon_eth_recv_check_link(struct octeon_eth_softc *,
174 uint64_t);
175 static inline int octeon_eth_recv_check(struct octeon_eth_softc *,
176 uint64_t);
177 static inline int octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
178 static void octeon_eth_recv_redir(struct ifnet *, struct mbuf *);
179 static inline void octeon_eth_recv_intr(void *, uint64_t *);
180
/* device driver context */
/* Per-port softc pointers, indexed by GMX port number (set in attach). */
static struct octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
/* Shared POW receive interrupt handle; established once by the first port. */
static void *octeon_eth_pow_recv_ih;

/* sysctl'able parameters */
int octeon_eth_param_pko_cmd_w0_n2 = 1;	/* value of the "n2" field in PKO command WORD0 */
int octeon_eth_param_pip_dyn_rs = 1;
int octeon_eth_param_redir = 0;
int octeon_eth_param_pktbuf = 0;
int octeon_eth_param_rate = 0;
int octeon_eth_param_intr = 0;

CFATTACH_DECL_NEW(cnmac, sizeof(struct octeon_eth_softc),
    octeon_eth_match, octeon_eth_attach, NULL, NULL);
195
#ifdef OCTEON_ETH_DEBUG

/*
 * Event counter descriptors, attached per-softc at the end of attach.
 * Each _ENTRY names an evcnt member of struct octeon_eth_softc plus its
 * type, parent, and human-readable description.
 */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx,			MISC, NULL, "rx"),
	_ENTRY(rxint,			INTR, NULL, "rx intr"),
	_ENTRY(rxrs,			MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc,		MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput,		MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc,		MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput,		MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode,		MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix,		MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb,		MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink,		MISC, NULL, "rx link error"),
	_ENTRY(rxerroff,		MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort,		MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble,	MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc,		MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress,		MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp,		MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok,			MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx,			MISC, NULL, "tx"),
	_ENTRY(txadd,			MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc,		MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget,		MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc,		MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget,		MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput,		MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel,			MISC, NULL, "tx del"),
	_ENTRY(txerr,			MISC, NULL, "tx error"),
	_ENTRY(txerrcmd,		MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf,		MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink,		MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd,		MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
#endif
235
236 /* ---- buffer management */
237
/*
 * FPA pool configuration table: one entry per hardware free-pool used by
 * this driver (packet, work-queue-entry, PKO command, scatter/gather).
 * Pools are created from this table in octeon_eth_buf_init().
 */
static const struct octeon_eth_pool_param {
	int			poolno;		/* hardware FPA pool number */
	size_t			size;		/* element size in bytes */
	size_t			nelems;		/* number of elements */
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
/* Pool handles, indexed by FPA pool number; aliases below name each one. */
struct octeon_fpa_buf	*octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
255
256 static void
257 octeon_eth_buf_init(struct octeon_eth_softc *sc)
258 {
259 static int once;
260 int i;
261 const struct octeon_eth_pool_param *pp;
262 struct octeon_fpa_buf *fb;
263
264 if (once == 1)
265 return;
266 once = 1;
267
268 for (i = 0; i < (int)__arraycount(octeon_eth_pool_params); i++) {
269 pp = &octeon_eth_pool_params[i];
270 octeon_fpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
271 octeon_eth_pools[i] = fb;
272 }
273 }
274
275 /* ---- autoconf */
276
277 static int
278 octeon_eth_match(device_t parent, struct cfdata *match, void *aux)
279 {
280 struct octeon_gmx_attach_args *ga = aux;
281
282 if (strcmp(match->cf_name, ga->ga_name) != 0) {
283 return 0;
284 }
285 return 1;
286 }
287
/*
 * Autoconf attach: wire up one GMX port as an Ethernet interface.
 * Initializes the per-port submodules (PIP/IPD/PKO/ASX/SMI), the FAU
 * transmit-completion counter, media/MII, and the ifnet, then registers
 * the shared POW receive interrupt on first attach.
 */
static void
octeon_eth_attach(device_t parent, device_t self, void *aux)
{
	struct octeon_eth_softc *sc = device_private(self);
	struct octeon_gmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	/* Copy port identity and register handles from the GMX parent. */
	sc->sc_dev = self;
	sc->sc_regt = ga->ga_regt;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;

	sc->sc_init_flag = 0;
	/*
	 * XXXUEBAYASI
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	/* Older (<= CN30XX) silicon lacks pre-align and RX in-band status. */
	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_PRE_ALIGN);
		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_RX_INBND);
	}

	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc);
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	/* Record this softc for lookup by port number (used at recv time). */
	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	octeon_gmx_stats_init(sc->sc_gmx_port);

	callout_init(&sc->sc_tick_misc_ch, 0);
	callout_init(&sc->sc_tick_free_ch, 0);

	/*
	 * Set up the FAU register that the PKO decrements on transmit
	 * completion; start it at 0 (counts go negative as packets queue).
	 */
	octeon_fau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	octeon_fau_op_set_8(&sc->sc_fau_done, 0);

	/* Bring up the per-port submodules. */
	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Give the GMX port layer back-pointers to our submodule state. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
	/* XXX */
	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;

	/* XXX */
	sc->sc_pow = &octeon_pow_softc;

	octeon_eth_mediainit(sc);

	/* Populate and attach the network interface. */
	strncpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	ifp->if_init = octeon_eth_init;
	ifp->if_stop = octeon_eth_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX: not yet tx checksum */
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	octeon_gmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	octeon_gmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/* XXX rate-limit caps for various receive-error log paths */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_short_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_preamble_cap.tv_sec = 1;
	sc->sc_rate_recv_fixup_odd_nibble_crc_cap.tv_sec = 1;
#ifdef OCTEON_ETH_DEBUG
	sc->sc_rate_recv_fixup_odd_nibble_addr_cap.tv_sec = 1;
#endif
	/* XXX */

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* The POW receive interrupt is shared; only the first port sets it. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = octeon_pow_intr_establish(OCTEON_POW_GROUP_PIP,
		    IPL_NET, octeon_eth_recv_intr, NULL, NULL);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    device_xname(sc->sc_dev));
}
401
402 /* ---- submodules */
403
404 /* XXX */
405 static void
406 octeon_eth_pip_init(struct octeon_eth_softc *sc)
407 {
408 struct octeon_pip_attach_args pip_aa;
409
410 pip_aa.aa_port = sc->sc_port;
411 pip_aa.aa_regt = sc->sc_regt;
412 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
413 pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
414 pip_aa.aa_ip_offset = sc->sc_ip_offset;
415 octeon_pip_init(&pip_aa, &sc->sc_pip);
416 }
417
418 /* XXX */
419 static void
420 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
421 {
422 struct octeon_ipd_attach_args ipd_aa;
423
424 ipd_aa.aa_port = sc->sc_port;
425 ipd_aa.aa_regt = sc->sc_regt;
426 ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
427 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
428 octeon_ipd_init(&ipd_aa, &sc->sc_ipd);
429 }
430
431 /* XXX */
432 static void
433 octeon_eth_pko_init(struct octeon_eth_softc *sc)
434 {
435 struct octeon_pko_attach_args pko_aa;
436
437 pko_aa.aa_port = sc->sc_port;
438 pko_aa.aa_regt = sc->sc_regt;
439 pko_aa.aa_cmdptr = &sc->sc_cmdptr;
440 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
441 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
442 octeon_pko_init(&pko_aa, &sc->sc_pko);
443 }
444
445 /* XXX */
446 static void
447 octeon_eth_asx_init(struct octeon_eth_softc *sc)
448 {
449 struct octeon_asx_attach_args asx_aa;
450
451 asx_aa.aa_port = sc->sc_port;
452 asx_aa.aa_regt = sc->sc_regt;
453 octeon_asx_init(&asx_aa, &sc->sc_asx);
454 }
455
456 static void
457 octeon_eth_smi_init(struct octeon_eth_softc *sc)
458 {
459 struct octeon_smi_attach_args smi_aa;
460
461 smi_aa.aa_port = sc->sc_port;
462 smi_aa.aa_regt = sc->sc_regt;
463 octeon_smi_init(&smi_aa, &sc->sc_smi);
464 octeon_smi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
465 }
466
467 /* ---- XXX */
468
/* Pack a 6-byte Ethernet address into the low 48 bits of a uint64_t. */
#define	ADDR2UINT64(u, a) \
	do { \
		u = \
		    (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
		     ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
		     ((uint64_t)a[4] <<  8) | ((uint64_t)a[5] <<  0)); \
	} while (0)
/* Unpack the low 48 bits of a uint64_t into a 6-byte Ethernet address. */
#define	UINT642ADDR(a, u) \
	do { \
		a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
		a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
		a[4] = (uint8_t)((u) >>  8); a[5] = (uint8_t)((u) >>  0); \
	} while (0)
482
/*
 * Fetch this port's MAC address from the "mac-address" device property
 * (populated by board/firmware glue) into enaddr.
 *
 * NOTE(review): copies `size` bytes without checking the property's own
 * data length against `size` — assumes the property always holds at
 * least ETHER_ADDR_LEN bytes; TODO confirm against the property setter.
 */
static void
octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size, struct octeon_eth_softc *sc)
{
	prop_dictionary_t dict;
	prop_data_t ea;

	dict = device_properties(sc->sc_dev);
	KASSERT(dict != NULL);
	ea = prop_dictionary_get(dict, "mac-address");
	KASSERT(ea != NULL);
	memcpy(enaddr, prop_data_data_nocopy(ea), size);
}
495
496 /* ---- media */
497
498 static int
499 octeon_eth_mii_readreg(device_t self, int phy_addr, int reg)
500 {
501 struct octeon_eth_softc *sc = device_private(self);
502
503 return octeon_smi_read(sc->sc_smi, phy_addr, reg);
504 }
505
506 static void
507 octeon_eth_mii_writereg(device_t self, int phy_addr, int reg, int value)
508 {
509 struct octeon_eth_softc *sc = device_private(self);
510
511 octeon_smi_write(sc->sc_smi, phy_addr, reg, value);
512 }
513
/*
 * MII status-change callback: quiesce PKO and GMX, reset the port to
 * pick up the new link parameters, reload the RX filter if running,
 * then re-enable.  The disable/reset/enable ordering is deliberate.
 */
static void
octeon_eth_mii_statchg(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	octeon_pko_port_enable(sc->sc_pko, 0);
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		octeon_gmx_set_filter(sc->sc_gmx_port);

	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
}
530
/*
 * Set up ifmedia/MII for this port.  The PHY address comes from the
 * "phy-addr" device property.  If no PHY is found, fall back to a
 * manual IFM_NONE entry.
 *
 * Returns 0 always.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	prop_object_t phy;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
	KASSERT(phy != NULL);

	mii_attach(sc->sc_dev, &sc->sc_mii,
	    0xffffffff, prop_number_integer_value(phy),
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* XXX XXX XXX */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) {
		/* PHY present: let autonegotiation pick the media. */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		/* XXX XXX XXX */
	} else {
		/* No PHY found: register and select a "none" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		/* XXX XXX XXX */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
		/* XXX XXX XXX */
	}
	/* XXX XXX XXX */

	return 0;
}
568
569 static void
570 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
571 {
572 struct octeon_eth_softc *sc = ifp->if_softc;
573
574 mii_pollstat(&sc->sc_mii);
575
576 ifmr->ifm_status = sc->sc_mii.mii_media_status;
577 ifmr->ifm_active = sc->sc_mii.mii_media_active;
578 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
579 sc->sc_gmx_port->sc_port_flowflags;
580 }
581
582 static int
583 octeon_eth_mediachange(struct ifnet *ifp)
584 {
585 struct octeon_eth_softc *sc = ifp->if_softc;
586
587 mii_mediachg(&sc->sc_mii);
588
589 return 0;
590 }
591
592 /* ---- send buffer garbage collection */
593
/*
 * Start an asynchronous fetch of the FAU transmit-completion counter;
 * octeon_eth_send_queue_flush_fetch() collects the result later so the
 * FAU latency overlaps with other work.
 */
static inline void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;	/* a fetch is now outstanding */
}
601
/*
 * Collect the FAU counter value started by ..._flush_prefetch() into
 * sc_hard_done_cnt.  The counter is <= 0: its magnitude is the number
 * of packets the hardware has finished sending.
 */
static inline void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	/* Non-debug kernels tolerate a fetch without a prior prefetch. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
614
/*
 * Reclaim resources for every packet the hardware has finished sending
 * (|sc_hard_done_cnt| of them): dequeue the mbuf and its gather buffer,
 * return the gather buffer to the FPA pool, and free the mbuf.  Then
 * add the reclaimed count back into the FAU counter; the matching read
 * happens in octeon_eth_send_queue_flush_sync().
 */
static inline void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	/* sent_count is <= 0, so this iterates |sent_count| times. */
	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		octeon_fpa_buf_put(octeon_eth_fb_sg, gbuf);
		OCTEON_EVCNT_INC(sc, txbufgbput);

		m_freem(m);

		/* A send slot was freed; the interface is no longer full. */
		CLR(ifp->if_flags, IFF_OACTIVE);
	}

	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;	/* consumed by ..._flush_sync() */
}
642
/*
 * Complete the FAU add issued at the end of octeon_eth_send_queue_flush()
 * and debit the software in-flight counter by the number of packets just
 * reclaimed.  No-op if nothing was flushed.
 */
static inline void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX XXX XXX */
	octeon_fau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX XXX XXX */

	sc->sc_flush = 0;
}
659
/*
 * Return nonzero if the transmit gather-buffer queue is full.  The
 * occupancy is the software request count plus the (negative) hardware
 * completion count.  On a full queue, reclaim completed sends before
 * reporting full so the caller can retry later.
 */
static inline int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
678
679 /*
680 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
681 * buffer. Other mbuf members may be used by m_freem(), so don't touch them!
682 */
683
/*
 * Overlay view of an mbuf used as a send-queue entry.  The offsetof-based
 * padding places _sqe_entry exactly over m_nextpkt (the SIMPLEQ linkage)
 * and _sqe_gbuf exactly over m_paddr (the gather-buffer pointer), so an
 * mbuf can be cast to this struct without extra allocation.  See the
 * comment above: no other mbuf members may be touched.
 */
struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;
		struct {
			/* pad up to m_nextpkt, then overlay the queue link */
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			/* pad up to m_paddr, then overlay the gather pointer */
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
699
/*
 * Append an in-flight packet (mbuf + its gather buffer) to the send
 * queue, using the mbuf itself as the queue entry (see
 * struct _send_queue_entry).  Tracks mbufs with external-free callbacks
 * so the free tick can poll for them.
 */
static inline void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;

	OCTEON_EVCNT_INC(sc, txadd);
}
714
/*
 * Remove the oldest in-flight packet from the send queue, returning the
 * mbuf in *rm and its gather buffer in *rgbuf.  The queue must not be
 * empty.  Undoes the ext-callback accounting done in ..._queue_add().
 */
static inline void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}

	OCTEON_EVCNT_INC(sc, txdel);
}
735
/*
 * Return a received work-queue entry (and, if WORD2 says packet buffers
 * are attached, the packet buffer it points at) to their FPA pools.
 * Returns 0 always.
 */
static inline int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = work[3] & PIP_WQE_WORD3_ADDR;
		/* packet buffers are 2 KB aligned; mask back to the start */
		start_buffer = addr & ~(2048 - 1);

		octeon_fpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
757
/*
 * External-storage free callback for mbufs whose data lives inside the
 * work-queue entry (dynamic-short receive): return the WQE to its FPA
 * pool and the mbuf to the mbuf cache.  Runs at splnet.
 */
static inline void
octeon_eth_buf_ext_free_m(struct mbuf *m, void *buf, size_t size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] holds the softc pointer (debug evcnt only) */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
778
/*
 * External-storage free callback for mbufs whose data lives in an FPA
 * packet buffer: return both the WQE and the packet buffer to their FPA
 * pools, then the mbuf to the mbuf cache.  Runs at splnet.
 */
static inline void
octeon_eth_buf_ext_free_ext(struct mbuf *m, void *buf, size_t size,
    void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] holds the softc pointer (debug evcnt only) */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	octeon_fpa_buf_put(octeon_eth_fb_pkt, buf);
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	OCTEON_ETH_KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);

	splx(s);
}
801
802 /* ---- ifnet interfaces */
803
/*
 * Interface ioctl handler.  Media ioctls get flow-control fixups before
 * being passed to ifmedia; everything else goes through ether_ioctl(),
 * with ENETRESET translated into a multicast-filter reload.  Finishes
 * by kicking the transmit path.  Runs at splnet.
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW shorthand enables both pause directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
				ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		/* XXX: Flow control */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				octeon_gmx_set_filter(sc->sc_gmx_port);
			error = 0;
		}
		break;
	}
	/* Opportunistically restart transmission. */
	octeon_eth_start(ifp);
	splx(s);

	return (error);
}
850
851 /* ---- send (output) */
852
/*
 * Build PKO command WORD0 for a packet of `len` bytes in `segs`
 * segments.  fau0 is the completion counter register to decrement;
 * gather mode (g=1) is selected whenever there is more than one
 * segment.
 */
static inline uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return octeon_pko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		0, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}
867
/*
 * Build PKO command WORD1 (or a gather-list entry): a size/physical-
 * address pair drawn from the gather buffer pool.
 */
static inline uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return octeon_pko_cmd_word1(
		0, 0,				/* i, back */
		FPA_GATHER_BUFFER_POOL,		/* pool */
		size, addr);			/* size, addr */
}
876
/*
 * Fill the gather buffer with one WORD1-style entry per physically
 * contiguous chunk of the mbuf chain.  kvtophys() only translates
 * within one page, so an mbuf whose data crosses a page boundary is
 * split into two entries.  Returns 0 on success with *rsegs set, or 1
 * if more than 64 segments would be needed.
 */
static inline int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uintptr_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* Data crosses a page: emit the in-page head first. */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    kvtophys((vaddr_t)m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX XXX XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* Emit the (remaining) chunk starting at m_data + rlen. */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    kvtophys((vaddr_t)(m->m_data + rlen)));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
930
/*
 * Build the two-word PKO send command for mbuf chain m.  The gather
 * list is written into gbuf; WORD0/WORD1 are returned through
 * *rpko_cmd_w0 / *rpko_cmd_w1.  Returns 0 on success, nonzero if the
 * chain needs too many segments (already logged).
 */
static inline int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: there are a lot of number of segments"
		    " of transmission data", device_xname(sc->sc_dev));
		result = 1;
		goto done;
	}

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *	WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *	WORD1[size] is number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs);
	if (segs == 1) {
		/* Single segment: WORD1 points straight at the data. */
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(
		    m->m_pkthdr.len, kvtophys((vaddr_t)m->m_data));
	} else {
		/* Gather mode: WORD1 points at the gather list itself. */
#ifdef __mips_n32
		KASSERT(MIPS_KSEG0_P(gbuf));
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
		    MIPS_KSEG0_TO_PHYS(gbuf));
#else
		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
		    MIPS_XKPHYS_TO_PHYS(gbuf));
#endif
	}

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
975
/*
 * Append a two-word PKO command to the command ring.  When the current
 * command buffer is about to fill, allocate a fresh buffer from the
 * command FPA pool and chain to it (the last word of a buffer holds the
 * next buffer's physical address).  *pwdc accumulates the number of
 * doorbell words to ring later.  Returns 0 on success, nonzero if no
 * command buffer could be allocated (already logged).
 */
static inline int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1, int *pwdc)
{
	uint64_t *cmdptr;
	int result = 0;

	/* Map the command buffer's physical address into KVA. */
#ifdef __mips_n32
	KASSERT((sc->sc_cmdptr.cmdptr & ~MIPS_PHYS_MASK) == 0);
	cmdptr = (uint64_t *)MIPS_PHYS_TO_KSEG0(sc->sc_cmdptr.cmdptr);
#else
	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
#endif
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		/* Buffer full: chain a new command buffer. */
		paddr_t buf;

		buf = octeon_fpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    device_xname(sc->sc_dev));
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		/* Last word of the old buffer points at the new one. */
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	*pwdc += 2;

done:
	return result;
}
1022
1023 static inline int
1024 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1025 uint64_t *gbuf, int *pwdc)
1026 {
1027 int result = 0, error;
1028 uint64_t pko_cmd_w0, pko_cmd_w1;
1029
1030 error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1031 if (error != 0) {
1032 /* already logging */
1033 OCTEON_EVCNT_INC(sc, txerrmkcmd);
1034 result = error;
1035 goto done;
1036 }
1037
1038 error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1, pwdc);
1039 if (error != 0) {
1040 /* already logging */
1041 OCTEON_EVCNT_INC(sc, txerrcmd);
1042 result = error;
1043 }
1044
1045 done:
1046 return result;
1047 }
1048
/*
 * Transmit one mbuf chain: allocate a gather buffer from the SG pool,
 * build and queue the PKO command, and on success record the (mbuf,
 * gather buffer) pair on the send queue for reclamation when the
 * hardware finishes.  On failure the gather buffer is returned to the
 * pool.  Returns 0 on success, nonzero on failure (already logged).
 */
static inline int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m, int *pwdc)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	OCTEON_EVCNT_INC(sc, tx);

	gaddr = octeon_fpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: can not allocate gather buffer from free pool allocator\n",
		    device_xname(sc->sc_dev));
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		result = 1;
		goto done;
	}
	OCTEON_EVCNT_INC(sc, txbufgbget);

	/* Map the gather buffer's physical address into KVA. */
#ifdef __mips_n32
	KASSERT((gaddr & ~MIPS_PHYS_MASK) == 0);
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_KSEG0(gaddr);
#else
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);
#endif

	OCTEON_ETH_KASSERT(gbuf != NULL);

	error = octeon_eth_send_buf(sc, m, gbuf, pwdc);
	if (error != 0) {
		/* already logging */
		octeon_fpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		OCTEON_EVCNT_INC(sc, txbufgbput);
		result = error;
		goto done;
	}

	/* The mbuf is now owned by the send queue until completion. */
	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
1092
/*
 * Transmit start routine: drain ifp->if_snd into the PKO.
 *
 * `wdc' accumulates the number of PKO command words queued; the
 * doorbell is rung once at the end (or before an early return) rather
 * than per packet.  If the link is down, all queued packets are
 * dropped.  Every *_flush_prefetch() issued here must be paired with
 * a *_flush_fetch() before the next prefetch or return.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int wdc = 0;	/* command words queued but not yet doorbell'd */

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	/* Bail unless the interface is running and not marked busy. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* XXX XXX XXX */
		/* complete the iobdma request issued before this iteration */
		octeon_eth_send_queue_flush_fetch(sc);

		/*
		 * If no free send buffer is available, free all the sent
		 * buffer and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			SET(ifp->if_flags, IFF_OACTIVE);
			/* push any queued commands before returning */
			if (wdc > 0)
				octeon_pko_op_doorbell_write(sc->sc_port,
				    sc->sc_port, wdc);
			return;
		}
		/* XXX XXX XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* XXX XXX XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m, &wdc)) {
			/* octeon_eth_send() does not consume m on failure */
			IF_DROP(&ifp->if_snd);
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed in the transmission of the packet\n",
			    device_xname(sc->sc_dev));
			OCTEON_EVCNT_INC(sc, txerr);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX XXX XXX */

		/*
		 * send next iobdma request
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

	/* Ring the doorbell once for all the commands queued above. */
	if (wdc > 0)
		octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);

	/*
	 * Don't schedule send-buffer-free callout every time - those buffers
	 * are freed by "free tick".  This makes some packets like NFS slower.
	 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* Pair up the prefetch issued on entry (or in the last iteration). */
	octeon_eth_send_queue_flush_fetch(sc);
}
1194
1195 static void
1196 octeon_eth_watchdog(struct ifnet *ifp)
1197 {
1198 struct octeon_eth_softc *sc = ifp->if_softc;
1199
1200 printf("%s: device timeout\n", device_xname(sc->sc_dev));
1201
1202 octeon_eth_configure(sc);
1203
1204 SET(ifp->if_flags, IFF_RUNNING);
1205 CLR(ifp->if_flags, IFF_OACTIVE);
1206 ifp->if_timer = 0;
1207
1208 octeon_eth_start(ifp);
1209 }
1210
1211 static int
1212 octeon_eth_init(struct ifnet *ifp)
1213 {
1214 struct octeon_eth_softc *sc = ifp->if_softc;
1215
1216 /* XXX don't disable commonly used parts!!! XXX */
1217 if (sc->sc_init_flag == 0) {
1218 /* Cancel any pending I/O. */
1219 octeon_eth_stop(ifp, 0);
1220
1221 /* Initialize the device */
1222 octeon_eth_configure(sc);
1223
1224 octeon_pko_enable(sc->sc_pko);
1225 octeon_ipd_enable(sc->sc_ipd);
1226
1227 sc->sc_init_flag = 1;
1228 } else {
1229 octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1230 }
1231 octeon_eth_mediachange(ifp);
1232
1233 octeon_gmx_set_filter(sc->sc_gmx_port);
1234
1235 callout_reset(&sc->sc_tick_misc_ch, hz, octeon_eth_tick_misc, sc);
1236 callout_reset(&sc->sc_tick_free_ch, hz, octeon_eth_tick_free, sc);
1237
1238 SET(ifp->if_flags, IFF_RUNNING);
1239 CLR(ifp->if_flags, IFF_OACTIVE);
1240
1241 return 0;
1242 }
1243
1244 static void
1245 octeon_eth_stop(struct ifnet *ifp, int disable)
1246 {
1247 struct octeon_eth_softc *sc = ifp->if_softc;
1248
1249 callout_stop(&sc->sc_tick_misc_ch);
1250 callout_stop(&sc->sc_tick_free_ch);
1251
1252 mii_down(&sc->sc_mii);
1253
1254 octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1255
1256 /* Mark the interface as down and cancel the watchdog timer. */
1257 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1258 ifp->if_timer = 0;
1259 }
1260
1261 /* ---- misc */
1262
1263 #define PKO_INDEX_MASK ((1ULL << 12/* XXX */) - 1)
1264
/*
 * Reset the GMX port's link parameters (speed, flow control, timing)
 * so they match the current link configuration.  Always returns 0.
 */
static int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	octeon_gmx_reset_speed(sc->sc_gmx_port);
	octeon_gmx_reset_flowctl(sc->sc_gmx_port);
	octeon_gmx_reset_timing(sc->sc_gmx_port);

	return 0;
}
1274
/*
 * Per-port hardware (re)initialization: reset the GMX link settings,
 * run the one-time common setup, and configure the PKO/PIP units for
 * this port.  The GMX port is kept disabled while reconfiguring.
 * Always returns 0.
 */
static int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	octeon_gmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* chip-wide configuration; runs only once (see the callee) */
	octeon_eth_configure_common(sc);

	octeon_pko_port_config(sc->sc_pko);
	octeon_pko_port_enable(sc->sc_pko, 1);
	octeon_pip_port_config(sc->sc_pip);

	/* presumably sets TX/RX statistics to read-to-clear — TODO confirm */
	octeon_gmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	octeon_gmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	octeon_gmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1295
1296 static int
1297 octeon_eth_configure_common(struct octeon_eth_softc *sc)
1298 {
1299 static int once;
1300
1301 if (once == 1)
1302 return 0;
1303 once = 1;
1304
1305 octeon_ipd_config(sc->sc_ipd);
1306 #ifdef OCTEON_ETH_IPD_RED
1307 octeon_ipd_red(sc->sc_ipd, RECV_QUEUE_SIZE >> 2, RECV_QUEUE_SIZE >> 3);
1308 #endif
1309 octeon_pko_config(sc->sc_pko);
1310
1311 octeon_pow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);
1312
1313 return 0;
1314 }
1315
1316 /* ---- receive (input) */
1317
/*
 * Wrap a received packet (described by POW work queue entry `work')
 * in an mbuf with external storage.
 *
 * Returns 0 and stores the mbuf in *rm on success; returns 1 if no
 * mbuf header could be allocated.  The work entry (and packet buffer,
 * when separate) are released by the ext_free callback when the mbuf
 * is freed.
 */
static inline int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(struct mbuf *, void *, size_t, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/*
		 * Dynamic short: the packet data is stored inside the
		 * work queue entry itself, starting at work[4].
		 */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		/* Packet data lives in a separate 2048-byte buffer. */
		vaddr_t addr;
		vaddr_t start_buffer;

#ifdef __mips_n32
		KASSERT((word3 & ~MIPS_PHYS_MASK) == 0);
		addr = MIPS_PHYS_TO_KSEG0(word3 & PIP_WQE_WORD3_ADDR);
#else
		addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
#endif
		/* buffer is 2048-aligned; addr may point into its middle */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* packet length is in the upper bits of WORD1 (shifted down 48) */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m_set_rcvif(m, &sc->sc_ethercom.ec_if);
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1382
1383 static inline int
1384 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1385 {
1386 uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1387
1388 if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1389 return 0;
1390
1391 /* this error is harmless */
1392 if (opecode == PIP_OVER_ERR)
1393 return 0;
1394
1395 return 1;
1396 }
1397
1398 static inline int
1399 octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
1400 {
1401 if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
1402 return 1;
1403 return 0;
1404 }
1405
1406 static inline int
1407 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1408 {
1409 if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port)))
1410 return 1;
1411 return 0;
1412 }
1413
1414 static inline int
1415 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1416 {
1417 if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1418 if (ratecheck(&sc->sc_rate_recv_check_link_last,
1419 &sc->sc_rate_recv_check_link_cap))
1420 log(LOG_DEBUG,
1421 "%s: link is not up, the packet was dropped\n",
1422 device_xname(sc->sc_dev));
1423 OCTEON_EVCNT_INC(sc, rxerrlink);
1424 return 1;
1425 }
1426
1427 #if 0 /* XXX Performance tunig (Jumbo-frame is not supported yet!) */
1428 if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1429 /* XXX jumbo frame */
1430 if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1431 &sc->sc_rate_recv_check_jumbo_cap))
1432 log(LOG_DEBUG,
1433 "jumbo frame was received\n");
1434 OCTEON_EVCNT_INC(sc, rxerrjmb);
1435 return 1;
1436 }
1437 #endif
1438
1439 if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1440
1441 if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1442 PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1443 /* no logging */
1444 /* XXX inclement special error count */
1445 } else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1446 PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1447 /* not an erorr. it's because of overload */
1448 } else {
1449
1450 if (ratecheck(&sc->sc_rate_recv_check_code_last,
1451 &sc->sc_rate_recv_check_code_cap))
1452 log(LOG_WARNING,
1453 "%s: reception error, packet dropped "
1454 "(error code = %" PRId64 ")\n",
1455 device_xname(sc->sc_dev), word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1456 }
1457 OCTEON_EVCNT_INC(sc, rxerrcode);
1458 return 1;
1459 }
1460
1461 return 0;
1462 }
1463
/*
 * Receive one packet from a POW work queue entry.
 *
 * On success the packet is wrapped in an mbuf and enqueued on the
 * interface's percpu input queue; on error the work entry and packet
 * buffer are returned to the FPA and 1 is returned.  Also
 * opportunistically flushes the send queue while here (the prefetch
 * issued at the top must be matched by a fetch on every path).
 */
static inline int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX XXX XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX XXX XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_ethercom.ec_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	/* Drop on link-down or hardware-flagged reception errors. */
	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* Drop when no mbuf header is available. */
	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	/* Set checksum-offload flags from the hardware's verdict. */
	octeon_ipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX XXX XXX */
	/* complete the prefetch issued above, then reclaim sent buffers */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX XXX XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX XXX XXX */

	if_percpuq_enqueue(ifp->if_percpuq, m);

	return 0;

drop:
	/* XXX XXX XXX */
	/* balance the prefetch issued on entry */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX XXX XXX */

	return result;
}
1536
/*
 * Input redirection: forward a packet received on `ifp' out of
 * another cnmac port, selected by the receiving port's sc_redir
 * bitmask (set via the hw.cnmac.redir sysctl).  Installed as
 * ifp->_if_input when redirection is enabled.  The packet is freed if
 * no target port is configured or transmission fails.
 */
static void
octeon_eth_recv_redir(struct ifnet *ifp, struct mbuf *m)
{
	struct octeon_eth_softc *rsc = ifp->if_softc;
	struct octeon_eth_softc *sc = NULL;
	int i, wdc = 0;

	/* pick the target port; the highest set bit wins */
	for (i = 0; i < 3 /* XXX */; i++) {
		if (rsc->sc_redir & (1 << i))
			sc = octeon_eth_gsc[i];
	}

	if (sc == NULL) {
		m_freem(m);
		return;
	}
	/* issue and immediately complete the send-queue iobdma request */
	octeon_eth_send_queue_flush_prefetch(sc);

	octeon_eth_send_queue_flush_fetch(sc);

	if (octeon_eth_send_queue_is_full(sc)) {
		m_freem(m);
		return;
	}
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
		octeon_eth_send_queue_flush(sc);

	if (octeon_eth_send(sc, m, &wdc)) {
		/* drop accounted against the receiving interface */
		IF_DROP(&ifp->if_snd);
		m_freem(m);
	} else {
		/* ring the doorbell immediately for this single packet */
		octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);
		sc->sc_soft_req_cnt++;
	}

	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
}
1575
1576 static inline void
1577 octeon_eth_recv_intr(void *data, uint64_t *work)
1578 {
1579 struct octeon_eth_softc *sc;
1580 int port;
1581
1582 OCTEON_ETH_KASSERT(work != NULL);
1583
1584 port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1585
1586 OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1587
1588 sc = octeon_eth_gsc[port];
1589
1590 OCTEON_ETH_KASSERT(sc != NULL);
1591 OCTEON_ETH_KASSERT(port == sc->sc_port);
1592
1593 /* XXX process all work queue entries anyway */
1594
1595 (void)octeon_eth_recv(sc, work);
1596 }
1597
1598 /* ---- tick */
1599
1600 /*
1601 * octeon_eth_tick_free
1602 *
1603 * => garbage collect send gather buffer / mbuf
1604 * => called at softclock
1605 */
1606 static void
1607 octeon_eth_tick_free(void *arg)
1608 {
1609 struct octeon_eth_softc *sc = arg;
1610 int timo;
1611 int s;
1612
1613 s = splnet();
1614 /* XXX XXX XXX */
1615 if (sc->sc_soft_req_cnt > 0) {
1616 octeon_eth_send_queue_flush_prefetch(sc);
1617 octeon_eth_send_queue_flush_fetch(sc);
1618 octeon_eth_send_queue_flush(sc);
1619 octeon_eth_send_queue_flush_sync(sc);
1620 }
1621 /* XXX XXX XXX */
1622
1623 /* XXX XXX XXX */
1624 /* ??? */
1625 timo = hz - (100 * sc->sc_ext_callback_cnt);
1626 if (timo < 10)
1627 timo = 10;
1628 callout_schedule(&sc->sc_tick_free_ch, timo);
1629 /* XXX XXX XXX */
1630 splx(s);
1631 }
1632
1633 /*
1634 * octeon_eth_tick_misc
1635 *
1636 * => collect statistics
1637 * => check link status
1638 * => called at softclock
1639 */
1640 static void
1641 octeon_eth_tick_misc(void *arg)
1642 {
1643 struct octeon_eth_softc *sc = arg;
1644 struct ifnet *ifp;
1645 int s;
1646
1647 s = splnet();
1648
1649 ifp = &sc->sc_ethercom.ec_if;
1650
1651 octeon_gmx_stats(sc->sc_gmx_port);
1652 octeon_pip_stats(sc->sc_pip, ifp, sc->sc_port);
1653 mii_tick(&sc->sc_mii);
1654
1655 splx(s);
1656
1657 callout_schedule(&sc->sc_tick_misc_ch, hz);
1658 }
1659
1660 /* ---- odd nibble preamble workaround (software CRC processing) */
1661
1662 /* ---- sysctl */
1663
/* Handlers for the hw.cnmac.* sysctl nodes created below. */
static int octeon_eth_sysctl_verify(SYSCTLFN_ARGS);
static int octeon_eth_sysctl_pool(SYSCTLFN_ARGS);
static int octeon_eth_sysctl_rd(SYSCTLFN_ARGS);

/*
 * Node numbers assigned by sysctl_createv(); the handlers above use
 * them to tell which node they were invoked for.
 */
static int octeon_eth_sysctl_pkocmdw0n2_num;
static int octeon_eth_sysctl_pipdynrs_num;
static int octeon_eth_sysctl_redir_num;
static int octeon_eth_sysctl_pkt_pool_num;
static int octeon_eth_sysctl_wqe_pool_num;
static int octeon_eth_sysctl_cmd_pool_num;
static int octeon_eth_sysctl_sg_pool_num;
static int octeon_eth_sysctl_pktbuf_num;
1676
1677 /*
1678 * Set up sysctl(3) MIB, hw.cnmac.*.
1679 */
/*
 * Set up sysctl(3) MIB, hw.cnmac.*.
 *
 * Creates the hw.cnmac node plus three read-write tunables
 * (pko_cmd_w0_n2, pip_dyn_rs, redir), four read-only FPA pool gauges
 * (pkt_pool, wqe_pool, cmd_pool, sg_pool) and the pktbuf node.  The
 * assigned node numbers are stashed in the *_num globals so the
 * shared handler functions can tell the nodes apart.
 */
SYSCTL_SETUP(sysctl_octeon_eth, "sysctl cnmac subtree setup")
{
	int rc;
	int octeon_eth_sysctl_root_num;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    0, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, "cnmac",
	    SYSCTL_DESCR("cnmac interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_root_num = node->sysctl_num;

	/* read-write tunables, validated by octeon_eth_sysctl_verify() */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "pko_cmd_w0_n2",
	    SYSCTL_DESCR("PKO command WORD0 N2 bit"),
	    octeon_eth_sysctl_verify, 0,
	    &octeon_eth_param_pko_cmd_w0_n2,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_pkocmdw0n2_num = node->sysctl_num;

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "pip_dyn_rs",
	    SYSCTL_DESCR("PIP dynamic short in WQE"),
	    octeon_eth_sysctl_verify, 0,
	    &octeon_eth_param_pip_dyn_rs,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_pipdynrs_num = node->sysctl_num;

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "redir",
	    SYSCTL_DESCR("input port redirection"),
	    octeon_eth_sysctl_verify, 0,
	    &octeon_eth_param_redir,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_redir_num = node->sysctl_num;

	/* read-only FPA pool gauges, served by octeon_eth_sysctl_pool() */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "pkt_pool",
	    SYSCTL_DESCR("packet pool available"),
	    octeon_eth_sysctl_pool, 0, NULL,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_pkt_pool_num = node->sysctl_num;

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "wqe_pool",
	    SYSCTL_DESCR("wqe pool available"),
	    octeon_eth_sysctl_pool, 0, NULL,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_wqe_pool_num = node->sysctl_num;

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "cmd_pool",
	    SYSCTL_DESCR("cmd pool available"),
	    octeon_eth_sysctl_pool, 0, NULL,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_cmd_pool_num = node->sysctl_num;

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "sg_pool",
	    SYSCTL_DESCR("sg pool available"),
	    octeon_eth_sysctl_pool, 0, NULL,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_sg_pool_num = node->sysctl_num;

	/* read-only gauge served by octeon_eth_sysctl_rd() */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT | CTLFLAG_READONLY,
	    CTLTYPE_INT, "pktbuf",
	    SYSCTL_DESCR("input packet buffer size on POW"),
	    octeon_eth_sysctl_rd, 0,
	    &octeon_eth_param_pktbuf,
	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	octeon_eth_sysctl_pktbuf_num = node->sysctl_num;

	return;

err:
	aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
}
1806
/*
 * Shared write handler for the read-write hw.cnmac nodes.  Validates
 * the new value, stores it, and applies side effects:
 *  - pko_cmd_w0_n2: boolean, no side effect beyond the stored value;
 *  - pip_dyn_rs: boolean, pushed to the PIP config of every port;
 *  - redir: 3 nibbles of 3 bits each (one per port); installs/removes
 *    the input-redirection hook and toggles promiscuous mode.
 */
static int
octeon_eth_sysctl_verify(SYSCTLFN_ARGS)
{
	int error, v;
	struct sysctlnode node;
	struct octeon_eth_softc *sc;
	int i;
	int s;

	node = *rnode;
	v = *(int *)rnode->sysctl_data;
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (node.sysctl_num == octeon_eth_sysctl_pkocmdw0n2_num) {
		/* boolean value */
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		return 0;
	}

	if (node.sysctl_num == octeon_eth_sysctl_pipdynrs_num) {
		/* boolean value; propagated to all ports' PIP config */
		if (v < 0 || v > 1)
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			sc = octeon_eth_gsc[i];	/* XXX */
			octeon_pip_prt_cfg_enable(sc->sc_pip, PIP_PRT_CFGN_DYN_RS, v);
		}
		splx(s);
		return 0;
	}

	if (node.sysctl_num == octeon_eth_sysctl_redir_num) {
		/* only bits 2:0 of each of the three nibbles may be set */
		if (v & ~((0x7 << (4 * 0)) | (0x7 << (4 * 1)) | (0x7 << (4 * 2))))
			return EINVAL;
		*(int *)rnode->sysctl_data = v;
		s = splnet();
		for (i = 0; i < 3/* XXX */; i++) {
			struct ifnet *ifp;

			sc = octeon_eth_gsc[i];	/* XXX */
			ifp = &sc->sc_ethercom.ec_if;

			sc->sc_redir = (octeon_eth_param_redir >> (4 * i)) & 0x7;
			if (sc->sc_redir == 0) {
				/* redirection off: restore normal input */
				if (ISSET(ifp->if_flags, IFF_PROMISC)) {
					CLR(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = ether_input;
			}
			else {
				/* redirection on: promisc + redirect hook */
				if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
					SET(ifp->if_flags, IFF_PROMISC);
					octeon_eth_mii_statchg(ifp);
					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
				}
				ifp->_if_input = octeon_eth_recv_redir;
			}
		}
		splx(s);
		return 0;
	}

	return EINVAL;
}
1878
1879 static int
1880 octeon_eth_sysctl_pool(SYSCTLFN_ARGS)
1881 {
1882 int error, newval = 0;
1883 struct sysctlnode node;
1884 int s;
1885
1886 node = *rnode;
1887 node.sysctl_data = &newval;
1888 s = splnet();
1889 if (node.sysctl_num == octeon_eth_sysctl_pkt_pool_num) {
1890 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_PKT);
1891 } else if (node.sysctl_num == octeon_eth_sysctl_wqe_pool_num) {
1892 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_WQE);
1893 } else if (node.sysctl_num == octeon_eth_sysctl_cmd_pool_num) {
1894 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_CMD);
1895 } else if (node.sysctl_num == octeon_eth_sysctl_sg_pool_num) {
1896 error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_SG);
1897 } else {
1898 splx(s);
1899 return EINVAL;
1900 }
1901 splx(s);
1902 if (error)
1903 return error;
1904 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1905 if (error || newp == NULL)
1906 return error;
1907
1908 return 0;
1909 }
1910
1911 static int
1912 octeon_eth_sysctl_rd(SYSCTLFN_ARGS)
1913 {
1914 int error, v;
1915 struct sysctlnode node;
1916 int s;
1917
1918 node = *rnode;
1919 v = *(int *)rnode->sysctl_data;
1920 node.sysctl_data = &v;
1921 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1922 if (error || newp != NULL)
1923 return error;
1924
1925 if (node.sysctl_num == octeon_eth_sysctl_pktbuf_num) {
1926 uint64_t tmp;
1927 int n;
1928
1929 s = splnet();
1930 tmp = octeon_fpa_query(0);
1931 n = (int)tmp;
1932 splx(s);
1933 *(int *)rnode->sysctl_data = n;
1934 octeon_eth_param_pktbuf = n;
1935 *(int *)oldp = n;
1936 return 0;
1937 }
1938
1939 return EINVAL;
1940 }
1941