/*	$NetBSD: if_cnmac.c,v 1.27 2022/09/18 11:38:48 thorpej Exp $	*/

/*
 * Copyright (c) 2007 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.27 2022/09/18 11:38:48 thorpej Exp $");

/*
 * If no free send buffer is available, free all the sent buffers and bail out.
 */
#define CNMAC_SEND_QUEUE_CHECK

/* XXX XXX XXX XXX XXX XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/route.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#include <sys/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>
#include <machine/locore.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <mips/cpuregs.h>

#include <mips/cavium/octeonreg.h>
#include <mips/cavium/octeonvar.h>
#include <mips/cavium/include/iobusvar.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/dev/octeon_faureg.h>
#include <mips/cavium/dev/octeon_fpareg.h>
#include <mips/cavium/dev/octeon_gmxreg.h>
#include <mips/cavium/dev/octeon_pipreg.h>
#include <mips/cavium/dev/octeon_powreg.h>
#include <mips/cavium/dev/octeon_fauvar.h>
#include <mips/cavium/dev/octeon_fpavar.h>
#include <mips/cavium/dev/octeon_gmxvar.h>
#include <mips/cavium/dev/octeon_ipdvar.h>
#include <mips/cavium/dev/octeon_pipvar.h>
#include <mips/cavium/dev/octeon_pkovar.h>
#include <mips/cavium/dev/octeon_powvar.h>
#include <mips/cavium/dev/octeon_smivar.h>

#include <mips/cavium/dev/if_cnmacvar.h>

/*
 * Set the PKO to think command buffers are an odd length.  This makes it so we
 * never have to divide a command across two buffers.
 */
#define OCTEON_POOL_NWORDS_CMD	\
	(((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
#define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
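/*
 * Illustrative arithmetic (the pool size used here is an assumption, not
 * taken from octeonvar.h): with a 1024-byte command buffer, the macro above
 * advertises 1024 / 8 - 1 = 127 words to the PKO instead of the full 128,
 * i.e. an odd number of 64-bit words.  Since every transmit command is two
 * words, a command then never has to be divided across two buffers, and
 * cnmac_send_cmd() can link in a fresh buffer while a whole word is still
 * free for the chaining pointer.
 */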

static void	cnmac_buf_init(struct cnmac_softc *);

static int	cnmac_match(device_t, struct cfdata *, void *);
static void	cnmac_attach(device_t, device_t, void *);
static void	cnmac_pip_init(struct cnmac_softc *);
static void	cnmac_ipd_init(struct cnmac_softc *);
static void	cnmac_pko_init(struct cnmac_softc *);

static void	cnmac_board_mac_addr(uint8_t *, size_t, struct cnmac_softc *);

static int	cnmac_mii_readreg(device_t, int, int, uint16_t *);
static int	cnmac_mii_writereg(device_t, int, int, uint16_t);
static void	cnmac_mii_statchg(struct ifnet *);

static int	cnmac_mediainit(struct cnmac_softc *);
static void	cnmac_mediastatus(struct ifnet *, struct ifmediareq *);

static inline void	cnmac_send_queue_flush_prefetch(struct cnmac_softc *);
static inline void	cnmac_send_queue_flush_fetch(struct cnmac_softc *);
static inline void	cnmac_send_queue_flush(struct cnmac_softc *);
static inline void	cnmac_send_queue_flush_sync(struct cnmac_softc *);
static void	cnmac_send_queue_check_and_flush(struct cnmac_softc *);
static inline int	cnmac_send_queue_is_full(struct cnmac_softc *);
static inline void	cnmac_send_queue_add(struct cnmac_softc *, struct mbuf *,
			    uint64_t *);
static inline void	cnmac_send_queue_del(struct cnmac_softc *, struct mbuf **,
			    uint64_t **);
static inline int	cnmac_buf_free_work(struct cnmac_softc *, uint64_t *);
static inline void	cnmac_buf_ext_free(struct mbuf *, void *, size_t, void *);

static int	cnmac_ioctl(struct ifnet *, u_long, void *);
static void	cnmac_watchdog(struct ifnet *);
static int	cnmac_init(struct ifnet *);
static void	cnmac_stop(struct ifnet *, int);
static void	cnmac_start(struct ifnet *);

static inline int	cnmac_send_cmd(struct cnmac_softc *, uint64_t, uint64_t,
			    int *);
static inline uint64_t	cnmac_send_makecmd_w1(int, paddr_t);
static inline uint64_t	cnmac_send_makecmd_w0(uint64_t, uint64_t, size_t, int,
			    int);
static inline int	cnmac_send_makecmd_gbuf(struct cnmac_softc *, struct mbuf *,
			    uint64_t *, int *);
static inline int	cnmac_send_makecmd(struct cnmac_softc *, struct mbuf *,
			    uint64_t *, uint64_t *, uint64_t *);
static inline int	cnmac_send_buf(struct cnmac_softc *, struct mbuf *,
			    uint64_t *, int *);
static inline int	cnmac_send(struct cnmac_softc *, struct mbuf *, int *);

static int	cnmac_reset(struct cnmac_softc *);
static int	cnmac_configure(struct cnmac_softc *);
static int	cnmac_configure_common(struct cnmac_softc *);

static void	cnmac_tick_free(void *);
static void	cnmac_tick_misc(void *);

static inline int	cnmac_recv_mbuf(struct cnmac_softc *, uint64_t *,
			    struct mbuf **);
static inline int	cnmac_recv_check(struct cnmac_softc *, uint64_t);
static inline int	cnmac_recv(struct cnmac_softc *, uint64_t *);
static int	cnmac_intr(void *);

/* device parameters */
int	cnmac_param_pko_cmd_w0_n2 = 1;

CFATTACH_DECL_NEW(cnmac, sizeof(struct cnmac_softc),
    cnmac_match, cnmac_attach, NULL, NULL);

/* ---- buffer management */

static const struct cnmac_pool_param {
	int		poolno;
	size_t		size;
	size_t		nelems;
} cnmac_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef _ENTRY
};
struct octfpa_buf	*cnmac_pools[FPA_NPOOLS];
#define	cnmac_fb_pkt	cnmac_pools[OCTEON_POOL_NO_PKT]
#define	cnmac_fb_wqe	cnmac_pools[OCTEON_POOL_NO_WQE]
#define	cnmac_fb_cmd	cnmac_pools[OCTEON_POOL_NO_CMD]
#define	cnmac_fb_sg	cnmac_pools[OCTEON_POOL_NO_SG]

static int	cnmac_npowgroups = 0;

static void
cnmac_buf_init(struct cnmac_softc *sc)
{
	static int once;
	int i;
	const struct cnmac_pool_param *pp;
	struct octfpa_buf *fb;

	if (once == 1)
		return;
	once = 1;

	for (i = 0; i < (int)__arraycount(cnmac_pool_params); i++) {
		pp = &cnmac_pool_params[i];
		octfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
		cnmac_pools[i] = fb;
	}
}

/* ---- autoconf */

static int
cnmac_match(device_t parent, struct cfdata *match, void *aux)
{
	struct octgmx_attach_args *ga = aux;

	if (strcmp(match->cf_name, ga->ga_name) != 0) {
		return 0;
	}
	return 1;
}

static void
cnmac_attach(device_t parent, device_t self, void *aux)
{
	struct cnmac_softc *sc = device_private(self);
	struct octgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	prop_dictionary_t dict;
	prop_object_t clk;
	uint8_t enaddr[ETHER_ADDR_LEN];

	if (cnmac_npowgroups >= OCTEON_POW_GROUP_MAX) {
		printf(": out of POW groups\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_regt = ga->ga_regt;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_smi = ga->ga_smi;
	sc->sc_powgroup = cnmac_npowgroups++;

	if (sc->sc_port >= CVMSEG_LM_ETHER_COUNT) {
		/*
		 * If we got here, increase CVMSEG_LM_ETHER_COUNT
		 * in octeonvar.h.
		 */
		printf("%s: ERROR out of CVMSEG LM buffers\n",
		    device_xname(self));
		return;
	}

	sc->sc_init_flag = 0;
	/*
	 * XXXUEBAYASI
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
		SET(sc->sc_quirks, CNMAC_QUIRKS_NO_PRE_ALIGN);
		SET(sc->sc_quirks, CNMAC_QUIRKS_NO_RX_INBND);
	}

	cnmac_board_mac_addr(enaddr, sizeof(enaddr), sc);
	printf("%s: Ethernet address %s\n", device_xname(self),
	    ether_sprintf(enaddr));

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	octgmx_stats_init(sc->sc_gmx_port);

	callout_init(&sc->sc_tick_misc_ch, 0);
	callout_setfunc(&sc->sc_tick_misc_ch, cnmac_tick_misc, sc);

	callout_init(&sc->sc_tick_free_ch, 0);
	callout_setfunc(&sc->sc_tick_free_ch, cnmac_tick_free, sc);

	const int dv_unit = device_unit(self);
	octfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(dv_unit, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (dv_unit + 1))/* XXX */);
	octfau_op_set_8(&sc->sc_fau_done, 0);

	cnmac_pip_init(sc);
	cnmac_ipd_init(sc);
	cnmac_pko_init(sc);

	cnmac_configure_common(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
	/* XXX */
	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;

	/* XXX */
	sc->sc_pow = &octpow_softc;

	cnmac_mediainit(sc);

	strncpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cnmac_ioctl;
	ifp->if_start = cnmac_start;
	ifp->if_watchdog = cnmac_watchdog;
	ifp->if_init = cnmac_init;
	ifp->if_stop = cnmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities =
#if 0	/* XXX: no tx checksum yet */
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
	    IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;
#else
	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
#endif

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	octgmx_set_mac_addr(sc->sc_gmx_port, enaddr);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	octgmx_set_filter(sc->sc_gmx_port);

#if 1
	cnmac_buf_init(sc);
#endif

	sc->sc_ih = octeon_intr_establish(POW_WORKQ_IRQ(sc->sc_powgroup),
	    IPL_NET, cnmac_intr, sc);
	if (sc->sc_ih == NULL)
		panic("%s: could not set up interrupt", device_xname(self));

	dict = device_properties(sc->sc_gmx->sc_dev);

	clk = prop_dictionary_get(dict, "rgmii-tx");
	if (clk)
		sc->sc_gmx_port->sc_clk_tx_setting =
		    prop_number_signed_value(clk);
	clk = prop_dictionary_get(dict, "rgmii-rx");
	if (clk)
		sc->sc_gmx_port->sc_clk_rx_setting =
		    prop_number_signed_value(clk);
}

/* ---- submodules */

/* XXX */
static void
cnmac_pip_init(struct cnmac_softc *sc)
{
	struct octpip_attach_args pip_aa;

	pip_aa.aa_port = sc->sc_port;
	pip_aa.aa_regt = sc->sc_regt;
	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
	pip_aa.aa_receive_group = sc->sc_powgroup;
	pip_aa.aa_ip_offset = sc->sc_ip_offset;
	octpip_init(&pip_aa, &sc->sc_pip);
	octpip_port_config(sc->sc_pip);
}

/* XXX */
static void
cnmac_ipd_init(struct cnmac_softc *sc)
{
	struct octipd_attach_args ipd_aa;

	ipd_aa.aa_port = sc->sc_port;
	ipd_aa.aa_regt = sc->sc_regt;
	ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
	octipd_init(&ipd_aa, &sc->sc_ipd);
}

/* XXX */
static void
cnmac_pko_init(struct cnmac_softc *sc)
{
	struct octpko_attach_args pko_aa;

	pko_aa.aa_port = sc->sc_port;
	pko_aa.aa_regt = sc->sc_regt;
	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
	octpko_init(&pko_aa, &sc->sc_pko);
}

/* ---- XXX */

#define	ADDR2UINT64(u, a) \
	do { \
		u = \
		    (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
		    ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
		    ((uint64_t)a[4] << 8) | ((uint64_t)a[5] << 0)); \
	} while (0)
#define	UINT642ADDR(a, u) \
	do { \
		a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
		a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
		a[4] = (uint8_t)((u) >> 8); a[5] = (uint8_t)((u) >> 0); \
	} while (0)
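/*
 * Usage sketch (illustrative only): pack an Ethernet address into the low
 * 48 bits of a uint64_t and back.
 *
 *	uint8_t ea[ETHER_ADDR_LEN] = { 0x00, 0x0f, 0xb7, 0x01, 0x02, 0x03 };
 *	uint64_t u;
 *
 *	ADDR2UINT64(u, ea);	-> u == 0x00000fb7010203
 *	UINT642ADDR(ea, u);	-> unpacks u back into ea[0..5]
 */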

static void
cnmac_board_mac_addr(uint8_t *enaddr, size_t size, struct cnmac_softc *sc)
{
	prop_dictionary_t dict;
	prop_data_t ea;

	dict = device_properties(sc->sc_dev);
	KASSERT(dict != NULL);
	ea = prop_dictionary_get(dict, "mac-address");
	KASSERT(ea != NULL);
	memcpy(enaddr, prop_data_value(ea), size);
}

/* ---- media */

static int
cnmac_mii_readreg(device_t self, int phy_addr, int reg, uint16_t *val)
{
	struct cnmac_softc *sc = device_private(self);

	return octsmi_read(sc->sc_smi, phy_addr, reg, val);
}

static int
cnmac_mii_writereg(device_t self, int phy_addr, int reg, uint16_t val)
{
	struct cnmac_softc *sc = device_private(self);

	return octsmi_write(sc->sc_smi, phy_addr, reg, val);
}

static void
cnmac_mii_statchg(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	octpko_port_enable(sc->sc_pko, 0);
	octgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		octgmx_set_filter(sc->sc_gmx_port);

	octpko_port_enable(sc->sc_pko, 1);
	octgmx_port_enable(sc->sc_gmx_port, 1);
}

static int
cnmac_mediainit(struct cnmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	prop_object_t phy;

	mii->mii_ifp = ifp;
	mii->mii_readreg = cnmac_mii_readreg;
	mii->mii_writereg = cnmac_mii_writereg;
	mii->mii_statchg = cnmac_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;

	/* Initialize ifmedia structures. */
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, cnmac_mediastatus);

	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
	KASSERT(phy != NULL);

	mii_attach(sc->sc_dev, mii, 0xffffffff, prop_number_signed_value(phy),
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* XXX XXX XXX */
	if (LIST_FIRST(&mii->mii_phys) != NULL) {
		/* XXX XXX XXX */
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
		/* XXX XXX XXX */
	} else {
		/* XXX XXX XXX */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		/* XXX XXX XXX */
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
		/* XXX XXX XXX */
	}
	/* XXX XXX XXX */

	return 0;
}

static void
cnmac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cnmac_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);

	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_gmx_port->sc_port_flowflags;
}

/* ---- send buffer garbage collection */

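/*
 * Outline of the reclaim steps below (summarized from the code, not from
 * hardware documentation): the FAU register behind sc_fau_done appears to be
 * decremented once per completed transmit command, so its value is minus the
 * number of packets whose buffers may be reclaimed (see the KASSERTs).
 *
 *	_prefetch	issue an asynchronous (IOBDMA) fetch-and-add read
 *			of that register
 *	_fetch		collect the result into sc_hard_done_cnt
 *	_flush		free that many sent mbufs and gather buffers, then
 *			add the count back to the register
 *	_sync		pick up the result of that update and subtract
 *			sc_flush from sc_soft_req_cnt
 */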
static inline void
cnmac_send_queue_flush_prefetch(struct cnmac_softc *sc)
{

	KASSERT(sc->sc_prefetch == 0);
	octfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}

static inline void
cnmac_send_queue_flush_fetch(struct cnmac_softc *sc)
{

	KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = octfau_op_inc_read_8(&sc->sc_fau_done);
	KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}

static inline void
cnmac_send_queue_flush(struct cnmac_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	KASSERT(sc->sc_flush == 0);
	KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		cnmac_send_queue_del(sc, &m, &gbuf);

		octfpa_buf_put(cnmac_fb_sg, gbuf);

		m_freem(m);

		sc->sc_txbusy = false;
	}

	octfau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}

static inline void
cnmac_send_queue_flush_sync(struct cnmac_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	KASSERT(sc->sc_flush > 0);

	/* XXX XXX XXX */
	octfau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX XXX XXX */

	sc->sc_flush = 0;
}

static inline int
cnmac_send_queue_is_full(struct cnmac_softc *sc)
{
#ifdef CNMAC_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		cnmac_send_queue_flush(sc);
		cnmac_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}

static void
cnmac_send_queue_check_and_flush(struct cnmac_softc *sc)
{
	int s;

	/* XXX XXX XXX */
	s = splnet();
	if (sc->sc_soft_req_cnt > 0) {
		cnmac_send_queue_flush_prefetch(sc);
		cnmac_send_queue_flush_fetch(sc);
		cnmac_send_queue_flush(sc);
		cnmac_send_queue_flush_sync(sc);
	}
	splx(s);
	/* XXX XXX XXX */
}

/*
 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
 * buffer.  Other mbuf members may be used by m_freem(), so don't touch them!
 */

struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;
		struct {
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};

static inline void
cnmac_send_queue_add(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;
}

static inline void
cnmac_send_queue_del(struct cnmac_softc *sc, struct mbuf **rm, uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}

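/*
 * Return the receive resources referenced by a work queue entry to the FPA:
 * the packet data buffer, if WORD2[BUFS] reports one attached, and then the
 * work queue entry itself.
 */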
static inline int
cnmac_buf_free_work(struct cnmac_softc *sc, uint64_t *work)
{

	/* XXX when jumbo frame */
	if (ISSET(work[2], PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = work[3] & PIP_WQE_WORD3_ADDR;
		start_buffer = addr & ~(2048 - 1);

		octfpa_buf_put_paddr(cnmac_fb_pkt, start_buffer);
	}

	octfpa_buf_put(cnmac_fb_wqe, work);

	return 0;
}

static inline void
cnmac_buf_ext_free(struct mbuf *m, void *buf, size_t size, void *arg)
{
	octfpa_buf_put(cnmac_fb_pkt, buf);

	KASSERT(m != NULL);

	pool_cache_put(mb_cache, m);
}

/* ---- ifnet interfaces */

static int
cnmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct cnmac_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			octgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	cnmac_start(ifp);

	splx(s);

	return error;
}

/* ---- send (output) */

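/*
 * Transmit path overview: cnmac_start() takes mbufs off the interface send
 * queue and hands each one to cnmac_send(), which allocates a gather buffer,
 * fills in the per-segment words (cnmac_send_makecmd_gbuf()), builds the
 * two-word PKO command (cnmac_send_makecmd_w0()/_w1()), appends the command
 * to the per-port command buffer (cnmac_send_cmd()) and records the mbuf on
 * sc_sendq for later reclaim.  The accumulated doorbell word count is written
 * to the PKO before cnmac_start() returns.
 */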
static inline uint64_t
cnmac_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{

	return octpko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,		/* sz1 */
	    OCT_FAU_OP_SIZE_64,		/* sz0 */
	    1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
	    0,				/* le */
	    cnmac_param_pko_cmd_w0_n2,	/* n2 */
	    1, 0,			/* q, r */
	    (segs == 1) ? 0 : 1,	/* g */
	    0, 0, 1,			/* ipoffp1, ii, df */
	    segs, (int)len);		/* segs, totalbytes */
}

static inline uint64_t
cnmac_send_makecmd_w1(int size, paddr_t addr)
{

	return octpko_cmd_word1(
	    0, 0,			/* i, back */
	    OCTEON_POOL_NO_SG,		/* pool */
	    size, addr);		/* size, addr */
}

static inline int
cnmac_send_makecmd_gbuf(struct cnmac_softc *sc, struct mbuf *m0, uint64_t *gbuf,
    int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uintptr_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

		/* Aligned 4k */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX XXX XXX */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = cnmac_send_makecmd_w1(rlen,
			    kvtophys((vaddr_t)m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX XXX XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		*(gbuf + segs) = cnmac_send_makecmd_w1(nlen,
		    kvtophys((vaddr_t)(m->m_data + rlen)));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}

static inline int
cnmac_send_makecmd(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (cnmac_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: too many segments in transmit data\n",
		    device_xname(sc->sc_dev));
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = cnmac_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	if (segs == 1) {
		pko_cmd_w1 = cnmac_send_makecmd_w1(
		    m->m_pkthdr.len, kvtophys((vaddr_t)m->m_data));
	} else {
#ifdef __mips_n32
		KASSERT(MIPS_KSEG0_P(gbuf));
		pko_cmd_w1 = cnmac_send_makecmd_w1(segs,
		    MIPS_KSEG0_TO_PHYS(gbuf));
#else
		pko_cmd_w1 = cnmac_send_makecmd_w1(segs,
		    MIPS_XKPHYS_TO_PHYS(gbuf));
#endif
	}

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}

static inline int
cnmac_send_cmd(struct cnmac_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1, int *pwdc)
{
	uint64_t *cmdptr;
	int result = 0;

#ifdef __mips_n32
	KASSERT((sc->sc_cmdptr.cmdptr & ~MIPS_PHYS_MASK) == 0);
	cmdptr = (uint64_t *)MIPS_PHYS_TO_KSEG0(sc->sc_cmdptr.cmdptr);
#else
	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
#endif
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = octfpa_buf_get_paddr(cnmac_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    device_xname(sc->sc_dev));
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	*pwdc += 2;

done:
	return result;
}

static inline int
cnmac_send_buf(struct cnmac_softc *sc, struct mbuf *m, uint64_t *gbuf,
    int *pwdc)
{
	int result = 0, error;
	uint64_t pko_cmd_w0, pko_cmd_w1;

	error = cnmac_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
	if (error != 0) {
		/* Already logging */
		result = error;
		goto done;
	}

	error = cnmac_send_cmd(sc, pko_cmd_w0, pko_cmd_w1, pwdc);
	if (error != 0) {
		/* Already logging */
		result = error;
	}

done:
	return result;
}

static inline int
cnmac_send(struct cnmac_softc *sc, struct mbuf *m, int *pwdc)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = octfpa_buf_get_paddr(cnmac_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING, "%s: can not allocate gather buffer from "
		    "free pool allocator\n", device_xname(sc->sc_dev));
		result = 1;
		goto done;
	}

#ifdef __mips_n32
	KASSERT((gaddr & ~MIPS_PHYS_MASK) == 0);
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_KSEG0(gaddr);
#else
	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);
#endif

	KASSERT(gbuf != NULL);

	error = cnmac_send_buf(sc, m, gbuf, pwdc);
	if (error != 0) {
		/* Already logging */
		octfpa_buf_put_paddr(cnmac_fb_sg, gaddr);
		result = error;
		goto done;
	}

	cnmac_send_queue_add(sc, m, gbuf);

done:
	return result;
}

static void
cnmac_start(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int wdc = 0;

	/*
	 * Performance tuning
	 * pre-send iobdma request
	 */
	cnmac_send_queue_flush_prefetch(sc);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto last;

	if (sc->sc_txbusy)
		goto last;

	if (__predict_false(!octgmx_link_status(sc->sc_gmx_port)))
		goto last;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* XXX XXX XXX */
		cnmac_send_queue_flush_fetch(sc);

		/*
		 * If no free send buffer is available, free all the sent
		 * buffers and bail out.
		 */
		if (cnmac_send_queue_is_full(sc)) {
			sc->sc_txbusy = true;
			if (wdc > 0)
				octpko_op_doorbell_write(sc->sc_port,
				    sc->sc_port, wdc);
			callout_schedule(&sc->sc_tick_free_ch, 1);
			return;
		}
		/* XXX XXX XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* XXX XXX XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			cnmac_send_queue_flush(sc);
		if (cnmac_send(sc, m, &wdc)) {
			IF_DROP(&ifp->if_snd);
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    device_xname(sc->sc_dev));
		} else
			sc->sc_soft_req_cnt++;

		if (sc->sc_flush)
			cnmac_send_queue_flush_sync(sc);
		/* XXX XXX XXX */

		/* Send next iobdma request */
		cnmac_send_queue_flush_prefetch(sc);
	}

	if (wdc > 0)
		octpko_op_doorbell_write(sc->sc_port, sc->sc_port, wdc);

last:
	cnmac_send_queue_flush_fetch(sc);
	callout_schedule(&sc->sc_tick_free_ch, 1);
}

static void
cnmac_watchdog(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));

	cnmac_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	sc->sc_txbusy = false;
	ifp->if_timer = 0;

	cnmac_start(ifp);
}

static int
cnmac_init(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		cnmac_stop(ifp, 0);

		/* Initialize the device */
		cnmac_configure(sc);

		octpko_enable(sc->sc_pko);
		octipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		octgmx_port_enable(sc->sc_gmx_port, 1);
	}
	mii_ifmedia_change(&sc->sc_mii);

	octgmx_set_filter(sc->sc_gmx_port);

	callout_schedule(&sc->sc_tick_misc_ch, hz);
	callout_schedule(&sc->sc_tick_free_ch, hz);

	SET(ifp->if_flags, IFF_RUNNING);
	sc->sc_txbusy = false;

	return 0;
}

static void
cnmac_stop(struct ifnet *ifp, int disable)
{
	struct cnmac_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_misc_ch);
	callout_stop(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	octgmx_port_enable(sc->sc_gmx_port, 0);

	/* Mark the interface as down and cancel the watchdog timer. */
	CLR(ifp->if_flags, IFF_RUNNING);
	sc->sc_txbusy = false;
	ifp->if_timer = 0;
}

/* ---- misc */

static int
cnmac_reset(struct cnmac_softc *sc)
{
	octgmx_reset_speed(sc->sc_gmx_port);
	octgmx_reset_flowctl(sc->sc_gmx_port);
	octgmx_reset_timing(sc->sc_gmx_port);

	return 0;
}

static int
cnmac_configure(struct cnmac_softc *sc)
{
	octgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	cnmac_configure_common(sc);

	octpko_port_config(sc->sc_pko);
	octpko_port_enable(sc->sc_pko, 1);
	octpow_config(sc->sc_pow, sc->sc_powgroup);

	octgmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	octgmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	octgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}

static int
cnmac_configure_common(struct cnmac_softc *sc)
{
	static int once;

	if (once == 1)
		return 0;
	once = 1;

	octipd_config(sc->sc_ipd);
	octpko_config(sc->sc_pko);

	return 0;
}

/* ---- receive (input) */

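/*
 * Convert a received work queue entry into an mbuf.  The packet data buffer
 * is attached as external storage and goes back to the FPA through
 * cnmac_buf_ext_free() when the mbuf is freed; the work queue entry itself
 * is returned to its pool immediately.
 */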
static inline int
cnmac_recv_mbuf(struct cnmac_softc *sc, uint64_t *work, struct mbuf **rm)
{
	struct mbuf *m;
	vaddr_t addr;
	vaddr_t ext_buf;
	size_t ext_size;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;

	octfpa_buf_put(cnmac_fb_wqe, work);

	if (__SHIFTOUT(word2, PIP_WQE_WORD2_IP_BUFS) != 1)
		panic("%s: expected one buffer, got %" PRId64, __func__,
		    __SHIFTOUT(word2, PIP_WQE_WORD2_IP_BUFS));

#ifdef __mips_n32
	KASSERT((word3 & ~MIPS_PHYS_MASK) == 0);
	addr = MIPS_PHYS_TO_KSEG0(word3 & PIP_WQE_WORD3_ADDR);
#else
	addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
#endif

	ext_size = OCTEON_POOL_SIZE_PKT;
	ext_buf = addr & ~(ext_size - 1);
	MEXTADD(m, ext_buf, ext_size, 0, cnmac_buf_ext_free, NULL);

	m->m_data = (void *)addr;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m_set_rcvif(m, &sc->sc_ethercom.ec_if);

	/* Not readonly buffer */
	m->m_flags |= M_EXT_RW;

	*rm = m;

	KASSERT(*rm != NULL);

	return 0;
}

static inline int
cnmac_recv_check(struct cnmac_softc *sc, uint64_t word2)
{
	static struct timeval rxerr_log_interval = { 0, 2500000 };
	uint64_t opcode;

	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
		return 0;

	opcode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
	if ((sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG) &&
	    ratecheck(&sc->sc_rxerr_log_last, &rxerr_log_interval))
		log(LOG_DEBUG, "%s: rx error (%" PRId64 ")\n",
		    device_xname(sc->sc_dev), opcode);

	/* This error is harmless */
	if (opcode == PIP_WQE_WORD2_RE_OPCODE_OVRRUN)
		return 0;

	return 1;
}

static inline int
cnmac_recv(struct cnmac_softc *sc, uint64_t *work)
{
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	KASSERT(sc != NULL);
	KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_ethercom.ec_if;

	KASSERT(ifp != NULL);

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		goto drop;

	if (__predict_false(cnmac_recv_check(sc, word2) != 0)) {
		if_statinc(ifp, if_ierrors);
		goto drop;
	}

	if (__predict_false(cnmac_recv_mbuf(sc, work, &m) != 0)) {
		if_statinc(ifp, if_ierrors);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	KASSERT(m != NULL);

	octipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	if_percpuq_enqueue(ifp->if_percpuq, m);

	return 0;

drop:
	cnmac_buf_free_work(sc, work);
	return 1;
}

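/*
 * Work queue interrupt handler: restrict this core to the port's POW group,
 * pull work queue entries with asynchronous (IOBDMA) work requests, hand
 * each entry to cnmac_recv() and reclaim transmitted buffers, then
 * acknowledge the work queue interrupt.
 */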
static int
cnmac_intr(void *arg)
{
	struct cnmac_softc *sc = arg;
	uint64_t *work;
	uint64_t wqmask = __BIT(sc->sc_powgroup);
	uint32_t coreid = 0;	/* XXX octeon_get_coreid() */
	uint32_t port;

	_POW_WR8(sc->sc_pow, POW_PP_GRP_MSK_OFFSET(coreid), wqmask);

	octpow_tag_sw_wait();
	octpow_work_request_async(OCTEON_CVMSEG_OFFSET(csm_pow_intr),
	    POW_NO_WAIT);

	for (;;) {
		work = (uint64_t *)octpow_work_response_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr));
		if (work == NULL)
			break;

		octpow_tag_sw_wait();
		octpow_work_request_async(OCTEON_CVMSEG_OFFSET(csm_pow_intr),
		    POW_NO_WAIT);

		port = __SHIFTOUT(work[1], PIP_WQE_WORD1_IPRT);
		if (port != sc->sc_port) {
			printf("%s: unexpected wqe port %u, should be %u\n",
			    device_xname(sc->sc_dev), port, sc->sc_port);
			goto wqe_error;
		}

		(void)cnmac_recv(sc, work);

		cnmac_send_queue_check_and_flush(sc);
	}

	_POW_WR8(sc->sc_pow, POW_WQ_INT_OFFSET, wqmask);

	return 1;

wqe_error:
	printf("word0: 0x%016" PRIx64 "\n", work[0]);
	printf("word1: 0x%016" PRIx64 "\n", work[1]);
	printf("word2: 0x%016" PRIx64 "\n", work[2]);
	printf("word3: 0x%016" PRIx64 "\n", work[3]);
	panic("wqe_error");
}

/* ---- tick */

/*
 * cnmac_tick_free
 *
 * => garbage collect send gather buffer / mbuf
 * => called at softclock
 */
static void
cnmac_tick_free(void *arg)
{
	struct cnmac_softc *sc = arg;
	int timo;

	cnmac_send_queue_check_and_flush(sc);

	timo = (sc->sc_ext_callback_cnt > 0) ? 1 : hz;
	callout_schedule(&sc->sc_tick_free_ch, timo);
}

/*
 * cnmac_tick_misc
 *
 * => collect statistics
 * => check link status
 * => called at softclock
 */
static void
cnmac_tick_misc(void *arg)
{
	struct cnmac_softc *sc = arg;
	struct ifnet *ifp;
	int s;

	s = splnet();

	ifp = &sc->sc_ethercom.ec_if;

	octgmx_stats(sc->sc_gmx_port);
	octpip_stats(sc->sc_pip, ifp, sc->sc_port);
	mii_tick(&sc->sc_mii);

	splx(s);

	callout_schedule(&sc->sc_tick_misc_ch, hz);
}