octeon_ipd.c revision 1.1 1 /* $NetBSD: octeon_ipd.c,v 1.1 2015/04/29 08:32:01 hikaru Exp $ */
2
3 /*
4 * Copyright (c) 2007 Internet Initiative Japan, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: octeon_ipd.c,v 1.1 2015/04/29 08:32:01 hikaru Exp $");
31
32 #include "opt_octeon.h"
33
34 #include "opt_octeon.h"
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <mips/locore.h>
41 #include <mips/cavium/octeonvar.h>
42 #include <mips/cavium/dev/octeon_ciureg.h>
43 #include <mips/cavium/dev/octeon_fpavar.h>
44 #include <mips/cavium/dev/octeon_pipreg.h>
45 #include <mips/cavium/dev/octeon_ipdreg.h>
46 #include <mips/cavium/dev/octeon_ipdvar.h>
47
48 #include <netinet/in.h>
49 #include <netinet/in_systm.h>
50 #include <netinet/ip.h>
51
/*
 * Compute the address of the IP header inside a received packet:
 * the packet data pointer plus the IP offset field extracted from
 * work-queue-entry WORD2.  All macro arguments are parenthesized so
 * that expression arguments expand correctly.
 */
#define IP_OFFSET(data, word2) \
	((uintptr_t)(data) + \
	    (uintptr_t)(((word2) & PIP_WQE_WORD2_IP_OFFSET) >> \
	    PIP_WQE_WORD2_IP_OFFSET_SHIFT))
54
#ifdef OCTEON_ETH_DEBUG
/* Debug-only interrupt/diagnostic plumbing (prototypes defined below). */
void octeon_ipd_intr_evcnt_attach(struct octeon_ipd_softc *);
void octeon_ipd_intr_rml(void *);
int octeon_ipd_intr_drop(void *);

void octeon_ipd_dump(void);

/* Interrupt handle for the CIU "IPD drop" interrupt; established once. */
static void *octeon_ipd_intr_drop_ih;
struct evcnt octeon_ipd_intr_drop_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_INTR, NULL, "octeon",
    "ipd drop intr");
EVCNT_ATTACH_STATIC(octeon_ipd_intr_drop_evcnt);

/* Softc table indexed by port number; size hardcoded to 3. XXX */
struct octeon_ipd_softc *__octeon_ipd_softc[3/* XXX */];
#endif
70
71 /* XXX */
72 void
73 octeon_ipd_init(struct octeon_ipd_attach_args *aa,
74 struct octeon_ipd_softc **rsc)
75 {
76 struct octeon_ipd_softc *sc;
77 int status;
78
79 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
80 if (sc == NULL)
81 panic("can't allocate memory: %s", __func__);
82
83 sc->sc_port = aa->aa_port;
84 sc->sc_regt = aa->aa_regt;
85 sc->sc_first_mbuff_skip = aa->aa_first_mbuff_skip;
86 sc->sc_not_first_mbuff_skip = aa->aa_not_first_mbuff_skip;
87
88 status = bus_space_map(sc->sc_regt, IPD_BASE, IPD_SIZE, 0,
89 &sc->sc_regh);
90 if (status != 0)
91 panic("can't map %s space", "ipd register");
92
93 *rsc = sc;
94
95 #ifdef OCTEON_ETH_DEBUG
96 octeon_ipd_int_enable(sc, 1);
97 octeon_ipd_intr_evcnt_attach(sc);
98 if (octeon_ipd_intr_drop_ih == NULL)
99 octeon_ipd_intr_drop_ih = octeon_intr_establish(
100 ffs64(CIU_INTX_SUM0_IPD_DRP) - 1, 0, IPL_NET,
101 octeon_ipd_intr_drop, NULL);
102 __octeon_ipd_softc[sc->sc_port] = sc;
103 #endif /* OCTEON_ETH_DEBUG */
104 }
105
/* 8-byte accessors for the mapped IPD register window. */
#define _IPD_RD8(sc, off) \
	bus_space_read_8((sc)->sc_regt, (sc)->sc_regh, (off))
#define _IPD_WR8(sc, off, v) \
	bus_space_write_8((sc)->sc_regt, (sc)->sc_regh, (off), (v))
110
111 int
112 octeon_ipd_enable(struct octeon_ipd_softc *sc)
113 {
114 uint64_t ctl_status;
115
116 ctl_status = _IPD_RD8(sc, IPD_CTL_STATUS_OFFSET);
117 SET(ctl_status, IPD_CTL_STATUS_IPD_EN);
118 _IPD_WR8(sc, IPD_CTL_STATUS_OFFSET, ctl_status);
119
120 return 0;
121 }
122
123 int
124 octeon_ipd_config(struct octeon_ipd_softc *sc)
125 {
126 uint64_t first_mbuff_skip;
127 uint64_t not_first_mbuff_skip;
128 uint64_t packet_mbuff_size;
129 uint64_t first_next_ptr_back;
130 uint64_t second_next_ptr_back;
131 uint64_t sqe_fpa_queue;
132 uint64_t ctl_status;
133
134 /* XXX XXX XXX */
135 first_mbuff_skip = 0;
136 SET(first_mbuff_skip, (sc->sc_first_mbuff_skip / 8) & IPD_1ST_MBUFF_SKIP_SZ);
137 _IPD_WR8(sc, IPD_1ST_MBUFF_SKIP_OFFSET, first_mbuff_skip);
138 /* XXX XXX XXX */
139
140 /* XXX XXX XXX */
141 not_first_mbuff_skip = 0;
142 SET(not_first_mbuff_skip, (sc->sc_not_first_mbuff_skip / 8) &
143 IPD_NOT_1ST_MBUFF_SKIP_SZ);
144 _IPD_WR8(sc, IPD_NOT_1ST_MBUFF_SKIP_OFFSET, not_first_mbuff_skip);
145 /* XXX XXX XXX */
146
147 packet_mbuff_size = 0;
148 SET(packet_mbuff_size, (FPA_RECV_PKT_POOL_SIZE / 8) &
149 IPD_PACKET_MBUFF_SIZE_MB_SIZE);
150 _IPD_WR8(sc, IPD_PACKET_MBUFF_SIZE_OFFSET, packet_mbuff_size);
151
152 first_next_ptr_back = 0;
153 SET(first_next_ptr_back, (sc->sc_first_mbuff_skip / 128) & IPD_1ST_NEXT_PTR_BACK_BACK);
154 _IPD_WR8(sc, IPD_1ST_NEXT_PTR_BACK_OFFSET, first_next_ptr_back);
155
156 second_next_ptr_back = 0;
157 SET(second_next_ptr_back, (sc->sc_not_first_mbuff_skip / 128) &
158 IPD_2ND_NEXT_PTR_BACK_BACK);
159 _IPD_WR8(sc, IPD_2ND_NEXT_PTR_BACK_OFFSET, second_next_ptr_back);
160
161 sqe_fpa_queue = 0;
162 SET(sqe_fpa_queue, FPA_WQE_POOL & IPD_WQE_FPA_QUEUE_WQE_QUE);
163 _IPD_WR8(sc, IPD_WQE_FPA_QUEUE_OFFSET, sqe_fpa_queue);
164
165 ctl_status = _IPD_RD8(sc, IPD_CTL_STATUS_OFFSET);
166 CLR(ctl_status, IPD_CTL_STATUS_OPC_MODE);
167 SET(ctl_status, IPD_CTL_STATUS_OPC_MODE_ALL);
168 SET(ctl_status, IPD_CTL_STATUS_PBP_EN);
169
170 /*
171 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
172 * from SDK
173 * SET(ctl_status, IPD_CTL_STATUS_LEN_M8);
174 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
175 */
176
177 _IPD_WR8(sc, IPD_CTL_STATUS_OFFSET, ctl_status);
178
179 return 0;
180 }
181
182 /*
183 * octeon work queue entry offload
184 * L3 error & L4 error
185 */
186 void
187 octeon_ipd_offload(uint64_t word2, void *data, int *rcflags)
188 {
189 int cflags;
190
191 if (ISSET(word2, PIP_WQE_WORD2_IP_NI))
192 return;
193
194 cflags = 0;
195
196 if (!ISSET(word2, PIP_WQE_WORD2_IP_V6))
197 SET(cflags, M_CSUM_IPv4);
198
199 if (ISSET(word2, PIP_WQE_WORD2_IP_TU)) {
200 SET(cflags,
201 !ISSET(word2, PIP_WQE_WORD2_IP_V6) ?
202 (M_CSUM_TCPv4 | M_CSUM_UDPv4) :
203 (M_CSUM_TCPv6 | M_CSUM_UDPv6));
204 }
205
206 /* check L3 (IP) error */
207 if (ISSET(word2, PIP_WQE_WORD2_IP_IE)) {
208 struct ip *ip;
209
210 switch (word2 & PIP_WQE_WORD2_IP_OPECODE) {
211 case IPD_WQE_L3_V4_CSUM_ERR:
212 /* CN31XX Pass 1.1 Errata */
213 ip = (struct ip *)(IP_OFFSET(data, word2));
214 if (ip->ip_hl == 5)
215 SET(cflags, M_CSUM_IPv4_BAD);
216 break;
217 default:
218 break;
219 }
220 }
221
222 /* check L4 (UDP / TCP) error */
223 if (ISSET(word2, PIP_WQE_WORD2_IP_LE)) {
224 switch (word2 & PIP_WQE_WORD2_IP_OPECODE) {
225 case IPD_WQE_L4_CSUM_ERR:
226 SET(cflags, M_CSUM_TCP_UDP_BAD);
227 break;
228 default:
229 break;
230 }
231 }
232
233 *rcflags = cflags;
234 }
235
/*
 * Program IPD RED (Random Early Discard) identically on all eight
 * QoS queues.  "pass_thr" and "drop_thr" are the RED watermarks
 * written to the QOSn RED_MARKS registers.  Compiled to a no-op
 * unless OCTEON_ETH_IPD_RED is defined.  Always returns 0.
 */
int
octeon_ipd_red(struct octeon_ipd_softc *sc, uint64_t pass_thr, uint64_t drop_thr)
{
#if defined(OCTEON_ETH_IPD_RED)
	/*
	 * No-receive problem workaround: if the IPD RED parameters
	 * are not set, the chip may become unable to receive packets
	 * in a media-mismatch environment with the local side at
	 * 100-half duplex.
	 */
	uint64_t red_marks;
	uint64_t red_param;
	uint64_t red_port;

	/* Drop threshold in the high word, pass threshold in the low. */
	red_marks = drop_thr << 32 /* XXX */ | pass_thr;
	_IPD_WR8(sc, IPD_QOS0_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS1_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS2_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS3_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS4_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS5_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS6_RED_MARKS_OFFSET, red_marks);
	_IPD_WR8(sc, IPD_QOS7_RED_MARKS_OFFSET, red_marks);
	/*
	 * NOTE(review): this divides by zero when pass_thr == drop_thr,
	 * and underflows when pass_thr < drop_thr -- callers presumably
	 * guarantee pass_thr > drop_thr; confirm at the call site.
	 */
	red_param =
	    ((255ull << 24 /* XXX */) / (pass_thr - drop_thr)) |
	    1ull << 32 /* XXX */ |
	    255ull << 40 /* XXX */ |
	    1ull << 48 /* XXX */;
	_IPD_WR8(sc, IPD_RED_QUE0_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE1_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE2_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE3_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE4_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE5_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE6_PARAM_OFFSET, red_param);
	_IPD_WR8(sc, IPD_RED_QUE7_PARAM_OFFSET, red_param);

	_IPD_WR8(sc, IPD_BP_PRT_RED_END_OFFSET, 0);

	/* Enable RED on all ports; magic constants taken as-is. XXX */
	red_port = 0xfffffffffull |
	    10000ull << 36 /* XXX */ |
	    10000ull << 50 /* XXX */;
	_IPD_WR8(sc, IPD_RED_PORT_ENABLE_OFFSET, red_port);
#endif

	return 0;
}
284
285 void
286 octeon_ipd_sub_port_fcs(struct octeon_ipd_softc *sc, int enable)
287 {
288 uint64_t sub_port_fcs;
289
290 sub_port_fcs = _IPD_RD8(sc, IPD_SUB_PORT_FCS_OFFSET);
291 if (enable == 0)
292 CLR(sub_port_fcs, 1 << sc->sc_port);
293 else
294 SET(sub_port_fcs, 1 << sc->sc_port);
295 _IPD_WR8(sc, IPD_SUB_PORT_FCS_OFFSET, sub_port_fcs);
296 }
297
#ifdef OCTEON_ETH_DEBUG
/* Nonzero: print IPD_INT_SUM in the RML interrupt handler. */
int octeon_ipd_intr_rml_verbose;
struct evcnt octeon_ipd_intr_evcnt;

/* Per-softc event counters bumped from octeon_ipd_intr_rml(). */
static const struct octeon_evcnt_entry octeon_ipd_intr_evcnt_entries[] = {
#define _ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_ipd_softc, name, type, parent, descr)
	_ENTRY(ipdbpsub, MISC, NULL, "ipd backpressure subtract"),
	_ENTRY(ipdprcpar3, MISC, NULL, "ipd parity error 127:96"),
	_ENTRY(ipdprcpar2, MISC, NULL, "ipd parity error 95:64"),
	_ENTRY(ipdprcpar1, MISC, NULL, "ipd parity error 63:32"),
	_ENTRY(ipdprcpar0, MISC, NULL, "ipd parity error 31:0"),
#undef _ENTRY
};
312
/* Attach the debug event counters above under the name "ipd0". */
void
octeon_ipd_intr_evcnt_attach(struct octeon_ipd_softc *sc)
{
	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_ipd_intr_evcnt_entries, "ipd0");
}
318
319 void
320 octeon_ipd_intr_rml(void *arg)
321 {
322 int i;
323
324 octeon_ipd_intr_evcnt.ev_count++;
325 for (i = 0; i < 3/* XXX */; i++) {
326 struct octeon_ipd_softc *sc;
327 uint64_t reg;
328
329 sc = __octeon_ipd_softc[i];
330 KASSERT(sc != NULL);
331 reg = octeon_ipd_int_summary(sc);
332 if (octeon_ipd_intr_rml_verbose)
333 printf("%s: IPD_INT_SUM=0x%016" PRIx64 "\n", __func__, reg);
334 if (reg & IPD_INT_SUM_BP_SUB)
335 OCTEON_EVCNT_INC(sc, ipdbpsub);
336 if (reg & IPD_INT_SUM_PRC_PAR3)
337 OCTEON_EVCNT_INC(sc, ipdprcpar3);
338 if (reg & IPD_INT_SUM_PRC_PAR2)
339 OCTEON_EVCNT_INC(sc, ipdprcpar2);
340 if (reg & IPD_INT_SUM_PRC_PAR1)
341 OCTEON_EVCNT_INC(sc, ipdprcpar1);
342 if (reg & IPD_INT_SUM_PRC_PAR0)
343 OCTEON_EVCNT_INC(sc, ipdprcpar0);
344 }
345 }
346
347 void
348 octeon_ipd_int_enable(struct octeon_ipd_softc *sc, int enable)
349 {
350 uint64_t ipd_int_xxx = 0;
351
352 SET(ipd_int_xxx,
353 IPD_INT_SUM_BP_SUB |
354 IPD_INT_SUM_PRC_PAR3 |
355 IPD_INT_SUM_PRC_PAR2 |
356 IPD_INT_SUM_PRC_PAR1 |
357 IPD_INT_SUM_PRC_PAR0);
358 _IPD_WR8(sc, IPD_INT_SUM_OFFSET, ipd_int_xxx);
359 _IPD_WR8(sc, IPD_INT_ENB_OFFSET, enable ? ipd_int_xxx : 0);
360 }
361
362 uint64_t
363 octeon_ipd_int_summary(struct octeon_ipd_softc *sc)
364 {
365 uint64_t summary;
366
367 summary = _IPD_RD8(sc, IPD_INT_SUM_OFFSET);
368 _IPD_WR8(sc, IPD_INT_SUM_OFFSET, summary);
369 return summary;
370 }
371
372 int
373 octeon_ipd_intr_drop(void *arg)
374 {
375 octeon_write_csr(CIU_INT0_SUM0, CIU_INTX_SUM0_IPD_DRP);
376 octeon_ipd_intr_drop_evcnt.ev_count++;
377 return (1);
378 }
379
/*
 * Build a dump-table entry from a register name, relying on the
 * FOO_BITS / FOO_OFFSET naming convention in octeon_ipdreg.h.
 */
#define _ENTRY(x) { #x, x##_BITS, x##_OFFSET }

struct octeon_ipd_dump_reg {
	const char *name;	/* register name for display */
	const char *format;	/* snprintb(9) format; NULL for raw hex */
	size_t offset;		/* offset within the IPD register window */
};
387
/* Every IPD register printed by octeon_ipd_dump(). */
static const struct octeon_ipd_dump_reg octeon_ipd_dump_regs[] = {
	_ENTRY(IPD_1ST_MBUFF_SKIP),
	_ENTRY(IPD_NOT_1ST_MBUFF_SKIP),
	_ENTRY(IPD_PACKET_MBUFF_SIZE),
	_ENTRY(IPD_CTL_STATUS),
	_ENTRY(IPD_WQE_FPA_QUEUE),
	_ENTRY(IPD_PORT0_BP_PAGE_CNT),
	_ENTRY(IPD_PORT1_BP_PAGE_CNT),
	_ENTRY(IPD_PORT2_BP_PAGE_CNT),
	_ENTRY(IPD_PORT32_BP_PAGE_CNT),
	_ENTRY(IPD_SUB_PORT_BP_PAGE_CNT),
	_ENTRY(IPD_1ST_NEXT_PTR_BACK),
	_ENTRY(IPD_2ND_NEXT_PTR_BACK),
	_ENTRY(IPD_INT_ENB),
	_ENTRY(IPD_INT_SUM),
	_ENTRY(IPD_SUB_PORT_FCS),
	_ENTRY(IPD_QOS0_RED_MARKS),
	_ENTRY(IPD_QOS1_RED_MARKS),
	_ENTRY(IPD_QOS2_RED_MARKS),
	_ENTRY(IPD_QOS3_RED_MARKS),
	_ENTRY(IPD_QOS4_RED_MARKS),
	_ENTRY(IPD_QOS5_RED_MARKS),
	_ENTRY(IPD_QOS6_RED_MARKS),
	_ENTRY(IPD_QOS7_RED_MARKS),
	_ENTRY(IPD_PORT_BP_COUNTERS_PAIR0),
	_ENTRY(IPD_PORT_BP_COUNTERS_PAIR1),
	_ENTRY(IPD_PORT_BP_COUNTERS_PAIR2),
	_ENTRY(IPD_PORT_BP_COUNTERS_PAIR32),
	_ENTRY(IPD_RED_PORT_ENABLE),
	_ENTRY(IPD_RED_QUE0_PARAM),
	_ENTRY(IPD_RED_QUE1_PARAM),
	_ENTRY(IPD_RED_QUE2_PARAM),
	_ENTRY(IPD_RED_QUE3_PARAM),
	_ENTRY(IPD_RED_QUE4_PARAM),
	_ENTRY(IPD_RED_QUE5_PARAM),
	_ENTRY(IPD_RED_QUE6_PARAM),
	_ENTRY(IPD_RED_QUE7_PARAM),
	_ENTRY(IPD_PTR_COUNT),
	_ENTRY(IPD_BP_PRT_RED_END),
	_ENTRY(IPD_QUE0_FREE_PAGE_CNT),
	_ENTRY(IPD_CLK_COUNT),
	_ENTRY(IPD_PWP_PTR_FIFO_CTL),
	_ENTRY(IPD_PRC_HOLD_PTR_FIFO_CTL),
	_ENTRY(IPD_PRC_PORT_PTR_FIFO_CTL),
	_ENTRY(IPD_PKT_PTR_VALID),
	_ENTRY(IPD_WQE_PTR_VALID),
	_ENTRY(IPD_BIST_STATUS),
};
436
437 void
438 octeon_ipd_dump(void)
439 {
440 struct octeon_ipd_softc *sc;
441 const struct octeon_ipd_dump_reg *reg;
442 uint64_t tmp;
443 char buf[512];
444 int i;
445
446 sc = __octeon_ipd_softc[0];
447 for (i = 0; i < (int)__arraycount(octeon_ipd_dump_regs); i++) {
448 reg = &octeon_ipd_dump_regs[i];
449 tmp = _IPD_RD8(sc, reg->offset);
450 if (reg->format == NULL) {
451 snprintf(buf, sizeof(buf), "%16" PRIx64, tmp);
452 } else {
453 snprintb(buf, sizeof(buf), reg->format, tmp);
454 }
455 printf("%-32s: %s\n", reg->name, buf);
456 }
457 }
458 #endif /* OCTEON_ETH_DEBUG */
459