/*	$NetBSD: ixp425_qmgr.c,v 1.2 2007/02/22 05:14:05 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*-
 * Copyright (c) 2001-2005, Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
/*__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/ixp425_qmgr.c,v 1.1 2006/11/19 23:55:23 sam Exp $");*/
__KERNEL_RCSID(0, "$NetBSD: ixp425_qmgr.c,v 1.2 2007/02/22 05:14:05 thorpej Exp $");

/*
 * Intel XScale Queue Manager support.
 *
 * Each IXP4XXX device has a hardware block that implements a priority
 * queue manager that is shared between the XScale cpu and the backend
 * devices (such as the NPE).  Queues are accessed by reading/writing
 * special memory locations.  The queue contents are mapped into a shared
 * SRAM region with entries managed in a circular buffer.  The XScale
 * processor can receive interrupts based on queue contents (a condition
 * code determines when interrupts should be delivered).
 *
 * The code here basically replaces the qmgr class in the Intel Access
 * Library (IAL).
 */
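
/*
 * Usage sketch (illustrative only, never compiled): a client such as an
 * NPE driver would typically configure a queue and then push/pull 32-bit
 * entries through it.  The queue id (6), the watermarks and the entry
 * value below are hypothetical values chosen for the example.
 */
#if 0
static void
example_qmgr_cb(int qId, void *arg)
{
	uint32_t entry;

	/* Drain the queue from the dispatch callback until it is empty. */
	while (ixpqmgr_qread(qId, &entry) == 0 && entry != 0)
		printf("queue %d: got entry 0x%x\n", qId, entry);
}

static void
example_qmgr_usage(void)
{
	/*
	 * 128-entry queue, nearly-empty/nearly-full watermarks of 2,
	 * interrupt when the queue becomes not-empty.
	 */
	ixpqmgr_qconfig(6, 128, 2, 2, IX_QMGR_Q_SOURCE_ID_NOT_E,
	    example_qmgr_cb, NULL);

	/* Enqueue a (hypothetical) buffer handle. */
	if (ixpqmgr_qwrite(6, 0x12345678) != 0)
		printf("queue 6 overflowed\n");
}
#endif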
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/resource.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>

#include <arm/xscale/ixp425_qmgr.h>

/*
 * State per AQM hw queue.
 * This structure holds q configuration and dispatch state.
 */
struct qmgrInfo {
	int		qSizeInWords;		/* queue size in words */

	uint32_t	qOflowStatBitMask;	/* overflow status mask */
	int		qWriteCount;		/* queue write count */

	bus_size_t	qAccRegAddr;		/* access register */
	bus_size_t	qUOStatRegAddr;		/* status register */
	bus_size_t	qConfigRegAddr;		/* config register */
	int		qSizeInEntries;		/* queue size in entries */

	uint32_t	qUflowStatBitMask;	/* underflow status mask */
	int		qReadCount;		/* queue read count */

	/* XXX union */
	uint32_t	qStatRegAddr;
	uint32_t	qStatBitsOffset;
	uint32_t	qStat0BitMask;
	uint32_t	qStat1BitMask;

	uint32_t	intRegCheckMask;	/* interrupt reg check mask */
	void		(*cb)(int, void *);	/* callback function */
	void		*cbarg;			/* callback argument */
	int		priority;		/* dispatch priority */
#if 0
	/* NB: needed only for A0 parts */
	u_int		statusWordOffset;	/* status word offset */
	uint32_t	statusMask;		/* status mask */
	uint32_t	statusCheckValue;	/* status check value */
#endif
};

struct ixpqmgr_softc {
#ifdef __FreeBSD__
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct resource		*sc_irq;	/* IRQ resource */
	int			sc_rid;		/* resource id for irq */
	void			*sc_ih;		/* interrupt handler */
#else
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih[2];	/* interrupt handler */
#endif

	struct qmgrInfo		qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority.  The table is split logically between queue
	 * identifiers 0-31 and 32-63.  To optimize lookups, bit masks
	 * are kept for the first and last 32 q's.  When the table
	 * needs to be rebuilt, set rebuildTable and the rebuild will
	 * happen after the next interrupt.
	 */
	int			priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t		lowPriorityTableFirstHalfMask;
	uint32_t		uppPriorityTableFirstHalfMask;
	int			rebuildTable;	/* rebuild priorityTable */

	uint32_t		aqmFreeSramAddress;	/* SRAM free space */
};

static int qmgr_debug = 0;
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
} while (0)
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)

static struct ixpqmgr_softc *ixpqmgr_sc = NULL;

static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
static int ixpqmgr_intr(void *);

static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
static void aqm_reset(struct ixpqmgr_softc *sc);

static void
dummyCallback(int qId, void *arg)
{
	/* XXX complain */
}

static uint32_t
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static void
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#ifdef __FreeBSD__
static int
ixpqmgr_probe(device_t dev)
{
	device_set_desc(dev, "IXP425 Q-Manager");
	return 0;
}
#endif

#ifdef __FreeBSD__
static void
ixpqmgr_attach(device_t dev)
#else
void *
ixpqmgr_init(bus_space_tag_t iot)
#endif
{
#ifdef __FreeBSD__
	struct ixpqmgr_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
#else
	struct ixpqmgr_softc *sc;
#endif
	int i;

#ifdef __FreeBSD__
	ixpqmgr_sc = sc;

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
#else
	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc == NULL)
		return (NULL);

	sc->sc_iot = iot;
#endif

	if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
	    0, &sc->sc_ioh))
		panic("%s: Cannot map registers", __func__);

#ifdef __FreeBSD__
	/* NB: we only use the lower 32 q's */
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid,
	    IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE);
	if (!sc->sc_irq)
		panic("Unable to allocate the qmgr irqs.\n");
	/* XXX could be a source of entropy */
	bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    ixpqmgr_intr, NULL, &sc->sc_ih);
#else
	sc->sc_ih[0] = ixp425_intr_establish(IXP425_INT_QUE1_32, IPL_NET,
	    ixpqmgr_intr, sc);
	if (sc->sc_ih[0] == NULL) {
		free(sc, M_DEVBUF);
		return (NULL);
	}
	sc->sc_ih[1] = ixp425_intr_establish(IXP425_INT_QUE33_64, IPL_NET,
	    ixpqmgr_intr, sc);
	if (sc->sc_ih[1] == NULL) {
		ixp425_intr_disestablish(sc->sc_ih[0]);
		free(sc, M_DEVBUF);
		return (NULL);
	}

	ixpqmgr_sc = sc;
#endif

	/* NB: softc is pre-zero'd */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
		struct qmgrInfo *qi = &sc->qinfo[i];

		qi->cb = dummyCallback;
		qi->priority = IX_QMGR_Q_PRIORITY_0;	/* default priority */
		/*
		 * There are two interrupt registers, 32 bits each.  One
		 * for the lower queues (0-31) and one for the upper
		 * queues (32-63).  Therefore we need to mod by 32, i.e.
		 * the minimum upper queue identifier.
		 */
		qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));

		/*
		 * Register addresses and bit masks are calculated and
		 * stored here to optimize QRead, QWrite and QStatusGet
		 * functions.
		 */

		/* AQM Queue access reg addresses, per queue */
		qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
		qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);

		/* AQM Queue lower-group (0-31), only */
		if (i < IX_QMGR_MIN_QUEUPP_QID) {
			/* AQM Q underflow/overflow status reg address, per queue */
			qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
			    ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
			     sizeof(uint32_t));

			/* AQM Q underflow status bit masks for status reg per queue */
			qi->qUflowStatBitMask =
			    (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
			    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
			     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));

			/* AQM Q overflow status bit masks for status reg, per queue */
			qi->qOflowStatBitMask =
			    (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
			    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
			     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));

			/* AQM Q lower-group (0-31) status reg addresses, per queue */
			qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
			    ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
			     sizeof(uint32_t));

			/* AQM Q lower-group (0-31) status register bit offset */
			qi->qStatBitsOffset =
			    (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
			    (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
		} else { /* AQM Q upper-group (32-63), only */
			qi->qUOStatRegAddr = 0;		/* XXX */

			/* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
			qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));

			/* AQM Q upper-group (32-63) Full status register bitmasks */
			qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
		}
	}

	sc->aqmFreeSramAddress = 0x100;	/* Q buffer space starts at 0x2100 */

	ixpqmgr_rebuild(sc);		/* build initial priority table */
	aqm_reset(sc);			/* reset h/w */

	return (sc);
}

#ifdef __FreeBSD__
static void
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
}
#endif

int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    void (*cb)(int, void *), void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;

	qi->qReadCount = 0;
	qi->qWriteCount = 0;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = 0;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)				/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	sc->rebuildTable = true;

	return 0;		/* XXX */
}

int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow.
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation.
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occurred: someone is draining the queue
			 * and the write counter needs to be updated from the
			 * current number of entries in the queue.
			 */

			/* calculate number of words in q */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot.  Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}

int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count: the next access to the read function
	 * will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}

int
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t entry;
	bus_size_t off = qi->qAccRegAddr;

	entry = aqm_reg_read(sc, off);
	while (--n) {
		if (entry == 0) {
			/* if we read a NULL entry, stop.  We have underflowed */
			break;
		}
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	}
	*p = entry;

	/*
	 * Reset the current read count: the next access to the read function
	 * will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}

uint32_t
ixpqmgr_getqstatus(int qId)
{
#define	QLOWSTATMASK \
    ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t status;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);

		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else { /* read status of a queue in the range 32-63 */
		status = 0;
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
	}
	return status;
#undef QLOWSTATMASK
}

uint32_t
ixpqmgr_getqconfig(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
}

void
ixpqmgr_dump(void)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	int i, a;

	/* status registers */
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x400
	    , aqm_reg_read(sc, 0x400)
	    , aqm_reg_read(sc, 0x400+4)
	    , aqm_reg_read(sc, 0x400+8)
	    , aqm_reg_read(sc, 0x400+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x410
	    , aqm_reg_read(sc, 0x410)
	    , aqm_reg_read(sc, 0x410+4)
	    , aqm_reg_read(sc, 0x410+8)
	    , aqm_reg_read(sc, 0x410+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x420
	    , aqm_reg_read(sc, 0x420)
	    , aqm_reg_read(sc, 0x420+4)
	    , aqm_reg_read(sc, 0x420+8)
	    , aqm_reg_read(sc, 0x420+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x430
	    , aqm_reg_read(sc, 0x430)
	    , aqm_reg_read(sc, 0x430+4)
	    , aqm_reg_read(sc, 0x430+8)
	    , aqm_reg_read(sc, 0x430+12)
	);
	/* q configuration registers */
	for (a = 0x2000; a < 0x20ff; a += 32)
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , a
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
		);
	/* allocated SRAM */
	for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
		a = 0x2000 + i;
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , a
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
		);
	}
	for (i = 0; i < 16; i++) {
		printf("Q[%2d] config 0x%08x status 0x%02x "
		       "Q[%2d] config 0x%08x status 0x%02x\n"
		    , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
		    , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
		);
	}
}

void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}

void
ixpqmgr_notify_disable(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	aqm_int_disable(sc, qId);
}

/*
 * Rebuild the priority table used by the dispatcher.
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* low priority q's */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which matches
				 * the queues of the first half of the priority
				 * table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* high priority q's */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which matches
				 * the queues of the first half of the priority
				 * table.
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = false;
}

/*
 * Count the number of leading zero bits in a word,
 * and return the same value as the CLZ instruction.
 * Note this is similar to the standard ffs function but
 * it counts zeros from the MSB instead of the LSB.
 *
 * word (in)	return value (out)
 * 0x80000000	0
 * 0x40000000	1
 * ,,,		,,,
 * 0x00000002	30
 * 0x00000001	31
 * 0x00000000	32
 *
 * The C version of this function is used as a replacement
 * for systems not providing the equivalent of the CLZ
 * assembly language instruction.
 *
 * Note that this version is big-endian
 */
static unsigned int
_lzcount(uint32_t word)
{
	unsigned int lzcount = 0;

	if (word == 0)
		return 32;
	while ((word & 0x80000000) == 0) {
		word <<= 1;
		lzcount++;
	}
	return lzcount;
}

static int
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Dispatch only if at least one queue has its interrupt bit set */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared. There is no need to maintain
			 * a separate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);

	return (1);
}

#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select.  We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue, so shift the check
		 * value and mask to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate which status word to check from the qId,
		 * 8 Qs status per word.
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif

static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}

static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}

static unsigned
log2(unsigned n)
{
	unsigned count;
	/*
	 * N.B. this function will return 0 if supplied 0.
	 */
	for (count = 0; n/2; count++)
		n /= 2;
	return count;
}

static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize 1("00"),2("01"),4("10") */
	return log2(entrySize);
}

static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	/* bufferSize 16("00"),32("01"),64("10"),128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}

static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	return log2(2 * watermark);
}

static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0);

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, (u_int)IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}

static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}

/*
 * Reset AQM registers to default values.
 */
static void
aqm_reset(struct ixpqmgr_softc *sc)
{
	int i;

	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
	    IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
	    IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	     i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
}

#ifdef __FreeBSD__
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),

	{ 0, 0 }
};

static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
#endif