1 1.12 skrll /* $NetBSD: ixp425_qmgr.c,v 1.12 2022/09/27 06:12:58 skrll Exp $ */ 2 1.1 scw 3 1.1 scw /*- 4 1.1 scw * Copyright (c) 2006 Sam Leffler, Errno Consulting 5 1.1 scw * All rights reserved. 6 1.1 scw * 7 1.1 scw * Redistribution and use in source and binary forms, with or without 8 1.1 scw * modification, are permitted provided that the following conditions 9 1.1 scw * are met: 10 1.1 scw * 1. Redistributions of source code must retain the above copyright 11 1.1 scw * notice, this list of conditions and the following disclaimer, 12 1.1 scw * without modification. 13 1.1 scw * 2. Redistributions in binary form must reproduce at minimum a disclaimer 14 1.1 scw * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 15 1.1 scw * redistribution must be conditioned upon including a substantially 16 1.1 scw * similar Disclaimer requirement for further binary redistribution. 17 1.1 scw * 18 1.1 scw * NO WARRANTY 19 1.1 scw * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 1.1 scw * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 1.1 scw * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 22 1.1 scw * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 23 1.1 scw * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 24 1.1 scw * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 1.1 scw * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 1.1 scw * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 27 1.1 scw * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 1.1 scw * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 29 1.1 scw * THE POSSIBILITY OF SUCH DAMAGES. 30 1.1 scw */ 31 1.1 scw 32 1.1 scw /*- 33 1.1 scw * Copyright (c) 2001-2005, Intel Corporation. 34 1.1 scw * All rights reserved. 35 1.1 scw * 36 1.1 scw * Redistribution and use in source and binary forms, with or without 37 1.1 scw * modification, are permitted provided that the following conditions 38 1.1 scw * are met: 39 1.1 scw * 1. Redistributions of source code must retain the above copyright 40 1.1 scw * notice, this list of conditions and the following disclaimer. 41 1.1 scw * 2. Redistributions in binary form must reproduce the above copyright 42 1.1 scw * notice, this list of conditions and the following disclaimer in the 43 1.1 scw * documentation and/or other materials provided with the distribution. 44 1.1 scw * 3. Neither the name of the Intel Corporation nor the names of its contributors 45 1.1 scw * may be used to endorse or promote products derived from this software 46 1.1 scw * without specific prior written permission. 47 1.1 scw * 48 1.1 scw * 49 1.1 scw * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' 50 1.1 scw * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 51 1.1 scw * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 52 1.1 scw * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 53 1.1 scw * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 54 1.1 scw * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 55 1.1 scw * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 56 1.1 scw * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 57 1.1 scw * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 58 1.1 scw * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 59 1.1 scw * SUCH DAMAGE. 60 1.1 scw */ 61 1.1 scw #include <sys/cdefs.h> 62 1.1 scw /*__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/ixp425_qmgr.c,v 1.1 2006/11/19 23:55:23 sam Exp $");*/ 63 1.12 skrll __KERNEL_RCSID(0, "$NetBSD: ixp425_qmgr.c,v 1.12 2022/09/27 06:12:58 skrll Exp $"); 64 1.1 scw 65 1.1 scw /* 66 1.1 scw * Intel XScale Queue Manager support. 67 1.1 scw * 68 1.1 scw * Each IXP4XXX device has a hardware block that implements a priority 69 1.1 scw * queue manager that is shared between the XScale cpu and the backend 70 1.1 scw * devices (such as the NPE). Queues are accessed by reading/writing 71 1.1 scw * special memory locations. The queue contents are mapped into a shared 72 1.1 scw * SRAM region with entries managed in a circular buffer. The XScale 73 1.1 scw * processor can receive interrupts based on queue contents (a condition 74 1.1 scw * code determines when interrupts should be delivered). 75 1.1 scw * 76 1.1 scw * The code here basically replaces the qmgr class in the Intel Access 77 1.1 scw * Library (IAL). 78 1.1 scw */ 79 1.1 scw #include <sys/param.h> 80 1.1 scw #include <sys/systm.h> 81 1.1 scw #include <sys/kernel.h> 82 1.1 scw #include <sys/time.h> 83 1.12 skrll #include <sys/kmem.h> 84 1.1 scw #include <sys/resource.h> 85 1.1 scw 86 1.7 dyoung #include <sys/bus.h> 87 1.1 scw #include <machine/cpu.h> 88 1.1 scw #include <machine/intr.h> 89 1.1 scw 90 1.1 scw #include <arm/xscale/ixp425reg.h> 91 1.1 scw #include <arm/xscale/ixp425var.h> 92 1.1 scw 93 1.1 scw #include <arm/xscale/ixp425_qmgr.h> 94 1.1 scw 95 1.1 scw /* 96 1.1 scw * State per AQM hw queue. 97 1.1 scw * This structure holds q configuration and dispatch state. 
98 1.1 scw */ 99 1.1 scw struct qmgrInfo { 100 1.1 scw int qSizeInWords; /* queue size in words */ 101 1.1 scw 102 1.1 scw uint32_t qOflowStatBitMask; /* overflow status mask */ 103 1.1 scw int qWriteCount; /* queue write count */ 104 1.1 scw 105 1.1 scw bus_size_t qAccRegAddr; /* access register */ 106 1.1 scw bus_size_t qUOStatRegAddr; /* status register */ 107 1.1 scw bus_size_t qConfigRegAddr; /* config register */ 108 1.1 scw int qSizeInEntries; /* queue size in entries */ 109 1.1 scw 110 1.1 scw uint32_t qUflowStatBitMask; /* underflow status mask */ 111 1.1 scw int qReadCount; /* queue read count */ 112 1.1 scw 113 1.1 scw /* XXX union */ 114 1.1 scw uint32_t qStatRegAddr; 115 1.1 scw uint32_t qStatBitsOffset; 116 1.1 scw uint32_t qStat0BitMask; 117 1.1 scw uint32_t qStat1BitMask; 118 1.1 scw 119 1.1 scw uint32_t intRegCheckMask; /* interrupt reg check mask */ 120 1.1 scw void (*cb)(int, void *); /* callback function */ 121 1.1 scw void *cbarg; /* callback argument */ 122 1.1 scw int priority; /* dispatch priority */ 123 1.1 scw #if 0 124 1.1 scw /* NB: needed only for A0 parts */ 125 1.1 scw u_int statusWordOffset; /* status word offset */ 126 1.1 scw uint32_t statusMask; /* status mask */ 127 1.1 scw uint32_t statusCheckValue; /* status check value */ 128 1.1 scw #endif 129 1.1 scw }; 130 1.1 scw 131 1.1 scw struct ixpqmgr_softc { 132 1.1 scw #ifdef __FreeBSD__ 133 1.1 scw device_t sc_dev; 134 1.1 scw bus_space_tag_t sc_iot; 135 1.1 scw bus_space_handle_t sc_ioh; 136 1.1 scw struct resource *sc_irq; /* IRQ resource */ 137 1.1 scw int sc_rid; /* resource id for irq */ 138 1.1 scw void *sc_ih; /* interrupt handler */ 139 1.1 scw #else 140 1.1 scw bus_space_tag_t sc_iot; 141 1.1 scw bus_space_handle_t sc_ioh; 142 1.1 scw void *sc_ih[2]; /* interrupt handler */ 143 1.1 scw #endif 144 1.1 scw 145 1.1 scw struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES]; 146 1.1 scw /* 147 1.1 scw * This array contains a list of queue identifiers ordered by 148 1.1 scw * priority. The table is split logically between queue 149 1.1 scw * identifiers 0-31 and 32-63. To optimize lookups bit masks 150 1.1 scw * are kept for the first-32 and last-32 q's. When the 151 1.1 scw * table needs to be rebuilt mark rebuildTable and it'll 152 1.1 scw * happen after the next interrupt. 153 1.1 scw */ 154 1.1 scw int priorityTable[IX_QMGR_MAX_NUM_QUEUES]; 155 1.1 scw uint32_t lowPriorityTableFirstHalfMask; 156 1.1 scw uint32_t uppPriorityTableFirstHalfMask; 157 1.1 scw int rebuildTable; /* rebuild priorityTable */ 158 1.1 scw 159 1.1 scw uint32_t aqmFreeSramAddress; /* SRAM free space */ 160 1.1 scw }; 161 1.1 scw 162 1.1 scw static int qmgr_debug = 0; 163 1.1 scw #define DPRINTF(dev, fmt, ...) do { \ 164 1.1 scw if (qmgr_debug) printf(fmt, __VA_ARGS__); \ 165 1.1 scw } while (0) 166 1.1 scw #define DPRINTFn(n, dev, fmt, ...) 
do { \ 167 1.1 scw if (qmgr_debug >= n) printf(fmt, __VA_ARGS__); \ 168 1.1 scw } while (0) 169 1.1 scw 170 1.1 scw static struct ixpqmgr_softc *ixpqmgr_sc = NULL; 171 1.1 scw 172 1.1 scw static void ixpqmgr_rebuild(struct ixpqmgr_softc *); 173 1.1 scw static int ixpqmgr_intr(void *); 174 1.1 scw 175 1.1 scw static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId); 176 1.1 scw static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId); 177 1.1 scw static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf); 178 1.1 scw static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId); 179 1.1 scw static void aqm_reset(struct ixpqmgr_softc *sc); 180 1.1 scw 181 1.1 scw static void 182 1.1 scw dummyCallback(int qId, void *arg) 183 1.1 scw { 184 1.1 scw /* XXX complain */ 185 1.1 scw } 186 1.1 scw 187 1.1 scw static uint32_t 188 1.1 scw aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off) 189 1.1 scw { 190 1.1 scw DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off); 191 1.1 scw return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); 192 1.1 scw } 193 1.1 scw 194 1.1 scw static void 195 1.1 scw aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val) 196 1.1 scw { 197 1.1 scw DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val); 198 1.1 scw bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); 199 1.1 scw } 200 1.1 scw 201 1.1 scw #ifdef __FreeBSD__ 202 1.1 scw static int 203 1.1 scw ixpqmgr_probe(device_t dev) 204 1.1 scw { 205 1.1 scw device_set_desc(dev, "IXP425 Q-Manager"); 206 1.1 scw return 0; 207 1.1 scw } 208 1.1 scw #endif 209 1.1 scw 210 1.1 scw #ifdef __FreeBSD__ 211 1.1 scw static void 212 1.1 scw ixpqmgr_attach(device_t dev) 213 1.1 scw #else 214 1.1 scw void * 215 1.1 scw ixpqmgr_init(bus_space_tag_t iot) 216 1.1 scw #endif 217 1.1 scw { 218 1.1 scw #ifdef __FreeBSD__ 219 1.1 scw struct ixpqmgr_softc *sc = device_get_softc(dev); 220 1.1 scw struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); 221 1.1 scw #else 222 1.1 scw struct ixpqmgr_softc *sc; 223 1.1 scw #endif 224 1.1 scw int i; 225 1.1 scw 226 1.1 scw #ifdef __FreeBSD__ 227 1.1 scw ixpqmgr_sc = sc; 228 1.1 scw 229 1.1 scw sc->sc_dev = dev; 230 1.1 scw sc->sc_iot = sa->sc_iot; 231 1.1 scw #else 232 1.12 skrll sc = kmem_zalloc(sizeof(*sc), KM_SLEEP); 233 1.1 scw sc->sc_iot = iot; 234 1.1 scw #endif 235 1.1 scw 236 1.1 scw if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, 237 1.1 scw 0, &sc->sc_ioh)) 238 1.1 scw panic("%s: Cannot map registers", __func__); 239 1.1 scw 240 1.1 scw #ifdef __FreeBSD__ 241 1.1 scw /* NB: we only use the lower 32 q's */ 242 1.1 scw sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid, 243 1.1 scw IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE); 244 1.1 scw if (!sc->sc_irq) 245 1.1 scw panic("Unable to allocate the qmgr irqs.\n"); 246 1.1 scw /* XXX could be a source of entropy */ 247 1.1 scw bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, 248 1.1 scw ixpqmgr_intr, NULL, &sc->sc_ih); 249 1.1 scw #else 250 1.3 scw ixpqmgr_sc = sc; 251 1.1 scw sc->sc_ih[0] = ixp425_intr_establish(IXP425_INT_QUE1_32, IPL_NET, 252 1.1 scw ixpqmgr_intr, sc); 253 1.1 scw if (sc->sc_ih[0] == NULL) { 254 1.3 scw ixpqmgr_sc = NULL; 255 1.12 skrll kmem_free(sc, sizeof(*sc)); 256 1.1 scw return (NULL); 257 1.1 scw } 258 1.1 scw sc->sc_ih[1] = ixp425_intr_establish(IXP425_INT_QUE33_64, IPL_NET, 259 1.1 scw ixpqmgr_intr, sc); 260 1.1 scw if (sc->sc_ih[1] == NULL) { 261 1.1 scw 
ixp425_intr_disestablish(sc->sc_ih[0]); 262 1.3 scw ixpqmgr_sc = NULL; 263 1.12 skrll kmem_free(sc, sizeof(*sc)); 264 1.1 scw return (NULL); 265 1.1 scw } 266 1.1 scw #endif 267 1.1 scw 268 1.1 scw /* NB: softc is pre-zero'd */ 269 1.1 scw for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) { 270 1.1 scw struct qmgrInfo *qi = &sc->qinfo[i]; 271 1.1 scw 272 1.1 scw qi->cb = dummyCallback; 273 1.1 scw qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */ 274 1.1 scw /* 275 1.1 scw * There are two interrupt registers, 32 bits each. One 276 1.1 scw * for the lower queues(0-31) and one for the upper 277 1.1 scw * queues(32-63). Therefore need to mod by 32 i.e the 278 1.1 scw * min upper queue identifier. 279 1.1 scw */ 280 1.1 scw qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID))); 281 1.1 scw 282 1.1 scw /* 283 1.1 scw * Register addresses and bit masks are calculated and 284 1.1 scw * stored here to optimize QRead, QWrite and QStatusGet 285 1.1 scw * functions. 286 1.1 scw */ 287 1.1 scw 288 1.1 scw /* AQM Queue access reg addresses, per queue */ 289 1.1 scw qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); 290 1.1 scw qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); 291 1.1 scw qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i); 292 1.1 scw 293 1.1 scw /* AQM Queue lower-group (0-31), only */ 294 1.1 scw if (i < IX_QMGR_MIN_QUEUPP_QID) { 295 1.1 scw /* AQM Q underflow/overflow status reg address, per queue */ 296 1.1 scw qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET + 297 1.1 scw ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) * 298 1.1 scw sizeof(uint32_t)); 299 1.1 scw 300 1.1 scw /* AQM Q underflow status bit masks for status reg per queue */ 301 1.1 scw qi->qUflowStatBitMask = 302 1.1 scw (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) << 303 1.1 scw ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * 304 1.1 scw (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); 305 1.1 scw 306 1.1 scw /* AQM Q overflow status bit masks for status reg, per queue */ 307 1.1 scw qi->qOflowStatBitMask = 308 1.1 scw (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) << 309 1.1 scw ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * 310 1.1 scw (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); 311 1.1 scw 312 1.1 scw /* AQM Q lower-group (0-31) status reg addresses, per queue */ 313 1.1 scw qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET + 314 1.1 scw ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * 315 1.1 scw sizeof(uint32_t)); 316 1.1 scw 317 1.1 scw /* AQM Q lower-group (0-31) status register bit offset */ 318 1.1 scw qi->qStatBitsOffset = 319 1.1 scw (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) * 320 1.1 scw (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD); 321 1.1 scw } else { /* AQM Q upper-group (32-63), only */ 322 1.1 scw qi->qUOStatRegAddr = 0; /* XXX */ 323 1.1 scw 324 1.1 scw /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */ 325 1.1 scw qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); 326 1.1 scw 327 1.1 scw /* AQM Q upper-group (32-63) Full status register bitmasks */ 328 1.1 scw qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); 329 1.1 scw } 330 1.1 scw } 331 1.1 scw 332 1.1 scw sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */ 333 1.1 scw 334 1.10 msaitoh ixpqmgr_rebuild(sc); /* build initial priority table */ 335 1.1 scw aqm_reset(sc); /* reset h/w */ 336 1.1 scw 337 1.1 scw return (sc); 338 1.1 scw } 339 1.1 scw 340 1.1 scw #ifdef __FreeBSD__ 341 1.1 scw static void 342 1.1 scw ixpqmgr_detach(device_t dev) 343 1.1 scw { 344 1.1 scw struct ixpqmgr_softc *sc = device_get_softc(dev); 345 1.1 scw 346 1.1 
scw aqm_reset(sc); /* disable interrupts */ 347 1.1 scw bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 348 1.1 scw bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq); 349 1.1 scw bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE); 350 1.1 scw } 351 1.1 scw #endif 352 1.1 scw 353 1.1 scw int 354 1.1 scw ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel, 355 1.1 scw void (*cb)(int, void *), void *cbarg) 356 1.1 scw { 357 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 358 1.1 scw struct qmgrInfo *qi = &sc->qinfo[qId]; 359 1.1 scw 360 1.1 scw DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n", 361 1.1 scw __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg); 362 1.1 scw 363 1.1 scw /* NB: entry size is always 1 */ 364 1.1 scw qi->qSizeInWords = qEntries; 365 1.1 scw 366 1.1 scw qi->qReadCount = 0; 367 1.1 scw qi->qWriteCount = 0; 368 1.1 scw qi->qSizeInEntries = qEntries; /* XXX kept for code clarity */ 369 1.1 scw 370 1.1 scw if (cb == NULL) { 371 1.1 scw /* Reset to dummy callback */ 372 1.1 scw qi->cb = dummyCallback; 373 1.1 scw qi->cbarg = 0; 374 1.1 scw } else { 375 1.1 scw qi->cb = cb; 376 1.1 scw qi->cbarg = cbarg; 377 1.1 scw } 378 1.1 scw 379 1.1 scw /* Write the config register; NB must be AFTER qinfo setup */ 380 1.1 scw aqm_qcfg(sc, qId, ne, nf); 381 1.1 scw /* 382 1.1 scw * Account for space just allocated to queue. 383 1.1 scw */ 384 1.1 scw sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t)); 385 1.1 scw 386 1.5 msaitoh /* Set the interrupt source if this queue is in the range 0-31 */ 387 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) 388 1.1 scw aqm_srcsel_write(sc, qId, srcSel); 389 1.1 scw 390 1.1 scw if (cb != NULL) /* Enable the interrupt */ 391 1.1 scw aqm_int_enable(sc, qId); 392 1.1 scw 393 1.2 thorpej sc->rebuildTable = true; 394 1.1 scw 395 1.1 scw return 0; /* XXX */ 396 1.1 scw } 397 1.1 scw 398 1.1 scw int 399 1.1 scw ixpqmgr_qwrite(int qId, uint32_t entry) 400 1.1 scw { 401 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 402 1.1 scw struct qmgrInfo *qi = &sc->qinfo[qId]; 403 1.1 scw 404 1.1 scw DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n", 405 1.1 scw __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries); 406 1.1 scw 407 1.1 scw /* write the entry */ 408 1.1 scw aqm_reg_write(sc, qi->qAccRegAddr, entry); 409 1.1 scw 410 1.1 scw /* NB: overflow is available for lower queues only */ 411 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) { 412 1.1 scw int qSize = qi->qSizeInEntries; 413 1.1 scw /* 414 1.1 scw * Increment the current number of entries in the queue 415 1.1 scw * and check for overflow . 416 1.1 scw */ 417 1.1 scw if (qi->qWriteCount++ == qSize) { /* check for overflow */ 418 1.1 scw uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); 419 1.1 scw int qPtrs; 420 1.1 scw 421 1.1 scw /* 422 1.1 scw * Read the status twice because the status may 423 1.1 scw * not be immediately ready after the write operation 424 1.1 scw */ 425 1.1 scw if ((status & qi->qOflowStatBitMask) || 426 1.1 scw ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) { 427 1.1 scw /* 428 1.1 scw * The queue is full, clear the overflow status bit if set. 
429 1.1 scw */ 430 1.1 scw aqm_reg_write(sc, qi->qUOStatRegAddr, 431 1.1 scw status & ~qi->qOflowStatBitMask); 432 1.1 scw qi->qWriteCount = qSize; 433 1.1 scw DPRINTFn(5, sc->sc_dev, 434 1.1 scw "%s(%u, 0x%x) Q full, overflow status cleared\n", 435 1.1 scw __func__, qId, entry); 436 1.1 scw return ENOSPC; 437 1.1 scw } 438 1.1 scw /* 439 1.11 andvar * No overflow occurred : someone is draining the queue 440 1.1 scw * and the current counter needs to be 441 1.1 scw * updated from the current number of entries in the queue 442 1.1 scw */ 443 1.1 scw 444 1.1 scw /* calculate number of words in q */ 445 1.1 scw qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr); 446 1.1 scw DPRINTFn(2, sc->sc_dev, 447 1.1 scw "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n", 448 1.1 scw __func__, qId, entry, qPtrs); 449 1.1 scw qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f; 450 1.1 scw 451 1.1 scw if (qPtrs == 0) { 452 1.1 scw /* 453 1.1 scw * The queue may be full at the time of the 454 1.1 scw * snapshot. Next access will check 455 1.1 scw * the overflow status again. 456 1.1 scw */ 457 1.1 scw qi->qWriteCount = qSize; 458 1.1 scw } else { 459 1.1 scw /* convert the number of words to a number of entries */ 460 1.1 scw qi->qWriteCount = qPtrs & (qSize - 1); 461 1.1 scw } 462 1.1 scw } 463 1.1 scw } 464 1.1 scw return 0; 465 1.1 scw } 466 1.1 scw 467 1.1 scw int 468 1.1 scw ixpqmgr_qread(int qId, uint32_t *entry) 469 1.1 scw { 470 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 471 1.1 scw struct qmgrInfo *qi = &sc->qinfo[qId]; 472 1.1 scw bus_size_t off = qi->qAccRegAddr; 473 1.1 scw 474 1.1 scw *entry = aqm_reg_read(sc, off); 475 1.1 scw 476 1.1 scw /* 477 1.1 scw * Reset the current read count : next access to the read function 478 1.1 scw * will force a underflow status check. 479 1.1 scw */ 480 1.1 scw qi->qReadCount = 0; 481 1.1 scw 482 1.1 scw /* Check if underflow occurred on the read */ 483 1.1 scw if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { 484 1.1 scw /* get the queue status */ 485 1.1 scw uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); 486 1.1 scw 487 1.1 scw if (status & qi->qUflowStatBitMask) { /* clear underflow status */ 488 1.1 scw aqm_reg_write(sc, qi->qUOStatRegAddr, 489 1.1 scw status &~ qi->qUflowStatBitMask); 490 1.1 scw return ENOSPC; 491 1.1 scw } 492 1.1 scw } 493 1.1 scw return 0; 494 1.1 scw } 495 1.1 scw 496 1.1 scw int 497 1.1 scw ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p) 498 1.1 scw { 499 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 500 1.1 scw struct qmgrInfo *qi = &sc->qinfo[qId]; 501 1.1 scw uint32_t entry; 502 1.1 scw bus_size_t off = qi->qAccRegAddr; 503 1.1 scw 504 1.1 scw entry = aqm_reg_read(sc, off); 505 1.1 scw while (--n) { 506 1.1 scw if (entry == 0) { 507 1.1 scw /* if we read a NULL entry, stop. We have underflowed */ 508 1.1 scw break; 509 1.1 scw } 510 1.1 scw *p++ = entry; /* store */ 511 1.1 scw entry = aqm_reg_read(sc, off); 512 1.1 scw } 513 1.1 scw *p = entry; 514 1.1 scw 515 1.1 scw /* 516 1.1 scw * Reset the current read count : next access to the read function 517 1.1 scw * will force a underflow status check. 
518 1.1 scw */ 519 1.1 scw qi->qReadCount = 0; 520 1.1 scw 521 1.1 scw /* Check if underflow occurred on the read */ 522 1.1 scw if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { 523 1.1 scw /* get the queue status */ 524 1.1 scw uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); 525 1.1 scw 526 1.1 scw if (status & qi->qUflowStatBitMask) { /* clear underflow status */ 527 1.1 scw aqm_reg_write(sc, qi->qUOStatRegAddr, 528 1.1 scw status &~ qi->qUflowStatBitMask); 529 1.1 scw return ENOSPC; 530 1.1 scw } 531 1.1 scw } 532 1.1 scw return 0; 533 1.1 scw } 534 1.1 scw 535 1.1 scw uint32_t 536 1.1 scw ixpqmgr_getqstatus(int qId) 537 1.1 scw { 538 1.1 scw #define QLOWSTATMASK \ 539 1.1 scw ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1) 540 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 541 1.1 scw const struct qmgrInfo *qi = &sc->qinfo[qId]; 542 1.1 scw uint32_t status; 543 1.1 scw 544 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) { 545 1.1 scw /* read the status of a queue in the range 0-31 */ 546 1.1 scw status = aqm_reg_read(sc, qi->qStatRegAddr); 547 1.1 scw 548 1.1 scw /* mask out the status bits relevant only to this queue */ 549 1.1 scw status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK; 550 1.1 scw } else { /* read status of a queue in the range 32-63 */ 551 1.1 scw status = 0; 552 1.1 scw if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask) 553 1.1 scw status |= IX_QMGR_Q_STATUS_NE_BIT_MASK; /* nearly empty */ 554 1.1 scw if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask) 555 1.1 scw status |= IX_QMGR_Q_STATUS_F_BIT_MASK; /* full */ 556 1.1 scw } 557 1.1 scw return status; 558 1.1 scw #undef QLOWSTATMASK 559 1.1 scw } 560 1.1 scw 561 1.1 scw uint32_t 562 1.1 scw ixpqmgr_getqconfig(int qId) 563 1.1 scw { 564 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 565 1.1 scw 566 1.1 scw return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)); 567 1.1 scw } 568 1.1 scw 569 1.1 scw void 570 1.1 scw ixpqmgr_dump(void) 571 1.1 scw { 572 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 573 1.1 scw int i, a; 574 1.1 scw 575 1.1 scw /* status registers */ 576 1.1 scw printf("0x%04x: %08x %08x %08x %08x\n" 577 1.1 scw , 0x400 578 1.1 scw , aqm_reg_read(sc, 0x400) 579 1.1 scw , aqm_reg_read(sc, 0x400+4) 580 1.1 scw , aqm_reg_read(sc, 0x400+8) 581 1.1 scw , aqm_reg_read(sc, 0x400+12) 582 1.1 scw ); 583 1.1 scw printf("0x%04x: %08x %08x %08x %08x\n" 584 1.1 scw , 0x410 585 1.1 scw , aqm_reg_read(sc, 0x410) 586 1.1 scw , aqm_reg_read(sc, 0x410+4) 587 1.1 scw , aqm_reg_read(sc, 0x410+8) 588 1.1 scw , aqm_reg_read(sc, 0x410+12) 589 1.1 scw ); 590 1.1 scw printf("0x%04x: %08x %08x %08x %08x\n" 591 1.1 scw , 0x420 592 1.1 scw , aqm_reg_read(sc, 0x420) 593 1.1 scw , aqm_reg_read(sc, 0x420+4) 594 1.1 scw , aqm_reg_read(sc, 0x420+8) 595 1.1 scw , aqm_reg_read(sc, 0x420+12) 596 1.1 scw ); 597 1.1 scw printf("0x%04x: %08x %08x %08x %08x\n" 598 1.1 scw , 0x430 599 1.1 scw , aqm_reg_read(sc, 0x430) 600 1.1 scw , aqm_reg_read(sc, 0x430+4) 601 1.1 scw , aqm_reg_read(sc, 0x430+8) 602 1.1 scw , aqm_reg_read(sc, 0x430+12) 603 1.1 scw ); 604 1.1 scw /* q configuration registers */ 605 1.1 scw for (a = 0x2000; a < 0x20ff; a += 32) 606 1.1 scw printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" 607 1.1 scw , a 608 1.1 scw , aqm_reg_read(sc, a) 609 1.1 scw , aqm_reg_read(sc, a+4) 610 1.1 scw , aqm_reg_read(sc, a+8) 611 1.1 scw , aqm_reg_read(sc, a+12) 612 1.1 scw , aqm_reg_read(sc, a+16) 613 1.1 scw , aqm_reg_read(sc, a+20) 614 1.1 scw , aqm_reg_read(sc, a+24) 615 1.1 scw , 
aqm_reg_read(sc, a+28) 616 1.1 scw ); 617 1.1 scw /* allocated SRAM */ 618 1.1 scw for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) { 619 1.1 scw a = 0x2000 + i; 620 1.1 scw printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" 621 1.1 scw , a 622 1.1 scw , aqm_reg_read(sc, a) 623 1.1 scw , aqm_reg_read(sc, a+4) 624 1.1 scw , aqm_reg_read(sc, a+8) 625 1.1 scw , aqm_reg_read(sc, a+12) 626 1.1 scw , aqm_reg_read(sc, a+16) 627 1.1 scw , aqm_reg_read(sc, a+20) 628 1.1 scw , aqm_reg_read(sc, a+24) 629 1.1 scw , aqm_reg_read(sc, a+28) 630 1.1 scw ); 631 1.1 scw } 632 1.1 scw for (i = 0; i < 16; i++) { 633 1.1 scw printf("Q[%2d] config 0x%08x status 0x%02x " 634 1.1 scw "Q[%2d] config 0x%08x status 0x%02x\n" 635 1.1 scw , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i) 636 1.1 scw , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16) 637 1.1 scw ); 638 1.1 scw } 639 1.1 scw } 640 1.1 scw 641 1.1 scw void 642 1.1 scw ixpqmgr_notify_enable(int qId, int srcSel) 643 1.1 scw { 644 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 645 1.1 scw #if 0 646 1.1 scw /* Calculate the checkMask and checkValue for this q */ 647 1.1 scw aqm_calc_statuscheck(sc, qId, srcSel); 648 1.1 scw #endif 649 1.6 msaitoh /* Set the interrupt source if this queue is in the range 0-31 */ 650 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) 651 1.1 scw aqm_srcsel_write(sc, qId, srcSel); 652 1.1 scw 653 1.1 scw /* Enable the interrupt */ 654 1.1 scw aqm_int_enable(sc, qId); 655 1.1 scw } 656 1.1 scw 657 1.1 scw void 658 1.1 scw ixpqmgr_notify_disable(int qId) 659 1.1 scw { 660 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc; 661 1.1 scw 662 1.1 scw aqm_int_disable(sc, qId); 663 1.1 scw } 664 1.1 scw 665 1.1 scw /* 666 1.1 scw * Rebuild the priority table used by the dispatcher. 667 1.1 scw */ 668 1.1 scw static void 669 1.1 scw ixpqmgr_rebuild(struct ixpqmgr_softc *sc) 670 1.1 scw { 671 1.1 scw int q, pri; 672 1.1 scw int lowQuePriorityTableIndex, uppQuePriorityTableIndex; 673 1.1 scw struct qmgrInfo *qi; 674 1.1 scw 675 1.1 scw sc->lowPriorityTableFirstHalfMask = 0; 676 1.1 scw sc->uppPriorityTableFirstHalfMask = 0; 677 1.1 scw 678 1.1 scw lowQuePriorityTableIndex = 0; 679 1.1 scw uppQuePriorityTableIndex = 32; 680 1.1 scw for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) { 681 1.1 scw /* low priority q's */ 682 1.1 scw for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) { 683 1.1 scw qi = &sc->qinfo[q]; 684 1.1 scw if (qi->priority == pri) { 685 1.1 scw /* 686 1.1 scw * Build the priority table bitmask which match the 687 1.1 scw * queues of the first half of the priority table. 688 1.1 scw */ 689 1.1 scw if (lowQuePriorityTableIndex < 16) { 690 1.1 scw sc->lowPriorityTableFirstHalfMask |= 691 1.1 scw qi->intRegCheckMask; 692 1.1 scw } 693 1.1 scw sc->priorityTable[lowQuePriorityTableIndex++] = q; 694 1.1 scw } 695 1.1 scw } 696 1.1 scw /* high priority q's */ 697 1.1 scw for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) { 698 1.1 scw qi = &sc->qinfo[q]; 699 1.1 scw if (qi->priority == pri) { 700 1.1 scw /* 701 1.1 scw * Build the priority table bitmask which match the 702 1.1 scw * queues of the first half of the priority table . 
703 1.1 scw */
704 1.1 scw if (uppQuePriorityTableIndex < 48) {
705 1.1 scw sc->uppPriorityTableFirstHalfMask |=
706 1.1 scw qi->intRegCheckMask;
707 1.1 scw }
708 1.1 scw sc->priorityTable[uppQuePriorityTableIndex++] = q;
709 1.1 scw }
710 1.1 scw }
711 1.1 scw }
712 1.2 thorpej sc->rebuildTable = false;
713 1.1 scw }
714 1.1 scw
715 1.1 scw /*
716 1.1 scw * Count the number of leading zero bits in a word,
717 1.1 scw * and return the same value as the CLZ instruction.
718 1.1 scw * Note this is similar to the standard ffs function but
719 1.1 scw * it counts zeros from the MSB instead of the LSB.
720 1.1 scw *
721 1.1 scw * word (in) return value (out)
722 1.1 scw * 0x80000000 0
723 1.1 scw * 0x40000000 1
724 1.1 scw * ... ...
725 1.1 scw * 0x00000002 30
726 1.1 scw * 0x00000001 31
727 1.1 scw * 0x00000000 32
728 1.1 scw *
729 1.1 scw * The C version of this function is used as a replacement
730 1.1 scw * for systems not providing the equivalent of the CLZ
731 1.1 scw * assembly language instruction.
732 1.1 scw *
733 1.1 scw * Note that this version is big-endian
734 1.1 scw */
735 1.1 scw static unsigned int
736 1.1 scw _lzcount(uint32_t word)
737 1.1 scw {
738 1.1 scw unsigned int lzcount = 0;
739 1.1 scw
740 1.1 scw if (word == 0)
741 1.1 scw return 32;
742 1.1 scw while ((word & 0x80000000) == 0) {
743 1.1 scw word <<= 1;
744 1.1 scw lzcount++;
745 1.1 scw }
746 1.1 scw return lzcount;
747 1.1 scw }
748 1.1 scw
749 1.1 scw static int
750 1.1 scw ixpqmgr_intr(void *arg)
751 1.1 scw {
752 1.1 scw struct ixpqmgr_softc *sc = ixpqmgr_sc;
753 1.1 scw uint32_t intRegVal; /* Interrupt reg val */
754 1.1 scw struct qmgrInfo *qi;
755 1.1 scw int priorityTableIndex; /* Priority table index */
756 1.1 scw int qIndex; /* Current queue being processed */
757 1.1 scw
758 1.1 scw /* Read the interrupt register */
759 1.1 scw intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
760 1.1 scw /* Write back to clear interrupt */
761 1.1 scw aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);
762 1.1 scw
763 1.1 scw DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
764 1.1 scw __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));
765 1.1 scw
766 1.1 scw /* Nothing to do unless some queue has its interrupt bit set */
767 1.1 scw if (intRegVal != 0) {
768 1.1 scw /* get the first queue Id from the interrupt register value */
769 1.1 scw qIndex = (32 - 1) - _lzcount(intRegVal);
770 1.1 scw
771 1.1 scw DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
772 1.1 scw __func__, intRegVal, qIndex);
773 1.1 scw
774 1.1 scw /*
775 1.1 scw * Optimize for single callback case.
776 1.1 scw */
777 1.1 scw qi = &sc->qinfo[qIndex];
778 1.1 scw if (intRegVal == qi->intRegCheckMask) {
779 1.1 scw /*
780 1.1 scw * Only 1 queue event triggered a notification.
781 1.1 scw * Call the callback function for this queue
782 1.1 scw */
783 1.1 scw qi->cb(qIndex, qi->cbarg);
784 1.1 scw } else {
785 1.1 scw /*
786 1.1 scw * The event is triggered by more than 1 queue,
787 1.1 scw * the queue search will start from the beginning
788 1.1 scw * or the middle of the priority table.
789 1.1 scw *
790 1.1 scw * The search will end when all the bits of the interrupt
791 1.1 scw * register are cleared. There is no need to maintain
792 1.8 msaitoh * a separate value and test it at each iteration.
793 1.1 scw */ 794 1.1 scw if (intRegVal & sc->lowPriorityTableFirstHalfMask) { 795 1.1 scw priorityTableIndex = 0; 796 1.1 scw } else { 797 1.1 scw priorityTableIndex = 16; 798 1.1 scw } 799 1.1 scw /* 800 1.1 scw * Iterate over the priority table until all the bits 801 1.1 scw * of the interrupt register are cleared. 802 1.1 scw */ 803 1.1 scw do { 804 1.1 scw qIndex = sc->priorityTable[priorityTableIndex++]; 805 1.3 scw if (qIndex >= IX_QMGR_MAX_NUM_QUEUES) 806 1.3 scw break; 807 1.1 scw qi = &sc->qinfo[qIndex]; 808 1.1 scw 809 1.1 scw /* If this queue caused this interrupt to be raised */ 810 1.1 scw if (intRegVal & qi->intRegCheckMask) { 811 1.1 scw /* Call the callback function for this queue */ 812 1.1 scw qi->cb(qIndex, qi->cbarg); 813 1.1 scw /* Clear the interrupt register bit */ 814 1.1 scw intRegVal &= ~qi->intRegCheckMask; 815 1.1 scw } 816 1.3 scw } while (intRegVal && 817 1.3 scw priorityTableIndex < IX_QMGR_MAX_NUM_QUEUES); 818 1.1 scw } 819 1.1 scw } 820 1.1 scw 821 1.1 scw /* Rebuild the priority table if needed */ 822 1.1 scw if (sc->rebuildTable) 823 1.1 scw ixpqmgr_rebuild(sc); 824 1.1 scw 825 1.1 scw return (1); 826 1.1 scw } 827 1.1 scw 828 1.1 scw #if 0 829 1.1 scw /* 830 1.1 scw * Generate the parameters used to check if a Q's status matches 831 1.1 scw * the specified source select. We calculate which status word 832 1.1 scw * to check (statusWordOffset), the value to check the status 833 1.1 scw * against (statusCheckValue) and the mask (statusMask) to mask 834 1.1 scw * out all but the bits to check in the status word. 835 1.1 scw */ 836 1.1 scw static void 837 1.1 scw aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel) 838 1.1 scw { 839 1.1 scw struct qmgrInfo *qi = &qinfo[qId]; 840 1.1 scw uint32_t shiftVal; 841 1.1 scw 842 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) { 843 1.1 scw switch (srcSel) { 844 1.1 scw case IX_QMGR_Q_SOURCE_ID_E: 845 1.1 scw qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK; 846 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; 847 1.1 scw break; 848 1.1 scw case IX_QMGR_Q_SOURCE_ID_NE: 849 1.1 scw qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK; 850 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; 851 1.1 scw break; 852 1.1 scw case IX_QMGR_Q_SOURCE_ID_NF: 853 1.1 scw qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK; 854 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; 855 1.1 scw break; 856 1.1 scw case IX_QMGR_Q_SOURCE_ID_F: 857 1.1 scw qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK; 858 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; 859 1.1 scw break; 860 1.1 scw case IX_QMGR_Q_SOURCE_ID_NOT_E: 861 1.1 scw qi->statusCheckValue = 0; 862 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; 863 1.1 scw break; 864 1.1 scw case IX_QMGR_Q_SOURCE_ID_NOT_NE: 865 1.1 scw qi->statusCheckValue = 0; 866 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; 867 1.1 scw break; 868 1.1 scw case IX_QMGR_Q_SOURCE_ID_NOT_NF: 869 1.1 scw qi->statusCheckValue = 0; 870 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; 871 1.1 scw break; 872 1.1 scw case IX_QMGR_Q_SOURCE_ID_NOT_F: 873 1.1 scw qi->statusCheckValue = 0; 874 1.1 scw qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; 875 1.1 scw break; 876 1.1 scw default: 877 1.1 scw /* Should never hit */ 878 1.1 scw IX_OSAL_ASSERT(0); 879 1.1 scw break; 880 1.1 scw } 881 1.1 scw 882 1.1 scw /* One nibble of status per queue so need to shift the 883 1.1 scw * check value and mask out to the correct position. 
884 1.1 scw */ 885 1.1 scw shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * 886 1.1 scw IX_QMGR_QUELOWSTAT_BITS_PER_Q; 887 1.1 scw 888 1.1 scw /* Calculate the which status word to check from the qId, 889 1.1 scw * 8 Qs status per word 890 1.1 scw */ 891 1.1 scw qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD; 892 1.1 scw 893 1.1 scw qi->statusCheckValue <<= shiftVal; 894 1.1 scw qi->statusMask <<= shiftVal; 895 1.1 scw } else { 896 1.1 scw /* One status word */ 897 1.1 scw qi->statusWordOffset = 0; 898 1.1 scw /* Single bits per queue and int source bit hardwired NE, 899 1.1 scw * Qs start at 32. 900 1.1 scw */ 901 1.1 scw qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID); 902 1.1 scw qi->statusCheckValue = qi->statusMask; 903 1.1 scw } 904 1.1 scw } 905 1.1 scw #endif 906 1.1 scw 907 1.1 scw static void 908 1.1 scw aqm_int_enable(struct ixpqmgr_softc *sc, int qId) 909 1.1 scw { 910 1.1 scw bus_size_t reg; 911 1.1 scw uint32_t v; 912 1.1 scw 913 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) 914 1.1 scw reg = IX_QMGR_QUEIEREG0_OFFSET; 915 1.1 scw else 916 1.1 scw reg = IX_QMGR_QUEIEREG1_OFFSET; 917 1.1 scw v = aqm_reg_read(sc, reg); 918 1.1 scw aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); 919 1.1 scw 920 1.1 scw DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", 921 1.1 scw __func__, qId, reg, v, aqm_reg_read(sc, reg)); 922 1.1 scw } 923 1.1 scw 924 1.1 scw static void 925 1.1 scw aqm_int_disable(struct ixpqmgr_softc *sc, int qId) 926 1.1 scw { 927 1.1 scw bus_size_t reg; 928 1.1 scw uint32_t v; 929 1.1 scw 930 1.1 scw if (qId < IX_QMGR_MIN_QUEUPP_QID) 931 1.1 scw reg = IX_QMGR_QUEIEREG0_OFFSET; 932 1.1 scw else 933 1.1 scw reg = IX_QMGR_QUEIEREG1_OFFSET; 934 1.1 scw v = aqm_reg_read(sc, reg); 935 1.1 scw aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); 936 1.1 scw 937 1.1 scw DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", 938 1.1 scw __func__, qId, reg, v, aqm_reg_read(sc, reg)); 939 1.1 scw } 940 1.1 scw 941 1.1 scw static unsigned 942 1.1 scw log2(unsigned n) 943 1.1 scw { 944 1.1 scw unsigned count; 945 1.1 scw /* 946 1.1 scw * N.B. this function will return 0 if supplied 0. 
947 1.1 scw */ 948 1.1 scw for (count = 0; n/2; count++) 949 1.1 scw n /= 2; 950 1.1 scw return count; 951 1.1 scw } 952 1.1 scw 953 1.1 scw static __inline unsigned 954 1.1 scw toAqmEntrySize(int entrySize) 955 1.1 scw { 956 1.1 scw /* entrySize 1("00"),2("01"),4("10") */ 957 1.1 scw return log2(entrySize); 958 1.1 scw } 959 1.1 scw 960 1.1 scw static __inline unsigned 961 1.1 scw toAqmBufferSize(unsigned bufferSizeInWords) 962 1.1 scw { 963 1.1 scw /* bufferSize 16("00"),32("01),64("10"),128("11") */ 964 1.1 scw return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE); 965 1.1 scw } 966 1.1 scw 967 1.1 scw static __inline unsigned 968 1.1 scw toAqmWatermark(int watermark) 969 1.1 scw { 970 1.1 scw /* 971 1.1 scw * Watermarks 0("000"),1("001"),2("010"),4("011"), 972 1.1 scw * 8("100"),16("101"),32("110"),64("111") 973 1.1 scw */ 974 1.1 scw return log2(2 * watermark); 975 1.1 scw } 976 1.1 scw 977 1.1 scw static void 978 1.1 scw aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf) 979 1.1 scw { 980 1.1 scw const struct qmgrInfo *qi = &sc->qinfo[qId]; 981 1.1 scw uint32_t qCfg; 982 1.1 scw uint32_t baseAddress; 983 1.1 scw 984 1.1 scw /* Build config register */ 985 1.1 scw qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) << 986 1.1 scw IX_QMGR_Q_CONFIG_ESIZE_OFFSET) 987 1.1 scw | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) << 988 1.1 scw IX_QMGR_Q_CONFIG_BSIZE_OFFSET); 989 1.1 scw 990 1.1 scw /* baseAddress, calculated relative to start address */ 991 1.1 scw baseAddress = sc->aqmFreeSramAddress; 992 1.1 scw 993 1.1 scw /* base address must be word-aligned */ 994 1.1 scw KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0); 995 1.1 scw 996 1.1 scw /* Now convert to a 16 word pointer as required by QUECONFIG register */ 997 1.1 scw baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT; 998 1.1 scw qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET; 999 1.1 scw 1000 1.1 scw /* set watermarks */ 1001 1.1 scw qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET) 1002 1.1 scw | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET); 1003 1.1 scw 1004 1.1 scw DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n", 1005 1.1 scw __func__, qId, ne, nf, 1006 1.1 scw aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)), 1007 1.1 scw qCfg, (u_int)IX_QMGR_Q_CONFIG_ADDR_GET(qId)); 1008 1.1 scw 1009 1.1 scw aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg); 1010 1.1 scw } 1011 1.1 scw 1012 1.1 scw static void 1013 1.1 scw aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId) 1014 1.1 scw { 1015 1.1 scw bus_size_t off; 1016 1.1 scw uint32_t v; 1017 1.1 scw 1018 1.1 scw /* 1019 1.1 scw * Calculate the register offset; multiple queues split across registers 1020 1.1 scw */ 1021 1.1 scw off = IX_QMGR_INT0SRCSELREG0_OFFSET + 1022 1.1 scw ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t)); 1023 1.1 scw 1024 1.1 scw v = aqm_reg_read(sc, off); 1025 1.1 scw if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) { 1026 1.1 scw /* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */ 1027 1.1 scw v |= 0x7; 1028 1.1 scw } else { 1029 1.1 scw const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD; 1030 1.1 scw uint32_t mask; 1031 1.1 scw int qshift; 1032 1.1 scw 1033 1.1 scw qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq; 1034 1.1 scw mask = ((1 << bpq) - 1) << qshift; /* q's status mask */ 1035 1.1 scw 1036 1.1 scw /* merge sourceId */ 1037 1.1 scw v = (v &~ mask) | ((sourceId << qshift) & mask); 1038 1.1 scw } 1039 1.1 scw 1040 
1.1 scw DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n", 1041 1.1 scw __func__, qId, sourceId, aqm_reg_read(sc, off), v, off); 1042 1.1 scw aqm_reg_write(sc, off, v); 1043 1.1 scw } 1044 1.1 scw 1045 1.1 scw /* 1046 1.1 scw * Reset AQM registers to default values. 1047 1.1 scw */ 1048 1.1 scw static void 1049 1.1 scw aqm_reset(struct ixpqmgr_softc *sc) 1050 1.1 scw { 1051 1.1 scw int i; 1052 1.1 scw 1053 1.1 scw /* Reset queues 0..31 status registers 0..3 */ 1054 1.1 scw aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET, 1055 1.1 scw IX_QMGR_QUELOWSTAT_RESET_VALUE); 1056 1.1 scw aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET, 1057 1.1 scw IX_QMGR_QUELOWSTAT_RESET_VALUE); 1058 1.1 scw aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET, 1059 1.1 scw IX_QMGR_QUELOWSTAT_RESET_VALUE); 1060 1.1 scw aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET, 1061 1.1 scw IX_QMGR_QUELOWSTAT_RESET_VALUE); 1062 1.1 scw 1063 1.1 scw /* Reset underflow/overflow status registers 0..1 */ 1064 1.1 scw aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET, 1065 1.1 scw IX_QMGR_QUEUOSTAT_RESET_VALUE); 1066 1.1 scw aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET, 1067 1.1 scw IX_QMGR_QUEUOSTAT_RESET_VALUE); 1068 1.1 scw 1069 1.1 scw /* Reset queues 32..63 nearly empty status registers */ 1070 1.1 scw aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET, 1071 1.1 scw IX_QMGR_QUEUPPSTAT0_RESET_VALUE); 1072 1.1 scw 1073 1.1 scw /* Reset queues 32..63 full status registers */ 1074 1.1 scw aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET, 1075 1.1 scw IX_QMGR_QUEUPPSTAT1_RESET_VALUE); 1076 1.1 scw 1077 1.1 scw /* Reset int0 status flag source select registers 0..3 */ 1078 1.1 scw aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET, 1079 1.1 scw IX_QMGR_INT0SRCSELREG_RESET_VALUE); 1080 1.1 scw aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET, 1081 1.1 scw IX_QMGR_INT0SRCSELREG_RESET_VALUE); 1082 1.1 scw aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET, 1083 1.1 scw IX_QMGR_INT0SRCSELREG_RESET_VALUE); 1084 1.1 scw aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET, 1085 1.1 scw IX_QMGR_INT0SRCSELREG_RESET_VALUE); 1086 1.1 scw 1087 1.1 scw /* Reset queue interrupt enable register 0..1 */ 1088 1.1 scw aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET, 1089 1.1 scw IX_QMGR_QUEIEREG_RESET_VALUE); 1090 1.1 scw aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET, 1091 1.1 scw IX_QMGR_QUEIEREG_RESET_VALUE); 1092 1.1 scw 1093 1.1 scw /* Reset queue interrupt register 0..1 */ 1094 1.1 scw aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); 1095 1.1 scw aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); 1096 1.1 scw 1097 1.1 scw /* Reset queue configuration words 0..63 */ 1098 1.1 scw for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) 1099 1.1 scw aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr, 1100 1.1 scw IX_QMGR_QUECONFIG_RESET_VALUE); 1101 1.1 scw 1102 1.1 scw /* XXX zero SRAM to simplify debugging */ 1103 1.1 scw for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET; 1104 1.1 scw i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t)) 1105 1.1 scw aqm_reg_write(sc, i, 0); 1106 1.1 scw } 1107 1.1 scw 1108 1.1 scw #ifdef __FreeBSD__ 1109 1.1 scw static device_method_t ixpqmgr_methods[] = { 1110 1.1 scw DEVMETHOD(device_probe, ixpqmgr_probe), 1111 1.1 scw DEVMETHOD(device_attach, ixpqmgr_attach), 1112 1.1 scw DEVMETHOD(device_detach, ixpqmgr_detach), 1113 1.1 scw 1114 1.1 scw { 0, 0 } 1115 1.1 scw }; 1116 1.1 scw 1117 1.1 scw static driver_t ixpqmgr_driver = { 1118 1.1 scw "ixpqmgr", 1119 1.1 scw ixpqmgr_methods, 1120 1.1 scw sizeof(struct ixpqmgr_softc), 1121 1.1 
scw }; 1122 1.1 scw static devclass_t ixpqmgr_devclass; 1123 1.1 scw 1124 1.1 scw DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0); 1125 1.1 scw #endif 1126
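
/*
 * Illustrative sketch (not part of the driver): how a client such as an
 * NPE driver would typically use this API -- configure a queue with a
 * notification callback, then push and drain one-word entries.  The queue
 * id, entry count, watermarks, and the example_* names below are
 * hypothetical placeholders, not values taken from any real configuration.
 */
#if 0
static void
example_rxdone_cb(int qId, void *arg)
{
	uint32_t entry;

	/* Drain the queue; ixpqmgr_qread returns ENOSPC on underflow. */
	while (ixpqmgr_qread(qId, &entry) == 0 && entry != 0) {
		/* Each entry is one 32-bit word, e.g. a descriptor address. */
		example_handle_buffer(arg, entry);
	}
}

static void
example_setup(void *softc)
{
	/*
	 * Configure a hypothetical queue 6 with 128 one-word entries,
	 * nearly-empty watermark 0 and nearly-full watermark 2, and ask
	 * for a callback whenever the queue is not empty.
	 */
	ixpqmgr_qconfig(6, 128, 0, 2, IX_QMGR_Q_SOURCE_ID_NOT_E,
	    example_rxdone_cb, softc);

	/* Hand a (pre-computed) descriptor address to the hardware. */
	ixpqmgr_qwrite(6, example_descriptor_paddr);
}
#endif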