/src/sys/dev/ic/ |
dwc_gmac.c |
    46 * IFNET_LOCK -> sc_intr_lock -> {sc_txq.t_mtx, sc_rxq.r_mtx}
    281 if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
    296 mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
    365 dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
    628 sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
    629 sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
    766 sc->sc_txq.t_physaddr);
    907 sc->sc_txq.t_physaddr);
    929 mutex_enter(&sc->sc_txq.t_mtx);
    931 mutex_exit(&sc->sc_txq.t_mtx) [all...]
dwc_gmac_var.h |
    103 struct dwc_gmac_tx_ring sc_txq; member in struct:dwc_gmac_softc
    107 bool sc_txbusy; /* (sc_txq.t_mtx) no Tx because down or busy */
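The dwc_gmac matches document a lock order (IFNET_LOCK -> sc_intr_lock -> {sc_txq.t_mtx, sc_rxq.r_mtx}) and show the TX ring mutex being initialized at IPL_NET and taken around ring updates. A minimal sketch of that pattern with NetBSD's mutex(9) follows; the ring structure and its t_queued field are assumptions for illustration, only the t_mtx handling mirrors the matches.

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/intr.h>

struct example_tx_ring {
        kmutex_t t_mtx;         /* leaf lock, taken after the softc interrupt lock */
        u_int    t_queued;      /* assumed: descriptors currently in use */
};

static void
example_tx_ring_init(struct example_tx_ring *t)
{
        /* cf. dwc_gmac.c:296 */
        mutex_init(&t->t_mtx, MUTEX_DEFAULT, IPL_NET);
}

static void
example_tx_ring_reset(struct example_tx_ring *t)
{
        /* cf. dwc_gmac.c:929-931: ring state only changes under t_mtx */
        mutex_enter(&t->t_mtx);
        t->t_queued = 0;
        mutex_exit(&t->t_mtx);
}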
|
ath.c |
    3366 if (qnum >= N(sc->sc_txq)) {
    3369 qnum, N(sc->sc_txq));
    3374 struct ath_txq *txq = &sc->sc_txq[qnum];
    3384 return &sc->sc_txq[qnum];
    3487 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
    4235 if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]) > 0)
    4272 nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
    4274 nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
    4276 nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
    4278 nacked += ath_tx_processq(sc, &sc->sc_txq[3]) [all...]
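ath.c indexes a fixed sc_txq[] array by hardware queue number and rejects out-of-range values before handing out a pointer into it (lines 3366-3384). A sketch of that guard, with a made-up queue count standing in for HAL_NUM_TX_QUEUES and the N() macro spelled as __arraycount():

#include <sys/param.h>

#define EXAMPLE_NUM_TX_QUEUES   10      /* hypothetical stand-in for HAL_NUM_TX_QUEUES */

struct example_txq {
        u_int q_num;                    /* hypothetical field */
};

struct example_softc {
        struct example_txq sc_txq[EXAMPLE_NUM_TX_QUEUES];
};

static struct example_txq *
example_txq_lookup(struct example_softc *sc, u_int qnum)
{
        /* cf. ath.c:3366: refuse queue numbers the array cannot hold */
        if (qnum >= __arraycount(sc->sc_txq))
                return NULL;
        return &sc->sc_txq[qnum];
}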
athnvar.h | 553 struct athn_txq sc_txq[31]; member in struct:athn_softc
|
athn.c |
    1813 struct athn_txq *txq = &sc->sc_txq[qid];
    1911 SIMPLEQ_INIT(&sc->sc_txq[qid].head);
    1912 sc->sc_txq[qid].lastds = NULL;
    1913 sc->sc_txq[qid].wait = NULL;
    1914 sc->sc_txq[qid].queued = 0;
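athn.c resets each per-QID queue by reinitializing a SIMPLEQ head and clearing its bookkeeping fields (lines 1911-1914); the arn5008.c and arn9003.c matches below test the same head with SIMPLEQ_EMPTY(). A sketch of that queue shape using <sys/queue.h>; the buffer type and the meanings of lastds and wait are assumptions, not athn's definitions.

#include <sys/param.h>
#include <sys/queue.h>

struct example_tx_buf {
        SIMPLEQ_ENTRY(example_tx_buf) bf_list;
};

struct example_txq {
        SIMPLEQ_HEAD(, example_tx_buf) head;    /* buffers handed to this queue */
        void *lastds;                           /* assumed: last chained descriptor */
        struct example_tx_buf *wait;            /* assumed: buffer waiting for room */
        int queued;                             /* frames currently on the queue */
};

static void
example_txq_reset(struct example_txq *txq)
{
        /* cf. athn.c:1911-1914 */
        SIMPLEQ_INIT(&txq->head);
        txq->lastds = NULL;
        txq->wait = NULL;
        txq->queued = 0;
}

static int
example_txq_is_empty(const struct example_txq *txq)
{
        /* cf. the SIMPLEQ_EMPTY() tests in arn5008.c:1081 / arn9003.c:1227 */
        return SIMPLEQ_EMPTY(&txq->head);
}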
|
athvar.h | 280 struct ath_txq sc_txq[HAL_NUM_TX_QUEUES]; member in struct:ath_softc
|
arn5008.c |
    971 struct athn_txq *txq = &sc->sc_txq[qid];
    1081 SIMPLEQ_EMPTY(&sc->sc_txq[ATHN_QID_CAB].head))
    1383 txq = &sc->sc_txq[qid];
|
arn9003.c |
    1121 txq = &sc->sc_txq[qid];
    1227 SIMPLEQ_EMPTY(&sc->sc_txq[ATHN_QID_CAB].head))
    1536 txq = &sc->sc_txq[qid];
|
/src/sys/dev/marvell/ |
if_gfevar.h | 165 struct gfe_txqueue sc_txq[2]; /* High & Low transmit queues */ member in struct:gfe_softc
|
if_gfe.c |
    701 if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
    710 IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
    728 struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];
    1187 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
    1358 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
    1447 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
    1471 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
    1563 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
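if_gfe.c keeps a pending queue of mbufs per transmit priority and checks IF_QFULL() before IF_ENQUEUE()ing onto the high-priority queue (lines 701 and 710). A sketch of that check-then-enqueue step; what the real driver does when the queue is full is not visible in the matches, so this standalone version simply drops the frame and reports ENOBUFS.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/socket.h>
#include <net/if.h>

static int
example_tx_enqueue(struct ifqueue *pendq, struct mbuf *m)
{
        /* cf. if_gfe.c:701: refuse to grow a full pending queue */
        if (IF_QFULL(pendq)) {
                m_freem(m);     /* assumption: drop on overflow */
                return ENOBUFS;
        }

        /* cf. if_gfe.c:710: hold the frame until descriptors are free */
        IF_ENQUEUE(pendq, m);
        return 0;
}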
|
/src/sys/arch/arm/gemini/ |
if_gmc.c |
    69 gmac_hwqueue_t *sc_txq[6]; member in struct:gmc_softc
    399 if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
    467 if (sc->sc_txq[0] == NULL) {
    475 for (i = 0; i < __arraycount(sc->sc_txq); i++) {
    476 sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
    479 if (sc->sc_txq[i] == NULL) {
    484 sc->sc_txq[i]->hwq_ifp = ifp;
    489 if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
    496 sc->sc_txq[i], hwq_link);
    498 SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i] [all...]
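if_gmc.c owns an array of six hardware queue pointers and fills it in a loop over __arraycount(sc->sc_txq), bailing out when a creation fails (lines 475-479). A sketch of that shape; example_hwqueue_create() is a hypothetical stand-in for gmac_hwqueue_create() and takes a made-up argument.

#include <sys/param.h>
#include <sys/errno.h>

struct example_hwqueue;

/* hypothetical allocator standing in for gmac_hwqueue_create() */
struct example_hwqueue *example_hwqueue_create(u_int qno);

struct example_softc {
        struct example_hwqueue *sc_txq[6];      /* cf. if_gmc.c:69 */
};

static int
example_txq_create_all(struct example_softc *sc)
{
        /* cf. if_gmc.c:475-479 */
        for (u_int i = 0; i < __arraycount(sc->sc_txq); i++) {
                sc->sc_txq[i] = example_hwqueue_create(i);
                if (sc->sc_txq[i] == NULL)
                        return ENOMEM;  /* caller tears down what was created */
        }
        return 0;
}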
/src/sys/arch/arm/broadcom/ |
bcm53xx_eth.c |
    148 struct bcmeth_txqueue sc_txq; member in struct:bcmeth_softc
    316 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
    547 bcmeth_txq_reset(sc, &sc->sc_txq);
    549 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
    575 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
    576 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);
    590 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
    607 struct bcmeth_txqueue * const txq = &sc->sc_txq;
    653 bcmeth_txq_consume(sc, &sc->sc_txq);
    655 bcmeth_txq_purge(sc, &sc->sc_txq); [all...]
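bcm53xx_eth.c enables its single TX queue by reading the transmit-control register and writing the value back with XMTCTL_ENABLE set (lines 575-576). A sketch of that read-modify-write expressed directly with bus_space(9); the register offset and enable bit are invented for illustration.

#include <sys/param.h>
#include <sys/bus.h>

#define EXAMPLE_XMTCTL          0x200           /* hypothetical register offset */
#define EXAMPLE_XMTCTL_ENABLE   __BIT(0)        /* hypothetical enable bit */

static void
example_txq_enable(bus_space_tag_t bst, bus_space_handle_t bsh)
{
        uint32_t ctl;

        /* cf. bcm53xx_eth.c:575-576: set the enable bit, keep the rest */
        ctl = bus_space_read_4(bst, bsh, EXAMPLE_XMTCTL);
        bus_space_write_4(bst, bsh, EXAMPLE_XMTCTL, ctl | EXAMPLE_XMTCTL_ENABLE);
}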
/src/sys/arch/luna68k/dev/ |
lunaws.c |
    119 uint8_t sc_txq[OMKBD_TXQ_LEN]; member in struct:ws_softc
    330 sio->sio_data = sc->sc_txq[sc->sc_txqhead];
    333 sc->sc_txqhead, sc->sc_txq[sc->sc_txqhead]));
    401 sc->sc_txq[sc->sc_txqtail] = txdata;
    404 sc->sc_txqtail, sc->sc_txq[sc->sc_txqtail]));
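lunaws.c buffers keyboard command bytes in a small uint8_t array, with sc_txqhead feeding the SIO data register (line 330) and sc_txqtail taking new bytes (line 401). A sketch of that producer/consumer ring; the queue length, mask, and wrap-around handling are illustrative assumptions, not lunaws.c's own macros.

#include <sys/param.h>

#define EXAMPLE_TXQ_LEN         64                      /* assumed power of two */
#define EXAMPLE_TXQ_MASK        (EXAMPLE_TXQ_LEN - 1)

struct example_kbd_softc {
        uint8_t sc_txq[EXAMPLE_TXQ_LEN];
        u_int   sc_txqhead;     /* next byte to transmit */
        u_int   sc_txqtail;     /* next free slot */
};

static void
example_kbd_enqueue(struct example_kbd_softc *sc, uint8_t txdata)
{
        /* cf. lunaws.c:401: producer stores at the tail */
        sc->sc_txq[sc->sc_txqtail] = txdata;
        sc->sc_txqtail = (sc->sc_txqtail + 1) & EXAMPLE_TXQ_MASK;
}

static uint8_t
example_kbd_dequeue(struct example_kbd_softc *sc)
{
        /* cf. lunaws.c:330: consumer feeds the head to the SIO data register */
        uint8_t txdata = sc->sc_txq[sc->sc_txqhead];

        sc->sc_txqhead = (sc->sc_txqhead + 1) & EXAMPLE_TXQ_MASK;
        return txdata;
}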
|
/src/sys/arch/powerpc/booke/dev/ |
pq3etsec.c |
    189 struct pq3etsec_txqueue sc_txq; member in struct:pq3etsec_softc
    650 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
    967 pq3etsec_txq_reset(sc, &sc->sc_txq);
    1102 pq3etsec_txq_consume(sc, &sc->sc_txq);
    1104 pq3etsec_txq_purge(sc, &sc->sc_txq);
    2346 struct pq3etsec_txqueue * const txq = &sc->sc_txq;
    2523 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
    2528 if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
    2529 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
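pq3etsec.c drives its single TX queue with a consume/enqueue pair, reclaiming completed descriptors and then pushing more frames while either step still makes progress (lines 2523-2529). A sketch of that service loop with hypothetical helpers; their return-value semantics are assumptions, not the driver's.

struct example_txq;

/* assumed: reclaim completed descriptors, returns non-zero if any were reclaimed */
int example_txq_consume(struct example_txq *txq);
/* assumed: move pending frames onto the ring, returns non-zero if any were added */
int example_txq_enqueue(struct example_txq *txq);

static void
example_txq_service(struct example_txq *txq)
{
        /* loosely modelled on pq3etsec.c:2528-2529: stop when neither side progresses */
        while (example_txq_consume(txq) || example_txq_enqueue(txq))
                continue;
}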
|