
Lines Matching defs:iq

656 	for (int iq = 0; iq < sc->sc_nqueues; iq++, txconf++) {
657 		struct tx_ring *txr = &sc->tx_rings[iq];
663 		txr->txr_igcq = &sc->queues[iq];
664 		txr->me = iq;
681 	for (int iq = 0; iq < sc->sc_nqueues; iq++, rxconf++) {
682 		struct rx_ring *rxr = &sc->rx_rings[iq];
688 		rxr->rxr_igcq = &sc->queues[iq];
689 		rxr->me = iq;
711 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
712 		struct igc_queue *q = &sc->queues[iq];
715 		q->txr = &sc->tx_rings[iq];
716 		q->rxr = &sc->rx_rings[iq];
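The three loops above stitch the per-queue data structures together: each igc_queue pairs the tx_ring and rx_ring with the same index, and each ring points back at its owning queue. A minimal userland sketch of that cross-linking, using simplified stand-in types rather than the driver's real if_igc.h definitions:

	#include <stdio.h>
	#include <stdlib.h>

	struct tx_ring { struct igc_queue *txr_igcq; int me; };
	struct rx_ring { struct igc_queue *rxr_igcq; int me; };
	struct igc_queue { struct tx_ring *txr; struct rx_ring *rxr; };

	int
	main(void)
	{
		int nqueues = 4;
		struct tx_ring *tx_rings = calloc(nqueues, sizeof(*tx_rings));
		struct rx_ring *rx_rings = calloc(nqueues, sizeof(*rx_rings));
		struct igc_queue *queues = calloc(nqueues, sizeof(*queues));

		for (int iq = 0; iq < nqueues; iq++) {
			/* ring -> queue back pointers and ring index */
			tx_rings[iq].txr_igcq = &queues[iq];
			tx_rings[iq].me = iq;
			rx_rings[iq].rxr_igcq = &queues[iq];
			rx_rings[iq].me = iq;
			/* queue -> ring forward pointers */
			queues[iq].txr = &tx_rings[iq];
			queues[iq].rxr = &rx_rings[iq];
		}
		printf("queue 2 drives tx ring %d\n", queues[2].txr->me);
		free(tx_rings); free(rx_rings); free(queues);
		return 0;
	}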
767 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
768 		struct rx_ring *rxr = &sc->rx_rings[iq];
774 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
775 		struct tx_ring *txr = &sc->tx_rings[iq];
988 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
989 		struct igc_queue *q = &sc->queues[iq];
997 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
998 		struct igc_queue *q = &sc->queues[iq];
1001 		    "%s q%d", device_xname(sc->sc_dev), iq);
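Lines 997-1001 build a per-queue name of the form "igc0 q3" for event counters. A kernel-context sketch of the usual attach pattern; the field names igcq_queue_evname and igcq_event_irqs are illustrative, not necessarily the driver's:

	snprintf(q->igcq_queue_evname, sizeof(q->igcq_queue_evname),
	    "%s q%d", device_xname(sc->sc_dev), iq);
	evcnt_attach_dynamic(&q->igcq_event_irqs, EVCNT_TYPE_INTR,
	    NULL, q->igcq_queue_evname, "irqs");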
1041 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1042 		struct igc_queue *q = &sc->queues[iq];
1055 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1056 		struct igc_queue *q = &sc->queues[iq];
1101 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1102 		struct igc_queue *q = &sc->queues[iq];
1132 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1136 		val = IGC_READ_REG(hw, IGC_RQDPC(iq));
1140 		IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
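Lines 1132-1140 harvest RQDPC, the per-queue Receive Queue Drop Packet Count: the register counts packets dropped for lack of RX descriptors and is not clear-on-read, so the driver writes 0 back after reading. A condensed kernel-context sketch; the accumulator and the if_statadd() sink for the total are assumptions:

	uint32_t dropped = 0;
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		uint32_t val = IGC_READ_REG(hw, IGC_RQDPC(iq));
		if (val != 0) {
			dropped += val;
			/* not clear-on-read: reset explicitly */
			IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
		}
	}
	if_statadd(ifp, if_iqdrops, dropped);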
1159 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1160 		struct igc_queue *q = &sc->queues[iq];
1170 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1171 		struct igc_queue *q = &sc->queues[iq];
1198 	int iq, error;
1200 	for (iq = 0, intrs = sc->sc_intrs, ihs = sc->sc_ihs;
1201 	    iq < sc->sc_nqueues; iq++, intrs++, ihs++) {
1202 		struct igc_queue *q = &sc->queues[iq];
1205 		    device_xname(dev), iq);
1222 		kcpuset_set(affinity, iq % ncpu);
1239 		q->msix = iq;
1240 		q->eims = 1 << iq;
1265 	sc->linkvec = iq;
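Lines 1198-1265 are from the MSI-X setup path: one handler per queue, spread round-robin over the CPUs via a kcpuset, with the vector number recorded in q->msix and a one-bit EIMS mask derived from it; the vector after the last queue (sc->linkvec) is left for link-status interrupts. A condensed kernel-context sketch of the loop body, with error handling elided and variable names assumed:

	char xname[32];
	snprintf(xname, sizeof(xname), "%s: q%d", device_xname(dev), iq);
	*ihs = pci_intr_establish_xname(pc, *intrs, IPL_NET,
	    igc_intr_queue, q, xname);

	/* Bind the vector to a CPU, round-robin over all CPUs. */
	kcpuset_t *affinity;
	kcpuset_create(&affinity, true);
	kcpuset_set(affinity, iq % ncpu);
	(void)interrupt_distribute(*ihs, affinity, NULL);
	kcpuset_destroy(affinity);

	q->msix = iq;		/* MSI-X vector index for this queue */
	q->eims = 1 << iq;	/* its bit in EIMS/EICR */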
1295 	struct igc_queue *iq = sc->queues;
1296 	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1297 	    igc_handle_queue, iq);
1298 	if (iq->igcq_si == NULL) {
1344 	struct igc_queue *iq = sc->queues;
1345 	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1346 	    igc_handle_queue, iq);
1347 	if (iq->igcq_si == NULL) {
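Both fragments above (lines 1295-1298 and 1344-1347) establish the soft interrupt that runs igc_handle_queue(); the driver can also defer the same work to a workqueue instead (see igc_sched_handle_queue() further down). A sketch of the two hooks being created side by side; the workqueue name and priority are assumptions:

	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    igc_handle_queue, iq);
	if (iq->igcq_si == NULL)
		goto fail;

	/* Workqueue alternative for the same deferred work. */
	error = workqueue_create(&sc->sc_queue_wq, "igcwq",
	    igc_handle_queue_work, sc, PRI_SOFTNET, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);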
1560 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1561 		struct rx_ring *rxr = &sc->rx_rings[iq];
1911 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1912 		struct tx_ring *txr = &sc->tx_rings[iq];
1917 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1918 		struct rx_ring *rxr = &sc->rx_rings[iq];
2011 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2012 		struct rx_ring *rxr = &sc->rx_rings[iq];
2014 		ifr[iq].ifr_size = MCLBYTES;
2015 		snprintf(ifr[iq].ifr_name, sizeof(ifr[iq].ifr_name), "%d", iq);
2016 		ifr[iq].ifr_info = rxr->rx_ring;
2641 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2642 		struct igc_queue *q = &sc->queues[iq];
2645 			igc_set_queues(sc, iq, q->msix, 0);
2647 			igc_set_queues(sc, iq, q->msix, 1);
2754 	struct igc_queue *iq = arg;
2755 	struct igc_softc *sc = iq->sc;
2757 	struct rx_ring *rxr = iq->rxr;
2758 	struct tx_ring *txr = iq->txr;
2763 	IGC_QUEUE_EVENT(iq, irqs, 1);
2776 		IGC_QUEUE_EVENT(iq, req, 1);
2777 		igc_sched_handle_queue(sc, iq);
2779 		igc_enable_queue(sc, iq->eims);
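Lines 2754-2779 come from the per-queue MSI-X interrupt handler: count the interrupt, defer the actual TX/RX work, and leave this queue's interrupt masked until the deferred handler finishes and re-enables it. A reconstruction as a sketch; the IFF_RUNNING test and ifp lookup stand in for elided lines:

	static int
	igc_intr_queue(void *arg)
	{
		struct igc_queue *iq = arg;
		struct igc_softc *sc = iq->sc;
		struct ifnet *ifp = &sc->sc_ec.ec_if;

		IGC_QUEUE_EVENT(iq, irqs, 1);

		if (__predict_true(ISSET(ifp->if_flags, IFF_RUNNING))) {
			IGC_QUEUE_EVENT(iq, req, 1);
			igc_sched_handle_queue(sc, iq);	/* defer the work */
		} else {
			/* nothing to do; just unmask this queue */
			igc_enable_queue(sc, iq->eims);
		}
		return 1;
	}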
2790 	struct igc_queue *iq = &sc->queues[0];
2791 	struct rx_ring *rxr = iq->rxr;
2792 	struct tx_ring *txr = iq->txr;
2809 	IGC_QUEUE_EVENT(iq, irqs, 1);
2848 		IGC_QUEUE_EVENT(iq, req, 1);
2849 		igc_sched_handle_queue(sc, iq);
2860 	struct igc_queue *iq = arg;
2861 	struct igc_softc *sc = iq->sc;
2862 	struct tx_ring *txr = iq->txr;
2863 	struct rx_ring *rxr = iq->rxr;
2868 	IGC_QUEUE_EVENT(iq, handleq, 1);
2885 		igc_sched_handle_queue(sc, iq);
2888 		igc_enable_queue(sc, iq->eims);
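Lines 2860-2888 are the deferred handler itself: drain TX completions and RX descriptors, and if RX work remains, reschedule rather than unmask, so one busy queue cannot monopolize the CPU at interrupt priority. A sketch of the whole function; the lock names, the process limit, and the rxeof return convention are assumptions:

	static void
	igc_handle_queue(void *arg)
	{
		struct igc_queue *iq = arg;
		struct igc_softc *sc = iq->sc;
		struct tx_ring *txr = iq->txr;
		struct rx_ring *rxr = iq->rxr;
		bool more;

		IGC_QUEUE_EVENT(iq, handleq, 1);

		mutex_enter(&txr->txr_lock);
		igc_txeof(txr);
		mutex_exit(&txr->txr_lock);

		mutex_enter(&rxr->rxr_lock);
		more = igc_rxeof(rxr, sc->sc_rx_process_limit);
		mutex_exit(&rxr->rxr_lock);

		if (more)
			igc_sched_handle_queue(sc, iq);	/* RX limit hit */
		else
			igc_enable_queue(sc, iq->eims);	/* all caught up */
	}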
2897 	struct igc_queue *iq =
2900 	igc_handle_queue(iq);
2904 igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
2907 	if (iq->igcq_workqueue) {
2909 		workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
2912 		softint_schedule(iq->igcq_si);
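The dispatch above in full outline: with igcq_workqueue set, the deferred work is enqueued on the driver's workqueue; otherwise the softint established earlier is scheduled. Both paths end up in igc_handle_queue(). A sketch filling in the elided continuation lines; the curcpu() argument (consistent with a WQ_PERCPU workqueue) is an assumption:

	static void
	igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
	{

		if (iq->igcq_workqueue) {
			workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
			    curcpu());
		} else {
			softint_schedule(iq->igcq_si);
		}
	}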
2921 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2922 		struct igc_queue *q = &sc->queues[iq];
2978 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2979 		struct tx_ring *txr = &sc->tx_rings[iq];
3036 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3037 		struct tx_ring *txr = &sc->tx_rings[iq];
3042 		IGC_WRITE_REG(hw, IGC_TDLEN(iq),
3044 		IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
3045 		IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);
3048 		IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3049 		IGC_WRITE_REG(hw, IGC_TDH(iq), 0);
3061 		IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);
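Lines 3036-3061 program one TX queue: ring length in bytes, the 64-bit DMA base split across TDBAH (high word) and TDBAL (low word), head and tail reset to 0, then the queue enabled through TXDCTL. A condensed sketch of that loop body; the descriptor-count and DMA-map field names are assumptions, and the threshold bits normally set in TXDCTL are elided:

	const uint64_t bus_addr =
	    txr->txdma.dma_map->dm_segs[0].ds_addr;

	IGC_WRITE_REG(hw, IGC_TDLEN(iq),
	    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
	IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
	IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);

	/* Start with an empty ring: head and tail both at 0. */
	IGC_WRITE_REG(hw, IGC_TDT(iq), 0);
	IGC_WRITE_REG(hw, IGC_TDH(iq), 0);

	/* Finally turn the queue on. */
	uint32_t txdctl = IGC_READ_REG(hw, IGC_TXDCTL(iq));
	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);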
3084 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3085 		struct tx_ring *txr = &sc->tx_rings[iq];
3368 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3369 		struct rx_ring *rxr = &sc->rx_rings[iq];
3494 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3495 		struct rx_ring *rxr = &sc->rx_rings[iq];
3499 		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);
3503 		IGC_WRITE_REG(hw, IGC_RDLEN(iq),
3505 		IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
3506 		IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
3507 		IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);
3510 		IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
3511 		IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
3514 		uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
3520 		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
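Lines 3494-3520 are the RX counterpart: each queue is quiesced (RXDCTL = 0) before its registers are touched, then the ring length, 64-bit base, and SRRCTL buffer-layout register are programmed and head/tail reset; only afterwards is the enable bit set back in RXDCTL. A condensed sketch, with the DMA-map field names and the srrctl value assumed:

	IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);	/* quiesce the queue first */

	const uint64_t bus_addr =
	    rxr->rxdma.dma_map->dm_segs[0].ds_addr;
	IGC_WRITE_REG(hw, IGC_RDLEN(iq),
	    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
	IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
	IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
	IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);	/* buffer sizes/layout */

	IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
	IGC_WRITE_REG(hw, IGC_RDT(iq), 0);

	/* Re-enable the queue only after everything else is set up. */
	uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
	IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);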
3539 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3540 		struct rx_ring *rxr = &sc->rx_rings[iq];