
Lines Matching refs:iq

657 for (int iq = 0; iq < sc->sc_nqueues; iq++, txconf++) {
658 struct tx_ring *txr = &sc->tx_rings[iq];
664 txr->txr_igcq = &sc->queues[iq];
665 txr->me = iq;
682 for (int iq = 0; iq < sc->sc_nqueues; iq++, rxconf++) {
683 struct rx_ring *rxr = &sc->rx_rings[iq];
689 rxr->rxr_igcq = &sc->queues[iq];
690 rxr->me = iq;
712 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
713 struct igc_queue *q = &sc->queues[iq];
716 q->txr = &sc->tx_rings[iq];
717 q->rxr = &sc->rx_rings[iq];
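The hits at 657-717 come from queue allocation: the driver walks sc_nqueues once for the TX rings, once for the RX rings, and once more to cross-link each igc_queue with its rings. A minimal sketch of the resulting shape, with the allocation and error unwinding between these lines elided:

	/* Per-queue wiring as suggested by the matches above. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct tx_ring *txr = &sc->tx_rings[iq];
		struct rx_ring *rxr = &sc->rx_rings[iq];
		struct igc_queue *q = &sc->queues[iq];

		txr->txr_igcq = q;	/* ring -> queue back pointer */
		txr->me = iq;		/* ring knows its own index */
		rxr->rxr_igcq = q;
		rxr->me = iq;
		q->txr = txr;		/* queue -> ring forward pointers */
		q->rxr = rxr;
	}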
768 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
769 struct rx_ring *rxr = &sc->rx_rings[iq];
775 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
776 struct tx_ring *txr = &sc->tx_rings[iq];
989 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
990 struct igc_queue *q = &sc->queues[iq];
998 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
999 struct igc_queue *q = &sc->queues[iq];
1002 "%s q%d", device_xname(sc->sc_dev), iq);
1042 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1043 struct igc_queue *q = &sc->queues[iq];
1056 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1057 struct igc_queue *q = &sc->queues[iq];
1102 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1103 struct igc_queue *q = &sc->queues[iq];
1133 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1137 val = IGC_READ_REG(hw, IGC_RQDPC(iq));
1141 IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
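Lines 1133-1141 harvest IGC_RQDPC, the per-queue receive-drop counter; the explicit write of 0 afterwards shows the driver zeroing the register by hand so that each read yields a delta. A sketch of that accumulation, where sc->qdrops is an assumed software total:

	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		uint32_t val = IGC_READ_REG(hw, IGC_RQDPC(iq));
		if (val != 0) {
			sc->qdrops += val;	/* fold into software stats */
			IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0); /* reset delta */
		}
	}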
1160 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1161 struct igc_queue *q = &sc->queues[iq];
1171 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1172 struct igc_queue *q = &sc->queues[iq];
1199 int iq, error;
1201 for (iq = 0, intrs = sc->sc_intrs, ihs = sc->sc_ihs;
1202 iq < sc->sc_nqueues; iq++, intrs++, ihs++) {
1203 struct igc_queue *q = &sc->queues[iq];
1206 device_xname(dev), iq);
1223 kcpuset_set(affinity, iq % ncpu);
1240 q->msix = iq;
1241 q->eims = 1 << iq;
1266 sc->linkvec = iq;
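The block at 1199-1266 establishes one MSI-X vector per queue, spreads the handlers round-robin over the CPUs (iq % ncpu), and records both the vector index and its bit in the EIMS mask; the vector left over after the loop is reserved for link-state interrupts. A condensed sketch, with the pci_intr_establish_xname()/interrupt_distribute() plumbing and error handling elided:

	int iq;
	for (iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		/* ... establish sc->sc_ihs[iq] for vector iq here ... */

		kcpuset_set(affinity, iq % ncpu);	/* pick a CPU */
		/* ... bind the vector to that CPU ... */

		q->msix = iq;		/* MSI-X vector index */
		q->eims = 1 << iq;	/* matching EIMS/EICR bit */
	}
	sc->linkvec = iq;		/* next free vector: link events */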
1296 struct igc_queue *iq = sc->queues;
1297 iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1298 igc_handle_queue, iq);
1299 if (iq->igcq_si == NULL) {
1345 struct igc_queue *iq = sc->queues;
1346 iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1347 igc_handle_queue, iq);
1348 if (iq->igcq_si == NULL) {
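The two identical softint_establish() blocks at 1296-1298 and 1345-1347 sit in separate attach paths (single-vector MSI and legacy INTx, judging by their placement after the MSI-X code); both run with a single igc_queue and defer the real work to igc_handle_queue() in SOFTINT_NET | SOFTINT_MPSAFE soft-interrupt context.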
1561 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1562 struct rx_ring *rxr = &sc->rx_rings[iq];
1912 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1913 struct tx_ring *txr = &sc->tx_rings[iq];
1918 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1919 struct rx_ring *rxr = &sc->rx_rings[iq];
2012 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2013 struct rx_ring *rxr = &sc->rx_rings[iq];
2015 ifr[iq].ifr_size = MCLBYTES;
2016 snprintf(ifr[iq].ifr_name, sizeof(ifr[iq].ifr_name), "%d", iq);
2017 ifr[iq].ifr_info = rxr->rx_ring;
2642 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2643 struct igc_queue *q = &sc->queues[iq];
2646 igc_set_queues(sc, iq, q->msix, 0);
2648 igc_set_queues(sc, iq, q->msix, 1);
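At 2642-2648 each queue's interrupt causes are routed to its MSI-X vector with two igc_set_queues() calls per queue; going by the igb-family IVAR layout, the trailing 0/1 argument selects the RX and TX cause entry respectively.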
2755 struct igc_queue *iq = arg;
2756 struct igc_softc *sc = iq->sc;
2758 struct rx_ring *rxr = iq->rxr;
2759 struct tx_ring *txr = iq->txr;
2764 IGC_QUEUE_EVENT(iq, irqs, 1);
2777 IGC_QUEUE_EVENT(iq, req, 1);
2778 igc_sched_handle_queue(sc, iq);
2780 igc_enable_queue(sc, iq->eims);
2791 struct igc_queue *iq = &sc->queues[0];
2792 struct rx_ring *rxr = iq->rxr;
2793 struct tx_ring *txr = iq->txr;
2810 IGC_QUEUE_EVENT(iq, irqs, 1);
2849 IGC_QUEUE_EVENT(iq, req, 1);
2850 igc_sched_handle_queue(sc, iq);
2861 struct igc_queue *iq = arg;
2862 struct igc_softc *sc = iq->sc;
2863 struct tx_ring *txr = iq->txr;
2864 struct rx_ring *rxr = iq->rxr;
2869 IGC_QUEUE_EVENT(iq, handleq, 1);
2886 igc_sched_handle_queue(sc, iq);
2889 igc_enable_queue(sc, iq->eims);
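Lines 2755-2889 show the interrupt flow end to end: the per-vector handler igc_intr_queue() counts the IRQ, does a bounded amount of TX/RX work, and then either defers the remainder or re-arms its vector; igc_handle_queue() finishes the same way. A sketch of that common tail, assuming txmore/rxmore carry the "work left over" results:

	if (txmore || rxmore) {
		/* budget exhausted: run again, leave the vector masked */
		igc_sched_handle_queue(sc, iq);
	} else {
		/* caught up: unmask this queue's MSI-X vector */
		igc_enable_queue(sc, iq->eims);
	}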
2898 struct igc_queue *iq =
2901 igc_handle_queue(iq);
2905 igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
2908 if (iq->igcq_workqueue) {
2910 workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
2913 softint_schedule(iq->igcq_si);
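igc_sched_handle_queue() (2905-2913) is the deferral switch: a per-device workqueue when igcq_workqueue is set, otherwise the soft interrupt established at attach time. Reassembled from the hits, roughly as follows (the curcpu() argument is an assumption; the matching line is cut off):

	static void
	igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
	{

		if (iq->igcq_workqueue) {
			/* kthread-backed workqueue: may sleep */
			workqueue_enqueue(sc->sc_queue_wq,
			    &iq->igcq_wq_cookie, curcpu());
		} else {
			/* softint: runs soon on this CPU, may not sleep */
			softint_schedule(iq->igcq_si);
		}
	}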
2922 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2923 struct igc_queue *q = &sc->queues[iq];
2979 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2980 struct tx_ring *txr = &sc->tx_rings[iq];
3037 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3038 struct tx_ring *txr = &sc->tx_rings[iq];
3043 IGC_WRITE_REG(hw, IGC_TDLEN(iq),
3045 IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
3046 IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);
3049 IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3050 IGC_WRITE_REG(hw, IGC_TDH(iq), 0);
3062 IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);
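Lines 3037-3062 program one TX descriptor ring per queue: the ring size goes into TDLEN, the 64-bit DMA base is split across TDBAH/TDBAL, head and tail are reset to 0, and TXDCTL finally enables the ring. A condensed sketch; the dma_map path and the enable-bit name are assumptions in line with the igb-family register layout:

	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct tx_ring *txr = &sc->tx_rings[iq];
		const uint64_t bus_addr =
		    txr->txdma.dma_map->dm_segs[0].ds_addr;

		/* ring length in bytes, base split into high/low halves */
		IGC_WRITE_REG(hw, IGC_TDLEN(iq),
		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
		IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);

		/* empty ring: head == tail == 0 */
		IGC_WRITE_REG(hw, IGC_TDT(iq), 0);
		IGC_WRITE_REG(hw, IGC_TDH(iq), 0);

		/* enable the queue last */
		IGC_WRITE_REG(hw, IGC_TXDCTL(iq), IGC_TXDCTL_QUEUE_ENABLE);
	}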
3085 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3086 struct tx_ring *txr = &sc->tx_rings[iq];
3369 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3370 struct rx_ring *rxr = &sc->rx_rings[iq];
3495 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3496 struct rx_ring *rxr = &sc->rx_rings[iq];
3500 IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);
3504 IGC_WRITE_REG(hw, IGC_RDLEN(iq),
3506 IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
3507 IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
3508 IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);
3511 IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
3512 IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
3515 uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
3521 IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
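The RX counterpart at 3495-3521 follows the same shape with two extra steps: the queue is quiesced (RXDCTL = 0) before its base/length registers are re-pointed, and SRRCTL sets the receive buffer sizing. Under the same naming assumptions as the TX sketch above:

	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct rx_ring *rxr = &sc->rx_rings[iq];
		const uint64_t bus_addr =
		    rxr->rxdma.dma_map->dm_segs[0].ds_addr;

		/* disable the queue before touching its geometry */
		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);

		IGC_WRITE_REG(hw, IGC_RDLEN(iq),
		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
		IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
		IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl); /* buffer sizes */

		/* empty ring, then set the enable bit read-modify-write */
		IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
		IGC_WRITE_REG(hw, IGC_RDT(iq), 0);

		uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
	}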
3540 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3541 struct rx_ring *rxr = &sc->rx_rings[iq];