
Lines Matching defs:que

2714 	struct ix_queue *que = &sc->queues[vector];
2718 mutex_enter(&que->dc_mtx);
2719 if (que->disabled_count > 0 && --que->disabled_count > 0)
2734 mutex_exit(&que->dc_mtx);
2744 struct ix_queue *que = &sc->queues[vector];
2748 mutex_enter(&que->dc_mtx);
2750 if (que->disabled_count > 0) {
2752 que->disabled_count++;
2755 que->disabled_count++;
2769 mutex_exit(&que->dc_mtx);
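
Read together, the matches at 2714-2769 implement a nesting (reference-counted) mask of a queue's MSI-X vector: dc_mtx protects disabled_count, the vector is masked in hardware only on the 0 -> 1 transition, and unmasked only when the count drops back to 0, so nested disable/enable pairs compose safely. A minimal sketch of the pattern, with hypothetical hw_mask_queue()/hw_unmask_queue() helpers standing in for the driver's EIMC/EIMS register writes:

static void
example_disable_queue(struct ix_queue *que)
{
	mutex_enter(&que->dc_mtx);
	/* Mask the vector only on the 0 -> 1 transition. */
	if (que->disabled_count == 0)
		hw_mask_queue(que);		/* hypothetical: EIMC write */
	que->disabled_count++;
	mutex_exit(&que->dc_mtx);
}

static void
example_enable_queue(struct ix_queue *que)
{
	mutex_enter(&que->dc_mtx);
	/* Unmask only when the outermost disable is released. */
	if (que->disabled_count > 0 && --que->disabled_count == 0)
		hw_unmask_queue(que);		/* hypothetical: EIMS write */
	mutex_exit(&que->dc_mtx);
}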
2786 ixgbe_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que)
2789 if (que->txrx_use_workqueue) {
2803 * (que->wq_cookie). So, "enqueued flag" to avoid
2806 workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu());
2808 softint_schedule(que->que_si);
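
The matches at 2786-2808 show the common dispatch helper picking between a workqueue and a softint. A condensed sketch, assuming the que_wq, wq_cookie, and que_si members seen above; a struct work may only be on a workqueue once at a time, which is what the driver's "enqueued flag" comment guards against:

static void
example_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que)
{
	if (que->txrx_use_workqueue) {
		/* wq_cookie must not already be enqueued here. */
		workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu());
	} else
		softint_schedule(que->que_si);
}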
2817 struct ix_queue *que = arg;
2818 struct ixgbe_softc *sc = que->sc;
2820 struct tx_ring *txr = que->txr;
2821 struct rx_ring *rxr = que->rxr;
2828 ixgbe_disable_queue(sc, que->msix);
2829 IXGBE_EVC_ADD(&que->irqs, 1);
2832 * Don't change "que->txrx_use_workqueue" from this point to avoid
2835 que->txrx_use_workqueue = sc->txrx_use_workqueue;
2851 if (que->eitr_setting)
2852 ixgbe_eitr_write(sc, que->msix, que->eitr_setting);
2854 que->eitr_setting = 0;
2887 que->eitr_setting = newitr;
2896 ixgbe_sched_handle_que(sc, que);
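
The ixgbe_msix_que matches at 2817-2896 are the hard-interrupt half of a queue: mask the vector, latch the workqueue-vs-softint choice for this pass, apply the interrupt-moderation value computed on the previous pass, and defer the real TX/RX work. A reconstruction under those assumptions, with the traffic-based newitr calculation elided:

static int
example_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

	/* Mask this vector until the deferred handler re-enables it. */
	ixgbe_disable_queue(sc, que->msix);
	IXGBE_EVC_ADD(&que->irqs, 1);

	/*
	 * Latch the sysctl value once per interrupt so the choice of
	 * workqueue vs. softint cannot change while this pass is in
	 * flight.
	 */
	que->txrx_use_workqueue = sc->txrx_use_workqueue;

	/* Apply last pass's moderation value, then compute the next. */
	if (que->eitr_setting)
		ixgbe_eitr_write(sc, que->msix, que->eitr_setting);
	que->eitr_setting = 0;
	/* ... per-traffic newitr calculation elided ... */

	ixgbe_sched_handle_que(sc, que);
	return 1;
}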
3331 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3336 if (que == NULL)
3339 sc = que->sc;
3343 reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix));
3373 ixgbe_eitr_write(sc, que->msix, reg);
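
Lines 3331-3373 belong to a sysctl handler that exposes a queue's EITR (interrupt throttle) register. A sketch in the standard NetBSD sysctl_lookup() idiom, with the validation of the user-supplied value elided:

static int
example_sysctl_eitr(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct ixgbe_softc *sc;
	uint32_t reg;
	int error;

	if (que == NULL)
		return 0;
	sc = que->sc;

	/* Report the live register value, not a cached copy. */
	reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix));
	node.sysctl_data = &reg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	/* ... range check of the new value elided ... */
	ixgbe_eitr_write(sc, que->msix, reg);
	return 0;
}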
3491 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3496 * I think changing "que->txrx_use_workqueue" in interrupt handler
3648 struct ix_queue *que = sc->queues;
3652 for (i = 0; i < sc->num_queues; i++, que++, txr++) {
3657 if (que->que_si != NULL)
3658 softint_disestablish(que->que_si);
4053 struct ix_queue *que;
4073 for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
4074 que->disabled_count = 0;
4384 struct ix_queue *que = sc->queues;
4398 for (int i = 0; i < sc->num_queues; i++, que++) {
4402 ixgbe_set_ivar(sc, rxr->me, que->msix, 0);
4404 ixgbe_set_ivar(sc, txr->me, que->msix, 1);
4406 ixgbe_eitr_write(sc, que->msix, newitr);
4413 que->eitr_setting = 0;
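
The loop at 4384-4413 binds each RX and TX ring to its queue's MSI-X vector through the IVAR table and seeds every vector with the same initial moderation value. A sketch assuming the per-queue rxr/txr pointers seen above:

for (int i = 0; i < sc->num_queues; i++, que++) {
	struct rx_ring *rxr = que->rxr;
	struct tx_ring *txr = que->txr;

	ixgbe_set_ivar(sc, rxr->me, que->msix, 0);	/* type 0: RX */
	ixgbe_set_ivar(sc, txr->me, que->msix, 1);	/* type 1: TX */
	ixgbe_eitr_write(sc, que->msix, newitr);
	/* Let adaptive moderation start from a clean slate. */
	que->eitr_setting = 0;
}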
4616 struct ix_queue *que = sc->queues;
4658 que = sc->queues;
4659 for (i = 0; i < sc->num_queues; i++, que++) {
4660 struct tx_ring *txr = que->txr;
4685 que = sc->queues;
4686 for (i = 0; i < sc->num_queues; i++, que++) {
4688 if (que->txr->busy)
4689 queues |= 1ULL << que->me;
4695 if (que->busy == IXGBE_QUEUE_HUNG) {
4698 sc->active_queues &= ~(1ULL << que->me);
4702 if ((sc->active_queues & (1ULL << que->me)) == 0)
4703 sc->active_queues |= 1ULL << que->me;
4705 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4708 que->txr->busy = IXGBE_QUEUE_HUNG;
4718 que = sc->queues;
4719 for (i = 0; i < sc->num_queues; i++, que++) {
4720 mutex_enter(&que->dc_mtx);
4721 if (que->disabled_count == 0)
4724 mutex_exit(&que->dc_mtx);
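
The matches at 4616-4724 come from the watchdog path: a 64-bit active_queues mask tracks which queues are making progress, and a queue whose TX ring stays busy for IXGBE_MAX_TX_BUSY ticks is marked IXGBE_QUEUE_HUNG. A condensed reading of that logic, reconstructed from the lines above:

uint64_t queues = 0;

for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
	/* Remember which queues still have TX work outstanding. */
	if (que->txr->busy)
		queues |= 1ULL << que->me;

	if (que->busy == IXGBE_QUEUE_HUNG) {
		/* Hung: drop it from the active set and retry it. */
		sc->active_queues &= ~(1ULL << que->me);
	} else if ((sc->active_queues & (1ULL << que->me)) == 0)
		sc->active_queues |= 1ULL << que->me;

	if (que->busy >= IXGBE_MAX_TX_BUSY)
		que->txr->busy = IXGBE_QUEUE_HUNG;
}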
5068 struct ix_queue *que = sc->queues;
5069 for (int i = 0; i < sc->num_queues; i++, que++)
5070 que->eitr_setting = 0;
5173 struct ix_queue *que = sc->queues;
5242 for (int i = 0; i < sc->num_queues; i++, que++)
5243 ixgbe_enable_queue(sc, que->msix);
5255 struct ix_queue *que = sc->queues;
5263 for (int i = 0; i < sc->num_queues; i++, que++)
5264 ixgbe_disable_queue_internal(sc, que->msix, nestok);
5296 struct ix_queue *que = arg;
5297 struct ixgbe_softc *sc = que->sc;
5326 IXGBE_EVC_ADD(&que->irqs, 1);
5330 * "que->txrx_use_workqueue".
5332 que->txrx_use_workqueue = sc->txrx_use_workqueue;
5342 IXGBE_EVC_ADD(&que->req, 1);
5343 ixgbe_sched_handle_que(sc, que);
5364 struct ix_queue *que = sc->queues;
5370 for (int i = 0; i < sc->num_queues; i++, que++) {
5371 if (que->res != NULL) {
6692 struct ix_queue *que = context;
6693 struct ixgbe_softc *sc = que->sc;
6694 struct tx_ring *txr = que->txr;
6698 IXGBE_EVC_ADD(&que->handleq, 1);
6708 if ((&sc->queues[0] == que)
6712 more |= ixgbe_rxeof(que);
6716 IXGBE_EVC_ADD(&que->req, 1);
6717 ixgbe_sched_handle_que(sc, que);
6718 } else if (que->res != NULL) {
6720 ixgbe_enable_queue(sc, que->msix);
6735 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6741 ixgbe_handle_que(que);
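
Lines 6692-6741 are the deferred handler and its workqueue wrapper. The wrapper receives only the struct work that was enqueued, so container_of() recovers the enclosing ix_queue from its wq_cookie member; a sketch matching NetBSD's workqueue callback signature:

#include <sys/container_of.h>

static void
example_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/* Same body as the softint path. */
	ixgbe_handle_que(que);
}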
6752 struct ix_queue *que = sc->queues;
6779 sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6833 que->que_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6834 ixgbe_handle_que, que);
6842 || (que->que_si == NULL) || error != 0) {
6861 struct ix_queue *que = sc->queues;
6909 for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
6918 que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
6919 sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6921 if (que->res == NULL) {
6923 "Failed to register QUE handler\n");
6927 que->msix = vector;
6928 sc->active_queues |= 1ULL << que->msix;
6982 que->que_si
6984 ixgbe_handle_que, que);
6985 if (que->que_si == NULL) {
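
Finally, the matches at 6861-6985 establish one MSI-X handler plus one softint per queue. A sketch of that loop, assuming a caller-prepared intr_xname buffer and with the error unwinding elided:

for (i = 0, que = sc->queues; i < sc->num_queues; i++, vector++, que++) {
	/* Hard-interrupt half, named so it shows up in vmstat -i. */
	que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
	    sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que, intr_xname);
	if (que->res == NULL)
		return ENXIO;
	que->msix = vector;
	sc->active_queues |= 1ULL << que->msix;

	/* Deferred half runs at soft-interrupt level. */
	que->que_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    ixgbe_handle_que, que);
	if (que->que_si == NULL)
		return ENXIO;
}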