Lines matching defs:netq (one fragment per match; the leading number on each entry is that fragment's line number in the driver source)
706 struct vioif_netqueue *netq;
721 netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
723 mutex_enter(&netq->netq_lock);
724 vioif_populate_rx_mbufs_locked(sc, netq);
725 mutex_exit(&netq->netq_lock);
754 struct vioif_netqueue *netq;
762 netq = &sc->sc_netqs[i];
764 mutex_enter(&netq->netq_lock);
765 netq->netq_stopping = true;
766 mutex_exit(&netq->netq_lock);
785 netq = &sc->sc_netqs[i];
786 vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work);
790 netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
791 vioif_rx_queue_clear(sc, vsc, netq);
793 netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
794 vioif_tx_queue_clear(sc, vsc, netq);
799 netq = &sc->sc_netqs[i];
801 mutex_enter(&netq->netq_lock);
802 netq->netq_stopping = false;
803 mutex_exit(&netq->netq_lock);
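Lines 754-803 are the stop path. Read in order they form a four-step quiesce handshake; condensed into one sketch below (the loop bounds netq_count/nq_pairs and the function around these fragments are assumptions, not listed source):

    /* 1. Mark every queue stopping so handlers bail at their next check. */
    for (i = 0; i < netq_count; i++) {
        netq = &sc->sc_netqs[i];
        mutex_enter(&netq->netq_lock);
        netq->netq_stopping = true;
        mutex_exit(&netq->netq_lock);
    }
    /* 2. Wait out any handler still queued on the shared workqueue. */
    for (i = 0; i < netq_count; i++)
        vioif_work_wait(sc->sc_txrx_workqueue, &sc->sc_netqs[i].netq_work);
    /* 3. Reclaim outstanding ring entries, RX then TX per queue pair. */
    for (i = 0; i < nq_pairs; i++) {
        vioif_rx_queue_clear(sc, vsc, &sc->sc_netqs[VIOIF_NETQ_RXQID(i)]);
        vioif_tx_queue_clear(sc, vsc, &sc->sc_netqs[VIOIF_NETQ_TXQID(i)]);
    }
    /* 4. Clear the flag so a later init may start handlers again. */
    for (i = 0; i < netq_count; i++) {
        netq = &sc->sc_netqs[i];
        mutex_enter(&netq->netq_lock);
        netq->netq_stopping = false;
        mutex_exit(&netq->netq_lock);
    }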
835 struct vioif_netqueue *netq;
840 netq = &sc->sc_netqs[qid];
841 txc = netq->netq_ctx;
855 if (mutex_tryenter(&netq->netq_lock)) {
856 vioif_transmit_locked(ifp, netq);
857 mutex_exit(&netq->netq_lock);
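The fragment at 855-857 is the non-blocking transmit fast path: by this point the mbuf has presumably been handed to the TX context fetched at 841, so on lock contention the function can return at once and let the current lock holder, or the deferred-transmit softint registered at 1512, flush it:

    if (mutex_tryenter(&netq->netq_lock)) {
        /* Uncontended: flush the queued packet(s) ourselves. */
        vioif_transmit_locked(ifp, netq);
        mutex_exit(&netq->netq_lock);
    }
    /* Contended: neither spin nor sleep; someone else sends the packet. */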
867 struct vioif_netqueue *netq;
877 netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
879 mutex_enter(&netq->netq_lock);
880 if (!netq->netq_running_handle) {
881 netq->netq_running_handle = true;
882 vioif_net_sched_handle(sc, netq);
884 mutex_exit(&netq->netq_lock);
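Lines 877-884 show the claim-then-schedule idiom used everywhere a handler is kicked from outside: netq_running_handle is taken under the lock before scheduling, so at most one handler instance runs per queue. As a loop (the bound nq_pairs is an assumption):

    for (i = 0; i < nq_pairs; i++) {
        netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];

        mutex_enter(&netq->netq_lock);
        if (!netq->netq_running_handle) {
            /* Claim the handler slot before scheduling it. */
            netq->netq_running_handle = true;
            vioif_net_sched_handle(sc, netq);
        }
        mutex_exit(&netq->netq_lock);
    }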
1036 struct vioif_netqueue *netq;
1043 netq = &sc->sc_netqs[i];
1044 evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC,
1045 NULL, netq->netq_evgroup, "failed to load mbuf to DMA");
1046 evcnt_attach_dynamic(&netq->netq_enqueue_failed,
1047 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
1052 rxc = netq->netq_ctx;
1054 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
1058 txc = netq->netq_ctx;
1060 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
1063 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
1156 struct vioif_netqueue *netq;
1211 netq = &sc->sc_netqs[qid];
1212 maps = netq->netq_maps;
1213 vq_num = netq->netq_vq->vq_num;
1215 netq->netq_maps_kva = vioif_assign_mem(&p,
1241 netq = &sc->sc_netqs[qid];
1242 vq_num = netq->netq_vq->vq_num;
1244 kmemsize += sizeof(netq->netq_maps[0]) * vq_num;
1254 netq = &sc->sc_netqs[qid];
1255 vq_num = netq->netq_vq->vq_num;
1257 netq->netq_maps = vioif_assign_mem(&p,
1258 sizeof(netq->netq_maps[0]) * vq_num);
1291 netq = &sc->sc_netqs[qid];
1292 vq_num = netq->netq_vq->vq_num;
1293 maps = netq->netq_maps;
1294 hdrs = netq->netq_maps_kva;
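vioif_assign_mem() itself does not match defs:netq, but its uses at 1215 and 1257 (carving netq_maps_kva and then the netq_maps array out of one kmem block whose size was summed at 1244) imply a minimal bump-pointer helper along these lines (a sketch, not listed source):

    static void *
    vioif_assign_mem(intptr_t *p, size_t size)
    {
        intptr_t rv;

        /* Hand out the current offset, then advance past it. */
        rv = *p;
        *p += size;

        return (void *)rv;
    }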
1461 struct vioif_netqueue *netq;
1469 netq = &sc->sc_netqs[qid];
1473 netq->netq_vq = &sc->sc_vqs[qid];
1474 netq->netq_stopping = false;
1475 netq->netq_running_handle = false;
1479 snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
1482 mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
1483 virtio_init_vq(vsc, vq, qid, params[dir].intrhand, netq);
1490 netq->netq_vq = vq;
1492 netq->netq_softint = softint_establish(softint_flags,
1493 params[dir].sihand, netq);
1494 if (netq->netq_softint == NULL) {
1500 vioif_work_set(&netq->netq_work, params[dir].sihand, netq);
1505 netq->netq_ctx = rxc;
1510 netq->netq_ctx = (void *)txc;
1512 vioif_deferred_transmit, netq);
1528 netq->netq_ctx = NULL;
1542 vioif_work_set(&netq->netq_work, NULL, NULL);
1543 if (netq->netq_softint != NULL) {
1544 softint_disestablish(netq->netq_softint);
1545 netq->netq_softint = NULL;
1549 mutex_destroy(&netq->netq_lock);
1550 netq->netq_vq = NULL;
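Lines 1528-1550 are the error path of the setup just above: the unwind runs in reverse order of construction and NULLs each handle as it goes, so running it on a partially built queue is harmless. Consolidated (the label is an assumption):

    err:
        netq->netq_ctx = NULL;
        vioif_work_set(&netq->netq_work, NULL, NULL);
        if (netq->netq_softint != NULL) {
            softint_disestablish(netq->netq_softint);
            netq->netq_softint = NULL;
        }
        mutex_destroy(&netq->netq_lock);
        netq->netq_vq = NULL;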
1559 struct vioif_netqueue *netq;
1564 netq = &sc->sc_netqs[qid];
1566 if (netq->netq_vq == NULL)
1569 netq = &sc->sc_netqs[qid];
1573 rxc = netq->netq_ctx;
1574 netq->netq_ctx = NULL;
1578 txc = netq->netq_ctx;
1579 netq->netq_ctx = NULL;
1586 softint_disestablish(netq->netq_softint);
1587 virtio_free_vq(vsc, netq->netq_vq);
1588 mutex_destroy(&netq->netq_lock);
1589 netq->netq_vq = NULL;
1593 vioif_net_sched_handle(struct vioif_softc *sc, struct vioif_netqueue *netq)
1596 KASSERT(mutex_owned(&netq->netq_lock));
1597 KASSERT(!netq->netq_stopping);
1599 if (netq->netq_workqueue) {
1600 vioif_work_add(sc->sc_txrx_workqueue, &netq->netq_work);
1602 softint_schedule(netq->netq_softint);
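Lines 1593-1602 cover vioif_net_sched_handle() nearly in full; adding only braces and the else yields the whole dispatcher. Each queue runs its handler either on the shared workqueue or as a softint, selected per queue by netq_workqueue (set at 2251 from a sysctl):

    static void
    vioif_net_sched_handle(struct vioif_softc *sc, struct vioif_netqueue *netq)
    {

        KASSERT(mutex_owned(&netq->netq_lock));
        KASSERT(!netq->netq_stopping);

        if (netq->netq_workqueue)
            vioif_work_add(sc->sc_txrx_workqueue, &netq->netq_work);
        else
            softint_schedule(netq->netq_softint);
    }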
1702 struct vioif_netqueue *netq;
1708 netq = &sc->sc_netqs[i];
1710 KASSERT(!netq->netq_stopping);
1711 KASSERT(!netq->netq_running_handle);
1713 enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
1715 virtio_stop_vq_intr(vsc, netq->netq_vq);
1717 mutex_enter(&netq->netq_lock);
1718 netq->netq_running_handle = true;
1719 vioif_net_sched_handle(sc, netq);
1720 mutex_exit(&netq->netq_lock);
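Lines 1713-1720 close the classic unmask race: if descriptors were queued while the vq interrupt was masked, virtio_start_vq_intr() returns nonzero, and the code re-masks and runs the handler by hand so those entries are not stranded until the next interrupt:

    enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
    if (enqueued) {
        /* Entries arrived while masked: process them now. */
        virtio_stop_vq_intr(vsc, netq->netq_vq);

        mutex_enter(&netq->netq_lock);
        netq->netq_running_handle = true;
        vioif_net_sched_handle(sc, netq);
        mutex_exit(&netq->netq_lock);
    }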
1728 struct vioif_netqueue *netq;
1733 netq = &sc->sc_netqs[i];
1735 virtio_stop_vq_intr(vsc, netq->netq_vq);
1744 vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_netqueue *netq)
1746 struct virtqueue *vq = netq->netq_vq;
1753 KASSERT(mutex_owned(&netq->netq_lock));
1755 rxc = netq->netq_ctx;
1783 map = &netq->netq_maps[slot];
1788 netq->netq_mbuf_load_failed.ev_count++;
1795 netq->netq_enqueue_failed.ev_count++;
1811 struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
1813 struct virtqueue *vq = netq->netq_vq;
1821 KASSERT(mutex_owned(&netq->netq_lock));
1838 map = &netq->netq_maps[slot];
1858 struct vioif_netqueue *netq)
1865 mutex_enter(&netq->netq_lock);
1867 vq_num = netq->netq_vq->vq_num;
1869 more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL);
1875 map = &netq->netq_maps[i];
1884 mutex_exit(&netq->netq_lock);
1890 struct vioif_netqueue *netq = xnetq;
1891 struct virtqueue *vq = netq->netq_vq;
1898 KASSERT(mutex_owned(&netq->netq_lock));
1899 KASSERT(!netq->netq_stopping);
1901 more = vioif_rx_deq_locked(sc, vsc, netq, limit, &ndeq);
1903 vioif_populate_rx_mbufs_locked(sc, netq);
1906 netq);
1910 enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
1912 virtio_stop_vq_intr(vsc, netq->netq_vq);
1913 vioif_net_sched_handle(sc, netq);
1917 netq->netq_running_handle = false;
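Lines 1890-1917 give enough of vioif_rx_handle_locked() to reconstruct its control flow; the sketch below fills in the declarations and the softc derivation (assumed):

    static void
    vioif_rx_handle_locked(void *xnetq, u_int limit)
    {
        struct vioif_netqueue *netq = xnetq;
        struct virtqueue *vq = netq->netq_vq;
        struct virtio_softc *vsc = vq->vq_owner;
        struct vioif_softc *sc = device_private(virtio_child(vsc));
        bool more;
        int enqueued;
        size_t ndeq;

        KASSERT(mutex_owned(&netq->netq_lock));
        KASSERT(!netq->netq_stopping);

        more = vioif_rx_deq_locked(sc, vsc, netq, limit, &ndeq);
        if (ndeq > 0)
            vioif_populate_rx_mbufs_locked(sc, netq);

        if (more) {
            /* Budget exhausted: reschedule instead of hogging the CPU. */
            vioif_net_sched_handle(sc, netq);
            return;
        }

        enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
        if (enqueued) {
            /* Work slipped in while masked: keep the handle alive. */
            virtio_stop_vq_intr(vsc, netq->netq_vq);
            vioif_net_sched_handle(sc, netq);
            return;
        }

        /* Ring drained, interrupts re-armed: release the handle. */
        netq->netq_running_handle = false;
    }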
1923 struct vioif_netqueue *netq = arg;
1924 struct virtqueue *vq = netq->netq_vq;
1929 mutex_enter(&netq->netq_lock);
1932 if (netq->netq_running_handle)
1935 if (netq->netq_stopping)
1938 netq->netq_running_handle = true;
1942 vioif_rx_handle_locked(netq, limit);
1945 mutex_exit(&netq->netq_lock);
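Lines 1923-1945 are the hardware interrupt side. Reconstructed below (the done label, the limit field name, and the return value are assumptions; the TX interrupt at 2233-2256 has the same shape, and the rescheduled handler at 1952-1971 differs only in asserting the claim rather than testing it):

    static int
    vioif_rx_intr(void *arg)
    {
        struct vioif_netqueue *netq = arg;
        struct virtqueue *vq = netq->netq_vq;
        struct virtio_softc *vsc = vq->vq_owner;
        struct vioif_softc *sc = device_private(virtio_child(vsc));
        u_int limit;

        mutex_enter(&netq->netq_lock);

        /* A softint/workqueue instance already holds the handle. */
        if (netq->netq_running_handle)
            goto done;

        /* The interface is stopping: start no new work. */
        if (netq->netq_stopping)
            goto done;

        netq->netq_running_handle = true;

        limit = sc->sc_rx_intr_process_limit;    /* field name assumed */
        virtio_stop_vq_intr(vsc, vq);
        vioif_rx_handle_locked(netq, limit);

    done:
        mutex_exit(&netq->netq_lock);
        return 1;
    }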
1952 struct vioif_netqueue *netq = xnetq;
1953 struct virtqueue *vq = netq->netq_vq;
1958 mutex_enter(&netq->netq_lock);
1960 KASSERT(netq->netq_running_handle);
1962 if (netq->netq_stopping) {
1963 netq->netq_running_handle = false;
1968 vioif_rx_handle_locked(netq, limit);
1971 mutex_exit(&netq->netq_lock);
1979 vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
1984 struct virtqueue *vq = netq->netq_vq;
1990 KASSERT(mutex_owned(&netq->netq_lock));
1992 if (netq->netq_stopping ||
1996 txc = netq->netq_ctx;
2022 map = &netq->netq_maps[slot];
2041 netq->netq_mbuf_load_failed.ev_count++;
2053 netq->netq_enqueue_failed.ev_count++;
2075 struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
2077 struct virtqueue *vq = netq->netq_vq;
2085 KASSERT(mutex_owned(&netq->netq_lock));
2102 map = &netq->netq_maps[slot];
2120 struct vioif_netqueue *netq)
2128 mutex_enter(&netq->netq_lock);
2130 txc = netq->netq_ctx;
2131 vq_num = netq->netq_vq->vq_num;
2134 more = vioif_tx_deq_locked(sc, vsc, netq, vq_num, NULL);
2140 map = &netq->netq_maps[i];
2152 mutex_exit(&netq->netq_lock);
2156 vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
2162 vioif_send_common_locked(ifp, netq, false);
2167 vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
2170 vioif_send_common_locked(ifp, netq, true);
2176 struct vioif_netqueue *netq = arg;
2177 struct virtio_softc *vsc = netq->netq_vq->vq_owner;
2181 mutex_enter(&netq->netq_lock);
2182 vioif_send_common_locked(ifp, netq, true);
2183 mutex_exit(&netq->netq_lock);
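Lines 2176-2183 reconstruct to the deferred-transmit softint almost verbatim; only the softc and ifp derivations are filled in (assumed):

    static void
    vioif_deferred_transmit(void *arg)
    {
        struct vioif_netqueue *netq = arg;
        struct virtio_softc *vsc = netq->netq_vq->vq_owner;
        struct vioif_softc *sc = device_private(virtio_child(vsc));
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        /* Retry the send that vioif_transmit() skipped on contention. */
        mutex_enter(&netq->netq_lock);
        vioif_send_common_locked(ifp, netq, true);
        mutex_exit(&netq->netq_lock);
    }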
2187 vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit)
2189 struct virtqueue *vq = netq->netq_vq;
2190 struct vioif_tx_context *txc = netq->netq_ctx;
2198 KASSERT(mutex_owned(&netq->netq_lock));
2199 KASSERT(!netq->netq_stopping);
2201 more = vioif_tx_deq_locked(sc, vsc, netq, limit, &ndeq);
2208 vioif_net_sched_handle(sc, netq);
2217 vioif_net_sched_handle(sc, netq);
2221 netq->netq_running_handle = false;
2224 if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)])
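Line 2224 singles out TX queue 0 at the tail of vioif_tx_handle_locked(): once the handle is released, only the queue serving the legacy if_start path needs to re-kick the ifnet send queue. The call shown is an assumption consistent with that test:

    netq->netq_running_handle = false;

    /* Only TX queue 0 feeds if_start, so only it re-kicks the ifnet. */
    if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)])
        if_schedule_deferred_start(ifp);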
2233 struct vioif_netqueue *netq = arg;
2234 struct virtqueue *vq = netq->netq_vq;
2239 mutex_enter(&netq->netq_lock);
2242 if (netq->netq_running_handle)
2245 if (netq->netq_stopping)
2248 netq->netq_running_handle = true;
2251 netq->netq_workqueue = sc->sc_txrx_workqueue_sysctl;
2253 vioif_tx_handle_locked(netq, limit);
2256 mutex_exit(&netq->netq_lock);
2263 struct vioif_netqueue *netq = xnetq;
2264 struct virtqueue *vq = netq->netq_vq;
2269 mutex_enter(&netq->netq_lock);
2271 KASSERT(netq->netq_running_handle);
2273 if (netq->netq_stopping) {
2274 netq->netq_running_handle = false;
2279 vioif_tx_handle_locked(netq, limit);
2282 mutex_exit(&netq->netq_lock);
2729 struct vioif_netqueue *netq;
2745 netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
2747 mutex_enter(&netq->netq_lock);
2748 txc = netq->netq_ctx;
2750 mutex_exit(&netq->netq_lock);