Lines Matching defs:rxr
296 igc_rxdesc_sync(struct rx_ring *rxr, int id, int ops)
299 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
682 struct rx_ring *rxr = &sc->rx_rings[iq];
687 rxr->sc = sc;
688 rxr->rxr_igcq = &sc->queues[iq];
689 rxr->me = iq;
691 timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);
693 if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
698 rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
699 memset(rxr->rx_base, 0, rsize);
716 q->rxr = &sc->rx_rings[iq];
722 for (struct rx_ring *rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
723 igc_dma_free(sc, &rxr->rxdma);
768 struct rx_ring *rxr = &sc->rx_rings[iq];
770 igc_dma_free(sc, &rxr->rxdma);
1561 struct rx_ring *rxr = &sc->rx_rings[iq];
1563 mutex_enter(&rxr->rxr_lock);
1564 igc_rxfill(rxr);
1565 mutex_exit(&rxr->rxr_lock);
1918 struct rx_ring *rxr = &sc->rx_rings[iq];
1920 igc_clear_receive_status(rxr);
2012 struct rx_ring *rxr = &sc->rx_rings[iq];
2016 ifr[iq].ifr_info = rxr->rx_ring;
2028 igc_rxfill(struct rx_ring *rxr)
2030 struct igc_softc *sc = rxr->sc;
2034 if (igc_get_buf(rxr, id, false)) {
2035 panic("%s: msix=%d i=%d\n", __func__, rxr->me, id);
2040 rxr->last_desc_filled = id;
2041 IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
2042 rxr->next_to_check = 0;
2046 igc_rxrefill(struct rx_ring *rxr, int end)
2048 struct igc_softc *sc = rxr->sc;
2051 for (id = rxr->next_to_check; id != end; id = igc_rxdesc_incr(sc, id)) {
2052 if (igc_get_buf(rxr, id, true)) {
2054 panic("%s: msix=%d id=%d\n", __func__, rxr->me, id);
2060 rxr->last_desc_filled == id ? "same" : "diff",
2061 rxr->last_desc_filled, id);
2062 rxr->last_desc_filled = id;
2063 IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
2074 igc_rxeof(struct rx_ring *rxr, u_int limit)
2076 struct igc_softc *sc = rxr->sc;
2077 struct igc_queue *q = rxr->rxr_igcq;
2082 id = rxr->next_to_check;
2084 union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
2088 igc_rxdesc_sync(rxr, id,
2094 igc_rxdesc_sync(rxr, id,
2100 igc_rxdesc_sync(rxr, id,
2111 rxbuf = &rxr->rx_buffers[id];
2113 bus_dmamap_sync(rxr->rxdma.dma_tag, map,
2115 bus_dmamap_unload(rxr->rxdma.dma_tag, map);
2136 igc_rxdesc_sync(rxr, id,
2157 id, rxr->last_desc_filled);
2166 nxbuf = &rxr->rx_buffers[nextp];
2218 DPRINTF(RX, "fill queue[%d]\n", rxr->me);
2219 igc_rxrefill(rxr, id);
2222 rxr->next_to_check == id ? "same" : "diff",
2223 rxr->next_to_check, id);
2224 rxr->next_to_check = id;
2573 igc_get_buf(struct rx_ring *rxr, int id, bool strict)
2575 struct igc_softc *sc = rxr->sc;
2576 struct igc_queue *q = rxr->rxr_igcq;
2577 struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
2606 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, map, m,
2613 bus_dmamap_sync(rxr->rxdma.dma_tag, map, 0,
2617 union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
2618 igc_rxdesc_sync(rxr, id, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2620 igc_rxdesc_sync(rxr, id, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2757 struct rx_ring *rxr = iq->rxr;
2771 mutex_enter(&rxr->rxr_lock);
2772 rxmore = igc_rxeof(rxr, rxlimit);
2773 mutex_exit(&rxr->rxr_lock);
2791 struct rx_ring *rxr = iq->rxr;
2833 mutex_enter(&rxr->rxr_lock);
2834 rxmore = igc_rxeof(rxr, rxlimit);
2835 mutex_exit(&rxr->rxr_lock);
2863 struct rx_ring *rxr = iq->rxr;
2880 mutex_enter(&rxr->rxr_lock);
2881 rxmore = igc_rxeof(rxr, rxlimit);
2882 mutex_exit(&rxr->rxr_lock);
3331 igc_allocate_receive_buffers(struct rx_ring *rxr)
3333 struct igc_softc *sc = rxr->sc;
3336 rxr->rx_buffers =
3340 struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
3342 error = bus_dmamap_create(rxr->rxdma.dma_tag, MCLBYTES, 1,
3350 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
3351 rxr->rxdma.dma_map->dm_mapsize,
3369 struct rx_ring *rxr = &sc->rx_rings[iq];
3371 if (igc_setup_receive_ring(rxr))
3387 igc_setup_receive_ring(struct rx_ring *rxr)
3389 struct igc_softc *sc = rxr->sc;
3394 memset(rxr->rx_base, 0, rsize);
3396 if (igc_allocate_receive_buffers(rxr))
3400 rxr->next_to_check = 0;
3401 rxr->last_desc_filled = 0;
3403 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3495 struct rx_ring *rxr = &sc->rx_rings[iq];
3497 rxr->rxdma.dma_map->dm_segs[0].ds_addr;
3511 IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
3540 struct rx_ring *rxr = &sc->rx_rings[iq];
3542 igc_free_receive_buffers(rxr);
3552 igc_free_receive_buffers(struct rx_ring *rxr)
3554 struct igc_softc *sc = rxr->sc;
3556 if (rxr->rx_buffers != NULL) {
3558 struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
3562 bus_dmamap_sync(rxr->rxdma.dma_tag, map,
3564 bus_dmamap_unload(rxr->rxdma.dma_tag, map);
3568 bus_dmamap_destroy(rxr->rxdma.dma_tag, map);
3571 kmem_free(rxr->rx_buffers,
3573 rxr->rx_buffers = NULL;
3576 mutex_destroy(&rxr->rxr_lock);
3585 igc_clear_receive_status(struct rx_ring *rxr)
3587 struct igc_softc *sc = rxr->sc;
3589 mutex_enter(&rxr->rxr_lock);
3592 union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
3594 igc_rxdesc_sync(rxr, id,
3597 igc_rxdesc_sync(rxr, id,
3601 mutex_exit(&rxr->rxr_lock);
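
The matches above touch every member of the per-queue receive ring state. As a reading aid, the following is a hedged sketch of what struct rx_ring appears to contain; every member type is inferred from how the field is used in the listed lines rather than copied from the driver headers, so the exact types (in particular rx_refill, rxdma and rx_ring) and integer widths are assumptions.

/*
 * Sketch of struct rx_ring inferred from the matched lines above;
 * member types are guesses based on usage and may differ from the
 * real driver definition.
 */
struct rx_ring {
	struct igc_softc	*sc;			/* back pointer, set at line 687 */
	struct igc_queue	*rxr_igcq;		/* owning queue, &sc->queues[iq] */
	int			 me;			/* ring index, used in IGC_RDT(rxr->me) */
	struct timeout		 rx_refill;		/* deferred refill, armed via timeout_set() */
	struct igc_dma_alloc	 rxdma;			/* descriptor ring DMA memory (dma_tag/dma_map/dma_vaddr) */
	union igc_adv_rx_desc	*rx_base;		/* descriptor array, aliases rxdma.dma_vaddr */
	struct igc_rx_buf	*rx_buffers;		/* per-slot mbuf and DMA map state */
	int			 last_desc_filled;	/* last slot handed to hardware, written to RDT */
	int			 next_to_check;		/* next slot igc_rxeof()/igc_rxrefill() will visit */
	kmutex_t		 rxr_lock;		/* serializes rxfill, rxrefill and rxeof */
	struct if_rxring	 rx_ring;		/* rx ring accounting exported as ifr_info */
};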