
Lines Matching defs:rxr

103 #define IXGBE_M_ADJ(sc, rxr, mp)					\
104 if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN)) \
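
Lines 103-104 are the head of the IXGBE_M_ADJ() macro; the continuation (not included in this match list) shifts the mbuf payload by ETHER_ALIGN when the cluster has room, so the IP header that follows the 14-byte Ethernet header lands on a 4-byte boundary. A minimal, self-contained model of just that size check; ETHER_ALIGN's value and the toy structures below are stand-ins, not the driver's definitions:

#include <stdio.h>

#define ETHER_ALIGN 2	/* shifting the Ethernet header by 2 aligns the IP header */

struct toy_ring  { int mbuf_sz; };
struct toy_softc { int max_frame_size; };

/* Mirror of the condition on line 104: only give up ETHER_ALIGN bytes of
 * the cluster when the largest expected frame still fits afterwards. */
static int
should_ether_align(const struct toy_softc *sc, const struct toy_ring *rxr)
{
	return sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN);
}

int
main(void)
{
	struct toy_ring  rxr = { .mbuf_sz = 2048 };		/* MCLBYTES-sized cluster */
	struct toy_softc sc  = { .max_frame_size = 1518 };	/* plain Ethernet frame */

	printf("align payload: %s\n", should_ether_align(&sc, &rxr) ? "yes" : "no");
	return 0;
}
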
1289 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1291 struct ixgbe_softc *sc = rxr->sc;
1297 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1314 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1320 if (rxr->mbuf_sz == MCLBYTES)
1322 else if (rxr->mbuf_sz == MJUMPAGESIZE)
1324 else if (rxr->mbuf_sz == MJUM9BYTES)
1329 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1339 rxr->hw_rsc = TRUE;
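
ixgbe_setup_hw_rsc() enables hardware receive-side coalescing for one ring: it reads the queue's RSCCTL register (rxr->me selects the queue), picks a per-packet descriptor limit from the cluster size the ring uses (lines 1320-1324), writes the register back, and marks rxr->hw_rsc. A compilable sketch of that size-to-limit choice; the cluster sizes and limits are illustrative constants, not the kernel's MCLBYTES/MJUMPAGESIZE/MJUM9BYTES macros or the IXGBE_RSCCTL_MAXDESC_* register bits:

#include <stdio.h>

/* Illustrative mapping only: smaller clusters need more descriptors to hold
 * one coalesced packet, so they get a higher per-packet descriptor limit. */
static unsigned
rsc_maxdesc_for_cluster(size_t mbuf_sz)
{
	if (mbuf_sz == 2048)		/* ~MCLBYTES */
		return 16;
	else if (mbuf_sz == 4096)	/* ~MJUMPAGESIZE */
		return 8;
	else if (mbuf_sz == 9216)	/* ~MJUM9BYTES */
		return 4;
	return 1;			/* 16K cluster: a single descriptor suffices */
}

int
main(void)
{
	size_t sizes[] = { 2048, 4096, 9216, 16384 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("cluster %zu -> up to %u descriptors per coalesced packet\n",
		    sizes[i], rsc_maxdesc_for_cluster(sizes[i]));
	return 0;
}
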
1353 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1355 struct ixgbe_softc *sc = rxr->sc;
1361 i = rxr->next_to_refresh;
1363 if (++i == rxr->num_desc)
1367 rxbuf = &rxr->rx_buffers[i];
1371 IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
1374 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1375 IXGBE_M_ADJ(sc, rxr, mp);
1384 ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1385 error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1396 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1398 rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1401 rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1407 rxr->next_to_refresh = i;
1408 if (++i == rxr->num_desc)
1414 IXGBE_WRITE_REG(&sc->hw, rxr->tail, rxr->next_to_refresh);
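
ixgbe_refresh_mbufs() re-arms descriptors the completion path has drained: starting one past next_to_refresh it attaches a fresh cluster to each empty slot, loads it into the slot's DMA map, stores the physical address in the descriptor, and finally writes the ring tail (line 1414) so the hardware sees the new buffers. A standalone model of the index arithmetic and the single tail write, with malloc standing in for mbuf allocation and a plain field standing in for the tail register:

#include <stdlib.h>
#include <stdio.h>

#define NUM_DESC 8

struct toy_ring {
	void	*buf[NUM_DESC];		/* stands in for rx_buffers[i].buf */
	int	next_to_refresh;	/* last slot the hardware may use */
	int	tail;			/* stands in for the RDT register write */
};

/* Refresh every slot up to (but not including) 'limit', wrapping at
 * NUM_DESC the same way lines 1363 and 1408 do. */
static void
ring_refresh(struct toy_ring *rxr, int limit)
{
	int i = rxr->next_to_refresh;
	int refreshed = 0;

	while ((i = (i + 1) % NUM_DESC) != limit) {
		if (rxr->buf[i] == NULL &&
		    (rxr->buf[i] = malloc(2048)) == NULL)
			break;			/* allocation failed: stop, retry later */
		rxr->next_to_refresh = i;
		refreshed = 1;
	}
	if (refreshed)
		rxr->tail = rxr->next_to_refresh;	/* tell the "hardware" once */
}

int
main(void)
{
	struct toy_ring rxr = { .next_to_refresh = NUM_DESC - 1 };

	ring_refresh(&rxr, 5);		/* pretend slots 0..4 were consumed */
	printf("next_to_refresh=%d tail=%d\n", rxr.next_to_refresh, rxr.tail);
	for (int i = 0; i < NUM_DESC; i++)
		free(rxr.buf[i]);
	return 0;
}
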
1428 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1430 struct ixgbe_softc *sc = rxr->sc;
1435 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1436 rxr->rx_buffers = kmem_zalloc(bsize, KM_SLEEP);
1446 &rxr->ptag);
1452 for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
1453 rxbuf = &rxr->rx_buffers[i];
1454 error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
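
ixgbe_allocate_receive_buffers() builds the software side of a ring: a zeroed array of num_desc ixgbe_rx_buf entries (lines 1435-1436), one DMA tag for the ring, and one DMA map per entry (line 1454). A userland analogue of that shape, with calloc in place of kmem_zalloc and an opaque allocation in place of each DMA map; every name here is a toy stand-in:

#include <stdlib.h>
#include <stdio.h>

struct toy_rx_buf {
	void	*pmap;		/* placeholder for the per-descriptor DMA map */
	void	*mbuf;		/* placeholder for the attached cluster */
};

struct toy_rx_ring {
	int			num_desc;
	struct toy_rx_buf	*rx_buffers;
};

static int
alloc_receive_buffers(struct toy_rx_ring *rxr)
{
	/* One soft-state entry per hardware descriptor, zeroed up front. */
	rxr->rx_buffers = calloc(rxr->num_desc, sizeof(struct toy_rx_buf));
	if (rxr->rx_buffers == NULL)
		return -1;

	for (int i = 0; i < rxr->num_desc; i++) {
		/* stands in for ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap) */
		rxr->rx_buffers[i].pmap = malloc(1);
		if (rxr->rx_buffers[i].pmap == NULL)
			return -1;	/* caller unwinds, as the driver does on error */
	}
	return 0;
}

int
main(void)
{
	struct toy_rx_ring rxr = { .num_desc = 4 };

	if (alloc_receive_buffers(&rxr) == 0)
		printf("allocated %d buffer slots\n", rxr.num_desc);
	for (int i = 0; rxr.rx_buffers != NULL && i < rxr.num_desc; i++)
		free(rxr.rx_buffers[i].pmap);
	free(rxr.rx_buffers);
	return 0;
}
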
1474 ixgbe_free_receive_ring(struct rx_ring *rxr)
1476 for (int i = 0; i < rxr->num_desc; i++) {
1477 ixgbe_rx_discard(rxr, i);
1487 ixgbe_setup_receive_ring(struct rx_ring *rxr)
1493 struct lro_ctrl *lro = &rxr->lro;
1496 struct netmap_adapter *na = NA(rxr->sc->ifp);
1501 sc = rxr->sc;
1507 IXGBE_RX_LOCK(rxr);
1511 slot = netmap_reset(na, NR_RX, rxr->me, 0);
1516 bzero((void *)rxr->rx_base, rsize);
1518 rxr->mbuf_sz = sc->rx_mbuf_sz;
1521 ixgbe_free_receive_ring(rxr);
1524 for (int i = 0; i < rxr->num_desc; i++) {
1527 rxbuf = &rxr->rx_buffers[i];
1538 int sj = netmap_idx_n2k(na->rx_rings[rxr->me], i);
1543 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1545 rxr->rx_base[i].read.pkt_addr = htole64(paddr);
1554 IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
1559 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1560 IXGBE_M_ADJ(sc, rxr, mp);
1562 error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
1574 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1577 rxr->rx_base[i].read.pkt_addr =
1583 rxr->next_to_check = 0;
1584 rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */
1586 rxr->lro_enabled = FALSE;
1588 rxr->discard_multidesc = false;
1589 IXGBE_EVC_STORE(&rxr->rx_copies, 0);
1591 IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
1593 IXGBE_EVC_STORE(&rxr->rx_packets, 0);
1596 rxr->vtag_strip = FALSE;
1598 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1606 ixgbe_setup_hw_rsc(rxr);
1620 rxr->lro_enabled = TRUE;
1625 IXGBE_RX_UNLOCK(rxr);
1630 ixgbe_free_receive_ring(rxr);
1631 IXGBE_RX_UNLOCK(rxr);
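
ixgbe_setup_receive_ring() runs with the ring lock held: it zeroes the descriptor area, frees whatever buffers are left from a previous run, attaches a fresh cluster to every slot (or a netmap-provided buffer when netmap_reset() returns a slot array for the ring), records next_to_check = 0 and next_to_refresh = num_desc - 1 to mark the ring as fully stocked (line 1584), syncs the descriptor memory, and only then enables RSC or LRO. A compact model of that "fully allocated" initialization; the types and addresses are toys, not the driver's DMA'd descriptors:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define NUM_DESC 8

struct toy_desc { uintptr_t pkt_addr; };

struct toy_ring {
	struct toy_desc	rx_base[NUM_DESC];	/* descriptor ring (DMA memory in the driver) */
	void		*buf[NUM_DESC];		/* per-slot clusters */
	int		next_to_check;		/* first descriptor to look at on completion */
	int		next_to_refresh;	/* last descriptor owned by hardware */
};

static int
ring_setup(struct toy_ring *rxr)
{
	/* Wipe stale descriptors, then hand every slot a buffer. */
	memset(rxr->rx_base, 0, sizeof(rxr->rx_base));
	for (int i = 0; i < NUM_DESC; i++) {
		free(rxr->buf[i]);			/* drop leftovers from a prior run */
		if ((rxr->buf[i] = malloc(2048)) == NULL)
			return -1;
		rxr->rx_base[i].pkt_addr = (uintptr_t)rxr->buf[i];
	}
	rxr->next_to_check = 0;
	rxr->next_to_refresh = NUM_DESC - 1;	/* "fully allocated", as on line 1584 */
	return 0;
}

int
main(void)
{
	static struct toy_ring rxr;

	if (ring_setup(&rxr) == 0)
		printf("ring ready: check=%d refresh=%d\n",
		    rxr.next_to_check, rxr.next_to_refresh);
	for (int i = 0; i < NUM_DESC; i++)
		free(rxr.buf[i]);
	return 0;
}
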
1642 struct rx_ring *rxr = sc->rx_rings;
1646 for (j = 0; j < sc->num_queues; j++, rxr++)
1647 if (ixgbe_setup_receive_ring(rxr))
1658 rxr
1659 IXGBE_RX_LOCK(rxr);
1660 ixgbe_free_receive_ring(rxr);
1661 IXGBE_RX_UNLOCK(rxr);
1674 struct rx_ring *rxr = sc->rx_rings;
1678 for (int i = 0; i < sc->num_queues; i++, rxr++) {
1679 ixgbe_free_receive_buffers(rxr);
1682 tcp_lro_free(&rxr->lro);
1685 ixgbe_dma_free(sc, &rxr->rxdma);
1686 IXGBE_RX_LOCK_DESTROY(rxr);
1697 ixgbe_free_receive_buffers(struct rx_ring *rxr)
1699 struct ixgbe_softc *sc = rxr->sc;
1705 if (rxr->rx_buffers != NULL) {
1707 rxbuf = &rxr->rx_buffers[i];
1708 ixgbe_rx_discard(rxr, i);
1710 ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1715 if (rxr->rx_buffers != NULL) {
1716 kmem_free(rxr->rx_buffers,
1717 sizeof(struct ixgbe_rx_buf) * rxr->num_desc);
1718 rxr->rx_buffers = NULL;
1722 if (rxr->ptag != NULL) {
1723 ixgbe_dma_tag_destroy(rxr->ptag);
1724 rxr->ptag = NULL;
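
ixgbe_free_receive_buffers() tears things down in the reverse order of allocation: each slot is discarded (ixgbe_rx_discard(), line 1708), each per-slot DMA map is destroyed, the rx_buffers array is freed with the same num_desc-based size it was allocated with, and finally the DMA tag is destroyed; both pointers are cleared so a repeat call is harmless. A short model of that idempotent teardown, the inverse of the allocation sketch above and again built from toy stand-ins:

#include <stdlib.h>

struct toy_rx_buf  { void *pmap; void *mbuf; };
struct toy_rx_ring {
	int			num_desc;
	struct toy_rx_buf	*rx_buffers;
	void			*ptag;		/* placeholder for the ring's DMA tag */
};

static void
free_receive_buffers(struct toy_rx_ring *rxr)
{
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < rxr->num_desc; i++) {
			free(rxr->rx_buffers[i].mbuf);	/* ~ ixgbe_rx_discard() */
			rxr->rx_buffers[i].mbuf = NULL;
			free(rxr->rx_buffers[i].pmap);	/* ~ ixgbe_dmamap_destroy() */
			rxr->rx_buffers[i].pmap = NULL;
		}
		free(rxr->rx_buffers);
		rxr->rx_buffers = NULL;		/* safe to call again */
	}
	if (rxr->ptag != NULL) {
		free(rxr->ptag);		/* ~ ixgbe_dma_tag_destroy() */
		rxr->ptag = NULL;
	}
}

int
main(void)
{
	struct toy_rx_ring rxr = { .num_desc = 0 };

	free_receive_buffers(&rxr);	/* NULL pointers: nothing to do, no crash */
	return 0;
}
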
1734 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1747 if (rxr->lro_enabled &&
1762 if (rxr->lro.lro_cnt != 0)
1763 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
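
ixgbe_rx_input() is the hand-off point to the stack: when LRO is enabled and the frame is a candidate, the mbuf is offered to tcp_lro_rx() first and only falls through to the ordinary input path when LRO declines it (tcp_lro_rx() returns 0 on acceptance, as line 1763 tests). A small decision-only sketch of that dispatch; lro_accepts() and stack_input() are hypothetical stand-ins, not driver or kernel functions:

#include <stdio.h>
#include <stdbool.h>

struct pkt { int is_tcp; int len; };

static bool lro_enabled = true;

/* Hypothetical: models tcp_lro_rx() returning 0 when it queued the packet. */
static int
lro_accepts(const struct pkt *p)
{
	return p->is_tcp ? 0 : -1;
}

/* Hypothetical stand-in for the normal interface input path. */
static void
stack_input(const struct pkt *p)
{
	printf("passed %d-byte packet to the stack\n", p->len);
}

static void
rx_input(const struct pkt *p)
{
	if (lro_enabled && lro_accepts(p) == 0)
		return;		/* LRO took it; it is flushed later in a batch */
	stack_input(p);
}

int
main(void)
{
	struct pkt tcp = { .is_tcp = 1, .len = 1448 };
	struct pkt udp = { .is_tcp = 0, .len = 512 };

	rx_input(&tcp);		/* coalesced by "LRO", nothing printed */
	rx_input(&udp);		/* goes straight to the stack */
	return 0;
}
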
1775 ixgbe_rx_discard(struct rx_ring *rxr, int i)
1779 rbuf = &rxr->rx_buffers[i];
1788 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1790 ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1795 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1797 ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1821 struct rx_ring *rxr = que->rxr;
1824 struct lro_ctrl *lro = &rxr->lro;
1833 bool discard_multidesc = rxr->discard_multidesc;
1840 IXGBE_RX_LOCK(rxr);
1845 if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1846 IXGBE_RX_UNLOCK(rxr);
1853 if ((rxr->next_to_check + limit) <= rxr->num_desc) {
1859 numdesc = rxr->num_desc - rxr->next_to_check;
1864 bus_dmamap_sync(rxr->rxdma.dma_tag->dt_dmat,
1865 rxr->rxdma.dma_map,
1866 sizeof(union ixgbe_adv_rx_desc) * rxr->next_to_check,
1875 for (i = rxr->next_to_check;
1901 bus_dmamap_sync(rxr->rxdma.dma_tag->dt_dmat,
1902 rxr->rxdma.dma_map, 0,
1906 cur = &rxr->rx_base[i];
1922 rbuf = &rxr->rx_buffers[i];
1936 IXGBE_EVC_ADD(&rxr->rx_discarded, 1);
1937 ixgbe_rx_discard(rxr, i);
1952 IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
1959 IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
1986 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
2007 if (rxr->hw_rsc == TRUE) {
2009 rxr->rsc_num += (rsc - 1);
2021 nbuf = &rxr->rx_buffers[nextp];
2039 newmp->m_pkthdr.len = newmp->m_len = rxr->mbuf_sz;
2040 IXGBE_M_ADJ(sc, rxr, newmp);
2066 IXGBE_EVC_ADD(&rxr->rx_copies, 1);
2073 = rxr->mbuf_sz;
2074 IXGBE_M_ADJ(sc, rxr, newmp);
2094 ++rxr->packets;
2095 IXGBE_EVC_ADD(&rxr->rx_packets, 1);
2097 rxr->bytes += sendmp->m_pkthdr.len;
2098 IXGBE_EVC_ADD(&rxr->rx_bytes, sendmp->m_pkthdr.len);
2100 if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
2169 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2173 if (++i == rxr->num_desc) {
2177 rxr->next_to_check = i;
2181 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2185 ixgbe_refresh_mbufs(rxr, i);
2191 rxr->discard_multidesc = discard_multidesc;
2194 if (ixgbe_rx_unrefreshed(rxr))
2195 ixgbe_refresh_mbufs(rxr, i);
2197 IXGBE_RX_UNLOCK(rxr);
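
The fragments between lines 1821 and 2197 belong to the RX completion routine, ixgbe_rxeof(): with the ring locked it syncs only the descriptor range it is about to read (lines 1853-1866), walks descriptors from next_to_check while the hardware has marked them done, discards bad or mid-discard frames (lines 1936-1937), chains multi-descriptor and RSC packets through the precomputed next slot (lines 2007-2021), copies small frames into a fresh mbuf (rx_copies, line 2066), accounts packets and bytes, attaches the stripped VLAN tag when vtag_strip is set, hands the result to ixgbe_rx_input(), and periodically re-arms the ring with ixgbe_refresh_mbufs(). A compressed, standalone model of the walk-and-account loop; the status flags and descriptor layout are simplifications, not the hardware format:

#include <stdio.h>
#include <string.h>

#define NUM_DESC	8
#define DD		0x1	/* "descriptor done": hardware wrote a packet here */
#define ERR		0x2	/* hardware reported a bad frame */

struct toy_desc { unsigned status; unsigned len; };

struct toy_ring {
	struct toy_desc	desc[NUM_DESC];
	int		next_to_check;
	unsigned long	packets, bytes, discarded;
};

static void
rx_eof(struct toy_ring *rxr, int limit)
{
	int i = rxr->next_to_check;

	for (int count = 0; count < limit; count++) {
		struct toy_desc *cur = &rxr->desc[i];

		if ((cur->status & DD) == 0)
			break;			/* hardware hasn't filled this one yet */
		if (cur->status & ERR) {
			rxr->discarded++;	/* ~ ixgbe_rx_discard(): drop, reuse slot */
		} else {
			rxr->packets++;		/* ~ rx_packets / rx_bytes accounting */
			rxr->bytes += cur->len;
		}
		memset(cur, 0, sizeof(*cur));	/* slot now waits for a refresh */
		if (++i == NUM_DESC)		/* wrap exactly as on line 2173 */
			i = 0;
		rxr->next_to_check = i;
	}
	/* The driver would call ixgbe_refresh_mbufs(rxr, i) here to re-arm slots. */
}

int
main(void)
{
	struct toy_ring rxr = { .desc = {
		{ DD, 1514 }, { DD, 60 }, { DD | ERR, 0 }, { DD, 9000 }, { 0, 0 },
	} };

	rx_eof(&rxr, NUM_DESC);
	printf("packets=%lu bytes=%lu discarded=%lu next_to_check=%d\n",
	    rxr.packets, rxr.bytes, rxr.discarded, rxr.next_to_check);
	return 0;
}
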
2361 struct rx_ring *rxr;
2438 rxr = &sc->rx_rings[i];
2440 rxr->sc = sc;
2443 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
2446 rxr->me = i;
2448 rxr->num_desc = sc->num_rx_desc;
2451 mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2453 if (ixgbe_dma_malloc(sc, rsize, &rxr->rxdma,
2460 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2461 bzero((void *)rxr->rx_base, rsize);
2464 if (ixgbe_allocate_receive_buffers(rxr)) {
2480 que->rxr = &sc->rx_rings[i];
2489 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
2490 ixgbe_dma_free(sc, &rxr->rxdma);
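
In the queue-allocation path (lines 2361-2490) each rx_ring is wired up before its buffers exist: back-pointer to the softc, ring index (the plain queue number, or a VF-translated one under SR-IOV), descriptor count, per-ring mutex, DMA memory for the descriptor array (zeroed and aliased as rx_base), and only then the buffer soft state; on failure the rings built so far are unwound, which is what the trailing loop over rxconf does. A small model of that construct-or-unwind pattern, using toy types rather than the driver structures:

#include <stdlib.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct toy_ring {
	int	me;		/* ring index, like rxr->me */
	void	*rx_base;	/* stands in for the DMA'd descriptor array */
};

static int
alloc_rings(struct toy_ring rings[NUM_QUEUES])
{
	int built;

	for (built = 0; built < NUM_QUEUES; built++) {
		rings[built].me = built;
		rings[built].rx_base = calloc(256, 16);	/* ~ ixgbe_dma_malloc + bzero */
		if (rings[built].rx_base == NULL)
			goto fail;
	}
	return 0;
fail:
	/* Unwind only the rings that were fully set up, mirroring the
	 * "for (rxr = sc->rx_rings; rxconf > 0; ...)" cleanup loop. */
	while (built-- > 0) {
		free(rings[built].rx_base);
		rings[built].rx_base = NULL;
	}
	return -1;
}

int
main(void)
{
	static struct toy_ring rings[NUM_QUEUES];

	if (alloc_rings(rings) == 0)
		printf("%d rx rings ready\n", NUM_QUEUES);
	for (int i = 0; i < NUM_QUEUES; i++)
		free(rings[i].rx_base);
	return 0;
}
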