/src/sys/dev/ic/
  hmevar.h
     75: struct hme_ring sc_rb;                          (member in struct:hme_softc)
/src/sys/arch/hpcmips/dev/
  ucbsnd.c
    145: struct ring_buf sc_rb;                          (member in struct:ucbsnd_softc)
    229: ringbuf_allocate(&sc->sc_rb, TX39_SIBDMA_SIZE, UCBSND_BUFBLOCK);
    341: ringbuf_consumer_return(&sc->sc_rb);
    342: buf = ringbuf_consumer_get(&sc->sc_rb, &bufcnt);
    358: wakeup(&sc->sc_rb);
    537: ringbuf_reset(&sc->sc_rb);
    585: ringbuf_producer_return(&sc->sc_rb, bufsize);
    588: if (sc->sa_state == UCBSND_IDLE && ringbuf_full(&sc->sc_rb)) {
    619: while (!(buf = ringbuf_producer_get(&sc->sc_rb))) {
    620:     error = tsleep(&sc->sc_rb, PRIBIO, "ucbsnd", 1000)
    [all...]
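The ucbsnd.c hits trace a complete producer/consumer ring: attach allocates the ring (line 229), the write path claims free blocks with ringbuf_producer_get() and sleeps on the ring via tsleep() when none are available (lines 619-620), and the DMA-completion path drains filled blocks with ringbuf_consumer_get()/ringbuf_consumer_return() before wakeup() unblocks the writer (lines 341-358). Below is a minimal userland sketch of that pattern, not the NetBSD code: the struct layout and the simplified signatures are assumptions (the driver's ringbuf_producer_return() also takes the number of bytes written, and its ringbuf_consumer_get() returns a count through a pointer).

/*
 * Sketch only: names and layout are assumptions mirroring the
 * ringbuf_* call pattern above, not the NetBSD implementation.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring_buf {
	char	*rb_base;	/* backing storage */
	size_t	 rb_blksize;	/* bytes per block */
	int	 rb_nblks;	/* total blocks in the ring */
	int	 rb_prod;	/* next block the producer fills */
	int	 rb_cons;	/* next block the consumer drains */
	int	 rb_inuse;	/* filled but not yet consumed */
};

static int
ringbuf_allocate(struct ring_buf *rb, size_t blksize, int nblks)
{
	rb->rb_base = malloc(blksize * nblks);
	if (rb->rb_base == NULL)
		return -1;
	rb->rb_blksize = blksize;
	rb->rb_nblks = nblks;
	rb->rb_prod = rb->rb_cons = rb->rb_inuse = 0;
	return 0;
}

static int
ringbuf_full(struct ring_buf *rb)
{
	return rb->rb_inuse == rb->rb_nblks;
}

/* Producer: claim the next free block, or NULL if the ring is full. */
static void *
ringbuf_producer_get(struct ring_buf *rb)
{
	if (ringbuf_full(rb))
		return NULL;
	return rb->rb_base + rb->rb_prod * rb->rb_blksize;
}

/* Producer: commit the block just claimed. */
static void
ringbuf_producer_return(struct ring_buf *rb)
{
	rb->rb_prod = (rb->rb_prod + 1) % rb->rb_nblks;
	rb->rb_inuse++;
}

/* Consumer: fetch the oldest filled block, or NULL if the ring is empty. */
static void *
ringbuf_consumer_get(struct ring_buf *rb)
{
	if (rb->rb_inuse == 0)
		return NULL;
	return rb->rb_base + rb->rb_cons * rb->rb_blksize;
}

/* Consumer: release the block just fetched. */
static void
ringbuf_consumer_return(struct ring_buf *rb)
{
	rb->rb_cons = (rb->rb_cons + 1) % rb->rb_nblks;
	rb->rb_inuse--;
}

int
main(void)
{
	struct ring_buf rb;
	char *p;

	if (ringbuf_allocate(&rb, 64, 4) != 0)
		return 1;
	while ((p = ringbuf_producer_get(&rb)) != NULL) {
		snprintf(p, 64, "block %d", rb.rb_prod);
		ringbuf_producer_return(&rb);
	}
	while ((p = ringbuf_consumer_get(&rb)) != NULL) {
		printf("%s\n", p);
		ringbuf_consumer_return(&rb);
	}
	free(rb.rb_base);
	return 0;
}

Tracking rb_inuse explicitly, as the sketch does, is one common way to tell a full ring from an empty one when the producer and consumer indices coincide.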
/src/sys/dev/sbus/
  qe.c
    129: struct qec_ring sc_rb;  /* Packet Ring Buffer */   (member in struct:qe_softc)
    237: sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
    238: sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;
    242:     sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
    243:     sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;
    263:     &sc->sc_rb.rb_membase,
    273:     sc->sc_rb.rb_membase, size, NULL,
    277:     bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
    281: sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
    327: bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ
    [all...]
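The qe.c hits show the allocation side of a qec_ring: lines 237-243 size a single contiguous region to cover rb_ntbuf transmit plus rb_nrbuf receive packet buffers, and lines 263-281 obtain and map that region with bus_dmamem_alloc()/bus_dmamem_map() and record its DMA address in rb_dmabase. A hedged sketch of the sizing and carving follows, with malloc() standing in for the bus_dma calls; the carve order and any field not visible above are assumptions, and the real region also holds the descriptor rings, which the visible lines omit.

/*
 * Sketch only: malloc() stands in for bus_dmamem_alloc() +
 * bus_dmamem_map(); the carve order and PKT_BUF_SZ value are
 * assumptions, and the real region also holds descriptor rings.
 */
#include <stdlib.h>

#define PKT_BUF_SZ	2048	/* assumed; QE_PKT_BUF_SZ in the driver */

struct ring {
	int	 rb_ntbuf;	/* number of tx packet buffers */
	int	 rb_nrbuf;	/* number of rx packet buffers */
	char	*rb_membase;	/* mapped address of the whole region */
	char	*rb_txbuf;	/* first tx buffer */
	char	*rb_rxbuf;	/* first rx buffer */
};

static int
ring_alloc(struct ring *rb, int ntbuf, int nrbuf)
{
	size_t size;

	rb->rb_ntbuf = ntbuf;
	rb->rb_nrbuf = nrbuf;

	/* Lines 242-243 above: one region covers tx and rx buffers. */
	size = (size_t)ntbuf * PKT_BUF_SZ + (size_t)nrbuf * PKT_BUF_SZ;

	rb->rb_membase = malloc(size);	/* bus_dmamem_alloc()/map() here */
	if (rb->rb_membase == NULL)
		return -1;

	/* Carve the region: tx buffers first, rx buffers after (assumed). */
	rb->rb_txbuf = rb->rb_membase;
	rb->rb_rxbuf = rb->rb_membase + (size_t)ntbuf * PKT_BUF_SZ;
	return 0;
}

int
main(void)
{
	struct ring rb;

	if (ring_alloc(&rb, 8, 8) != 0)
		return 1;
	/* rb.rb_txbuf and rb.rb_rxbuf now address disjoint slot arrays. */
	free(rb.rb_membase);
	return 0;
}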
  be.c
    139: struct qec_ring sc_rb;  /* Packet Ring Buffer */   (member in struct:be_softc)
    289: sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
    290: sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;
    295:     sc->sc_rb.rb_ntbuf * BE_PKT_BUF_SZ +
    296:     sc->sc_rb.rb_nrbuf * BE_PKT_BUF_SZ;
    314:     &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    322:     sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
    324:     bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
    328: sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
    465: bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * BE_PKT_BUF_SZ
    [all...]
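be.c repeats the same setup with BE_PKT_BUF_SZ, and its line 465 (like qe.c line 327) shows how both drivers address individual buffers: a running descriptor index idx is folded into the fixed-size buffer array with idx % n. A small sketch of just that computation, with assumed names:

/*
 * Sketch only: assumed names; PKT_BUF_SZ plays the role of
 * QE_PKT_BUF_SZ/BE_PKT_BUF_SZ.
 */
#include <stdio.h>
#include <stdlib.h>

#define PKT_BUF_SZ	2048

/* Fold a running descriptor index into one of nbuf fixed-size buffers. */
static char *
pkt_buf(char *base, unsigned int idx, unsigned int nbuf)
{
	return base + (size_t)(idx % nbuf) * PKT_BUF_SZ;
}

int
main(void)
{
	char *pool;
	unsigned int idx;

	if ((pool = malloc(4 * PKT_BUF_SZ)) == NULL)
		return 1;
	/* Indices 0..7 revisit the same four slots in order. */
	for (idx = 0; idx < 8; idx++)
		printf("idx %u -> slot %ld\n", idx,
		    (long)((pkt_buf(pool, idx, 4) - pool) / PKT_BUF_SZ));
	free(pool);
	return 0;
}

The modulo keeps the buffer pointer inside the ring no matter how far the running index has advanced.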