
Lines Matching defs:xsc

152 struct xe_softc *xsc = device_private(self);
153 struct mb8795_softc *sc = &xsc->sc_mb8795;
156 if (!xsc->sc_txdma) {
157 xsc->sc_txdma = nextdma_findchannel ("enetx");
158 if (xsc->sc_txdma == NULL)
162 if (!xsc->sc_rxdma) {
163 xsc->sc_rxdma = nextdma_findchannel ("enetr");
164 if (xsc->sc_rxdma == NULL)
169 device_xname(xsc->sc_txdma->sc_dev),
170 device_xname(xsc->sc_rxdma->sc_dev));
172 nextdma_setconf (xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
173 nextdma_setconf (xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
174 nextdma_setconf (xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
175 nextdma_setconf (xsc->sc_rxdma, cb_arg, sc);
177 nextdma_setconf (xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
178 nextdma_setconf (xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
179 nextdma_setconf (xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
180 nextdma_setconf (xsc->sc_txdma, cb_arg, sc);
183 error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
185 &xsc->sc_tx_dmamap);
192 error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
194 &xsc->sc_rx_dmamap[i]);
199 xsc->sc_rx_mb_head[i] = NULL;
201 xsc->sc_rx_loaded_idx = 0;
202 xsc->sc_rx_completed_idx = 0;
203 xsc->sc_rx_handled_idx = 0;
210 xsc->sc_txbuf = kmem_alloc(2000, KM_SLEEP);
211 xsc->sc_tx_mb_head = NULL;
212 xsc->sc_tx_loaded = 0;
226 struct xe_softc *xsc = device_private(self);
227 struct mb8795_softc *sc = &xsc->sc_mb8795;
246 xsc->sc_bst = ia->ia_bst;
247 if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
248 XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
261 xsc->sc_txdma = nextdma_findchannel("enetx");
262 xsc->sc_rxdma = nextdma_findchannel("enetr");
263 if (xsc->sc_rxdma && xsc->sc_txdma)
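
This next fragment is from the autoconf attach routine proper: it saves the bus space tag from the attach arguments, maps the MB8795 register window at NEXT_P_ENET, and only proceeds to the DMA attachment once both channels can be found. A hedged sketch of that step; xe_map_regs is a hypothetical helper and the error handling is illustrative.

    /* Sketch: map the chip's registers and confirm both DMA channels are
     * present.  Returns 0 on success, nonzero if attachment cannot proceed. */
    static int
    xe_map_regs(struct xe_softc *xsc, bus_space_tag_t bst)
    {
        xsc->sc_bst = bst;
        if (bus_space_map(xsc->sc_bst, NEXT_P_ENET, XE_DEVICE_SIZE, 0,
            &xsc->sc_bsh))
            return -1;      /* register window could not be mapped */

        xsc->sc_txdma = nextdma_findchannel("enetx");
        xsc->sc_rxdma = nextdma_findchannel("enetr");
        if (xsc->sc_txdma == NULL || xsc->sc_rxdma == NULL)
            return -1;      /* DMA channels not attached yet */
        return 0;
    }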
296 struct xe_softc *xsc = (struct xe_softc *)sc;
298 return bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg);
304 struct xe_softc *xsc = (struct xe_softc *)sc;
306 bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
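
These two accessors are the glue between the chip-independent MB8795 core and the NeXT bus: they cast the generic softc back to the bus-specific one and route every register access through bus_space. A sketch of the pair; the cast only works because, as the &xsc->sc_mb8795 usage in the attach fragments implies, the MB8795 softc is the first member of struct xe_softc, and the hook prototypes expected by the MB8795 core are assumed rather than taken from the listing.

    /* Sketch: register accessors routed through bus_space. */
    static u_char
    xe_read_reg(struct mb8795_softc *sc, int reg)
    {
        struct xe_softc *xsc = (struct xe_softc *)sc;

        return bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg);
    }

    static void
    xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
    {
        struct xe_softc *xsc = (struct xe_softc *)sc;

        bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
    }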
312 struct xe_softc *xsc = (struct xe_softc *)sc;
317 nextdma_reset(xsc->sc_rxdma);
318 nextdma_reset(xsc->sc_txdma);
320 if (xsc->sc_tx_loaded) {
321 bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
322 0, xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
323 bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
324 xsc->sc_tx_loaded = 0;
326 m_freem(xsc->sc_tx_mb_head);
327 xsc->sc_tx_mb_head = NULL;
330 if (xsc->sc_rx_mb_head[i]) {
331 bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
332 xsc->sc_rx_dmamap[i]);
333 m_freem(xsc->sc_rx_mb_head[i]);
334 xsc->sc_rx_mb_head[i] = NULL;
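
The reset fragment quiesces both channels and then drops every piece of DMA state still tied to an mbuf: a loaded transmit map is synced POSTWRITE, unloaded and its chain freed, and every receive slot that still owns an mbuf is unloaded and freed as well. A condensed sketch of that teardown; xe_dma_teardown is a hypothetical name.

    /* Sketch: drop all in-flight DMA state so the rings can be rebuilt. */
    static void
    xe_dma_teardown(struct xe_softc *xsc)
    {
        int i;

        nextdma_reset(xsc->sc_rxdma);
        nextdma_reset(xsc->sc_txdma);

        if (xsc->sc_tx_loaded) {
            bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
                0, xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
            xsc->sc_tx_loaded = 0;
        }
        if (xsc->sc_tx_mb_head != NULL) {
            m_freem(xsc->sc_tx_mb_head);
            xsc->sc_tx_mb_head = NULL;
        }

        for (i = 0; i < MB8795_NRXBUFS; i++) {
            if (xsc->sc_rx_mb_head[i] == NULL)
                continue;
            bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
                xsc->sc_rx_dmamap[i]);
            m_freem(xsc->sc_rx_mb_head[i]);
            xsc->sc_rx_mb_head[i] = NULL;
        }
    }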
342 struct xe_softc *xsc = (struct xe_softc *)sc;
348 xsc->sc_rx_mb_head[i] =
349 xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
351 xsc->sc_rx_loaded_idx = 0;
352 xsc->sc_rx_completed_idx = 0;
353 xsc->sc_rx_handled_idx = 0;
355 nextdma_init(xsc->sc_rxdma);
361 struct xe_softc *xsc = (struct xe_softc *)sc;
365 nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
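
Receive setup fills every ring slot with a freshly mapped cluster mbuf via the rxmap-load helper that appears further down in the listing, rewinds the three cursors, initializes the read channel, and then the separate "go" hook starts it with DMACSR_SETREAD. A sketch of the combined sequence; folding the two hooks into one function here is purely for illustration.

    /* Sketch: (re)build the receive ring and start the read-direction DMA. */
    static void
    xe_rx_ring_start(struct mb8795_softc *sc)
    {
        struct xe_softc *xsc = (struct xe_softc *)sc;
        int i;

        for (i = 0; i < MB8795_NRXBUFS; i++)
            xsc->sc_rx_mb_head[i] =
                xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);

        xsc->sc_rx_loaded_idx = 0;
        xsc->sc_rx_completed_idx = 0;
        xsc->sc_rx_handled_idx = 0;

        nextdma_init(xsc->sc_rxdma);
        nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
    }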
371 struct xe_softc *xsc = (struct xe_softc *)sc;
376 if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
377 xsc->sc_rx_handled_idx++;
378 xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;
380 map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
381 m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];
385 bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
388 bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);
392 xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
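
When the MB8795 core asks for a received frame, the handler above only advances the handled cursor while it still trails the completed cursor; it then syncs and unloads that slot's map, takes ownership of its mbuf, and immediately reloads the slot with a fresh cluster so the DMA engine never runs out of buffers. A sketch of that harvest-and-replenish step; xe_rx_harvest is a hypothetical name and the sync arguments are the usual full-map POSTREAD.

    /* Sketch: hand one completed receive buffer to the chip core and refill
     * the slot.  Returns NULL when nothing new has completed. */
    static struct mbuf *
    xe_rx_harvest(struct mb8795_softc *sc)
    {
        struct xe_softc *xsc = (struct xe_softc *)sc;
        bus_dmamap_t map;
        struct mbuf *m = NULL;

        if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
            xsc->sc_rx_handled_idx =
                (xsc->sc_rx_handled_idx + 1) % MB8795_NRXBUFS;

            map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
            m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

            bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
                0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

            /* Refill the slot right away so the ring stays full. */
            xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
                xe_dma_rxmap_load(sc, map);
        }
        return m;
    }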
409 struct xe_softc *xsc = (struct xe_softc *)sc;
413 nextdma_init(xsc->sc_txdma);
419 struct xe_softc *xsc = (struct xe_softc *)sc;
423 nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
429 struct xe_softc *xsc = (struct xe_softc *)sc;
432 xsc->sc_tx_mb_head = m;
446 error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
447 xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
450 u_char *buf = xsc->sc_txbuf;
457 for (m=xsc->sc_tx_mb_head; m; m = m->m_next) {
470 error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
471 xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
477 m_freem(xsc->sc_tx_mb_head);
478 xsc->sc_tx_mb_head = NULL;
483 if (xsc->sc_tx_loaded != 0) {
484 panic("%s: xsc->sc_tx_loaded is %d", device_xname(sc->sc_dev),
485 xsc->sc_tx_loaded);
489 bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
490 xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
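
Transmit maps the mbuf chain directly when bus_dmamap_load_mbuf succeeds; if it fails (too many segments for the map, for instance), the chain is flattened into the 2000-byte bounce buffer allocated at attach time (sc_txbuf) and that single contiguous region is loaded instead. Either way the map is synced PREWRITE before the hardware is started. A sketch of the load-with-fallback idiom; xe_tx_load is a hypothetical name, and any length checks or padding the real code performs are omitted.

    /* Sketch: map an outgoing mbuf chain for DMA, copying it into the
     * contiguous bounce buffer when the chain itself cannot be loaded.
     * sc_txbuf (2000 bytes, from the attach code) is assumed to be large
     * enough for any frame the stack hands down. */
    static int
    xe_tx_load(struct xe_softc *xsc, struct mbuf *m_head)
    {
        int error;

        xsc->sc_tx_mb_head = m_head;
        error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
            xsc->sc_tx_dmamap, m_head, BUS_DMA_NOWAIT);
        if (error) {
            /* Fallback: flatten the chain into the bounce buffer. */
            u_char *buf = xsc->sc_txbuf;
            int buflen = 0;
            struct mbuf *m;

            for (m = m_head; m != NULL; m = m->m_next) {
                memcpy(buf + buflen, mtod(m, void *), m->m_len);
                buflen += m->m_len;
            }
            error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
                xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
            if (error) {
                m_freem(xsc->sc_tx_mb_head);
                xsc->sc_tx_mb_head = NULL;
                return error;
            }
        }

        KASSERT(xsc->sc_tx_loaded == 0);    /* only one frame in flight */
        bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
            xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
        return 0;
    }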
498 struct xe_softc *xsc = (struct xe_softc *)sc;
500 return (xsc->sc_tx_loaded != 0);
512 struct xe_softc *xsc = (struct xe_softc *)sc;
518 if (!xsc->sc_tx_loaded)
522 if (map != xsc->sc_tx_dmamap)
533 struct xe_softc *xsc = (struct xe_softc *)sc;
539 if (!xsc->sc_tx_loaded)
546 if (xsc->sc_tx_loaded) {
547 bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
548 0, xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
549 bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
550 m_freem(xsc->sc_tx_mb_head);
551 xsc->sc_tx_mb_head = NULL;
553 xsc->sc_tx_loaded--;
557 if (xsc->sc_tx_loaded != 0)
559 xsc->sc_tx_loaded);
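
Completion and shutdown are split on the transmit side: the completed callback only sanity-checks that the map the channel reports is the one transmit map, while the shutdown callback does the actual cleanup: POSTWRITE sync, unload, free the chain, and drop sc_tx_loaded back to zero (the original panics if the counter ever disagrees). A sketch of the shutdown-side cleanup; the name is hypothetical.

    /* Sketch: release the single transmit mapping once the write channel
     * has shut down.  sc_tx_loaded acts as a 0/1 "frame in flight" count. */
    static void
    xe_tx_shutdown_cleanup(struct xe_softc *xsc)
    {
        if (!xsc->sc_tx_loaded)
            return;             /* nothing was in flight */

        bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
            0, xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
        m_freem(xsc->sc_tx_mb_head);
        xsc->sc_tx_mb_head = NULL;

        xsc->sc_tx_loaded--;
        KASSERT(xsc->sc_tx_loaded == 0);
    }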
584 struct xe_softc *xsc = (struct xe_softc *)sc;
588 xsc->sc_rx_completed_idx++;
589 xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;
593 device_xname(sc->sc_dev), xsc->sc_rx_completed_idx));
596 if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx])
612 struct xe_softc *xsc = (struct xe_softc *)sc;
619 nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
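
On the receive side the split works the other way around: the completed callback just advances the completed cursor (checking that the reported map is that slot's map), and the shutdown callback restarts the read channel so reception keeps running; the buffers between handled and completed are drained later by the harvest routine sketched earlier. A brief sketch of the two callbacks with hypothetical names; their exact prototypes are an assumption, since the callback types taken by nextdma_setconf are not visible in the listing.

    /* Sketch: receive-side DMA callbacks.  Callback signatures assumed. */
    static void
    xe_rx_completed_cb(bus_dmamap_t map, void *arg)
    {
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;

        xsc->sc_rx_completed_idx =
            (xsc->sc_rx_completed_idx + 1) % MB8795_NRXBUFS;
        KASSERT(map == xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]);
    }

    static void
    xe_rx_shutdown_cb(void *arg)
    {
        struct mb8795_softc *sc = arg;
        struct xe_softc *xsc = (struct xe_softc *)sc;

        /* Keep the read channel running across shutdowns. */
        nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
    }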
637 struct xe_softc *xsc = (struct xe_softc *)sc;
676 error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
679 bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
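
The rxmap-load helper is what keeps the ring fed: it gets a cluster mbuf, loads it into the given receive map, and syncs the map PREREAD so the device can write into the buffer. A sketch under the assumption that each slot holds one cluster, as the map creation earlier implies; xe_rxmap_load is a hypothetical stand-in for the helper in the listing, and errors simply return NULL.

    /* Sketch: allocate a cluster mbuf, map it for device writes, and return
     * it so the caller can park it in sc_rx_mb_head[]. */
    static struct mbuf *
    xe_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
    {
        struct xe_softc *xsc = (struct xe_softc *)sc;
        struct mbuf *m;
        int error;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
            return NULL;
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_freem(m);
            return NULL;
        }
        m->m_len = m->m_pkthdr.len = MCLBYTES;

        error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat, map, m,
            BUS_DMA_NOWAIT);
        if (error) {
            m_freem(m);
            return NULL;
        }
        bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREREAD);
        return m;
    }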
701 struct xe_softc *xsc = (struct xe_softc *)sc;
706 if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS)
707 == xsc->sc_rx_handled_idx) {
718 xsc->sc_rx_loaded_idx++;
719 xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
720 map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];
722 DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx "
724 xsc->sc_rx_loaded_idx));
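
The continue callback is where the three-cursor ring is most visible: the slot after loaded must not catch up with handled (that would let the engine overwrite a buffer the host has not consumed), and only then does loaded advance and that slot's map get handed back to the engine. The arithmetic is simple enough to show standalone; the sketch below is plain C with hypothetical names, using the same (idx + 1) % N convention as the driver.

    /* Standalone sketch of the three-cursor receive ring used above:
     *   loaded    - last slot handed to the DMA engine
     *   completed - last slot the engine finished filling
     *   handled   - last slot the host has consumed
     * Names and the tiny demo in main() are illustrative only. */
    #include <stdio.h>

    #define NRXBUFS 8   /* stands in for MB8795_NRXBUFS */

    struct rx_ring {
        int loaded, completed, handled;
    };

    /* Can another slot be given to the engine without clobbering
     * an unconsumed buffer? */
    static int
    ring_can_load(const struct rx_ring *r)
    {
        return ((r->loaded + 1) % NRXBUFS) != r->handled;
    }

    static int
    ring_advance(int idx)
    {
        return (idx + 1) % NRXBUFS;
    }

    int
    main(void)
    {
        struct rx_ring r = { 0, 0, 0 };

        /* The engine keeps loading until the ring would wrap onto "handled". */
        while (ring_can_load(&r))
            r.loaded = ring_advance(r.loaded);
        printf("stopped with loaded=%d handled=%d\n", r.loaded, r.handled);

        /* Completing and handling one buffer frees a slot again. */
        r.completed = ring_advance(r.completed);
        r.handled = ring_advance(r.handled);
        printf("can load again: %s\n", ring_can_load(&r) ? "yes" : "no");
        return 0;
    }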
739 struct xe_softc *xsc = (struct xe_softc *)sc;
744 if (xsc->sc_tx_loaded)
747 map = xsc->sc_tx_dmamap;
748 xsc->sc_tx_loaded++;
752 if (xsc->sc_tx_loaded != 1)
754 xsc->sc_tx_loaded);
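
The transmit continue callback is the mirror image but much simpler: there is only one map, so it is handed to the write channel exactly once per loaded frame, sc_tx_loaded going from 0 to 1 at that moment and any further request getting NULL (with a diagnostic panic in the original if the counter is not exactly 1 afterwards). A sketch of that single-buffer hand-off; the name is hypothetical.

    /* Sketch: give the write channel the one TX map, but only once per
     * loaded frame.  Returns NULL when there is nothing (more) to hand out. */
    static bus_dmamap_t
    xe_tx_continue_map(struct xe_softc *xsc)
    {
        bus_dmamap_t map = NULL;

        if (!xsc->sc_tx_loaded) {
            map = xsc->sc_tx_dmamap;
            xsc->sc_tx_loaded++;
        }
        KASSERT(xsc->sc_tx_loaded == 1);
        return map;
    }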