      1 /*	$NetBSD: gtmpsc.c,v 1.40 2010/08/01 06:57:06 kiyohara Exp $	*/
      2 /*
      3  * Copyright (c) 2009 KIYOHARA Takashi
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
     19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
     24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     25  * POSSIBILITY OF SUCH DAMAGE.
     26  */
     27 /*
      28  * gtmpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.40 2010/08/01 06:57:06 kiyohara Exp $");
     33 
     34 #include "opt_kgdb.h"
     35 
     36 #include <sys/param.h>
     37 #include <sys/bus.h>
     38 #include <sys/conf.h>
     39 #include <sys/device.h>
     40 #include <sys/fcntl.h>
     41 #include <sys/intr.h>
     42 #include <sys/kauth.h>
     43 #include <sys/kernel.h>
     44 #include <sys/mutex.h>
     45 #include <sys/proc.h>
     46 #include <sys/systm.h>
     47 #include <sys/timepps.h>
     48 #include <sys/tty.h>
     49 #ifdef KGDB
     50 #include <sys/kgdb.h>
     51 #endif
     52 
     53 #include <uvm/uvm.h>
     54 #include <uvm/uvm_extern.h>
     55 
     56 #include <dev/cons.h>
     57 
     58 #include <dev/marvell/gtreg.h>
     59 #include <dev/marvell/gtvar.h>
     60 #include <dev/marvell/gtbrgreg.h>
     61 #include <dev/marvell/gtbrgvar.h>
     62 #include <dev/marvell/gtsdmareg.h>
     63 #include <dev/marvell/gtsdmavar.h>
     64 #include <dev/marvell/gtmpscreg.h>
     65 #include <dev/marvell/gtmpscvar.h>
     66 #include <dev/marvell/marvellreg.h>
     67 #include <dev/marvell/marvellvar.h>
     68 
     69 #include "gtmpsc.h"
     70 #include "ioconf.h"
     71 #include "locators.h"
     72 
     73 /*
      74  * Wait 2 character times (in microseconds) for RESET_DELAY
     75  */
     76 #define GTMPSC_RESET_DELAY	(2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
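         /*
          * For example, at 9600 baud (GT_MPSC_DEFAULT_BAUD_RATE's actual
          * value is defined elsewhere), this works out to
          * 2*8*1000000 / 9600 = 1666 microseconds.
          */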
     77 
     78 
     79 #if defined(DEBUG)
     80 unsigned int gtmpsc_debug = 0;
     81 # define STATIC
     82 # define DPRINTF(x)	do { if (gtmpsc_debug) printf x ; } while (0)
     83 #else
     84 # define STATIC static
     85 # define DPRINTF(x)
     86 #endif
     87 
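         /*
          * The tty minor number encodes the MPSC unit in the low 19 bits
          * and the dialout (callout) flag in bit 19.
          */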
     88 #define GTMPSCUNIT_MASK    0x7ffff
     89 #define GTMPSCDIALOUT_MASK 0x80000
     90 
     91 #define GTMPSCUNIT(x)      (minor(x) & GTMPSCUNIT_MASK)
     92 #define GTMPSCDIALOUT(x)   (minor(x) & GTMPSCDIALOUT_MASK)
     93 
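         /*
          * Re-arm an RX descriptor: restore its CSR and byte count, sync the
          * receive buffer for a new transfer and hand ownership back to the
          * SDMA engine.
          */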
     94 #define CLEANUP_AND_RETURN_RXDMA(sc, ix)				    \
     95 	do {								    \
     96 		gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
     97 									    \
     98 		_vrxp->rxdesc.sdma_csr =				    \
     99 		    SDMA_CSR_RX_L	|				    \
    100 		    SDMA_CSR_RX_F	|				    \
    101 		    SDMA_CSR_RX_OWN	|				    \
    102 		    SDMA_CSR_RX_EI;					    \
    103 		_vrxp->rxdesc.sdma_cnt =				    \
    104 		    GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;		    \
    105 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
    106 		    (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),   \
     107 		    sizeof(_vrxp->rxbuf),				    \
    108 		    BUS_DMASYNC_PREREAD);				    \
    109 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
    110 		    (ix) * sizeof(gtmpsc_pollrx_t),			    \
    111 		    sizeof(sdma_desc_t),				    \
    112 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);	    \
     113 	} while (0)
    114 
    115 
    116 STATIC int  gtmpscmatch(device_t, cfdata_t, void *);
    117 STATIC void gtmpscattach(device_t, device_t, void *);
    118 
    119 STATIC void gtmpsc_softintr(void *);
    120 
    121 STATIC void gtmpscstart(struct tty *);
    122 STATIC int  gtmpscparam(struct tty *, struct termios *);
    123 
    124 STATIC void gtmpsc_shutdownhook(void *);
    125 
    126 STATIC uint32_t cflag2mpcr(tcflag_t);
    127 STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
    128 STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
    129 STATIC void gtmpsc_write(struct gtmpsc_softc *);
    130 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
    131 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
    132 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
    133 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
    134 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
    135 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
    136 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
    137 
    138 #ifdef MPSC_CONSOLE
    139 STATIC int gtmpsccngetc(dev_t);
    140 STATIC void gtmpsccnputc(dev_t, int);
    141 STATIC void gtmpsccnpollc(dev_t, int);
    142 STATIC void gtmpsccnhalt(dev_t);
    143 
    144 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
    145 			   bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
    146 #endif
    147 
    148 #if defined(MPSC_CONSOLE) || defined(KGDB)
    149 STATIC int  gtmpsc_common_getc(struct gtmpsc_softc *);
    150 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
    151 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
    152 #endif
    153 
    154 dev_type_open(gtmpscopen);
    155 dev_type_close(gtmpscclose);
    156 dev_type_read(gtmpscread);
    157 dev_type_write(gtmpscwrite);
    158 dev_type_ioctl(gtmpscioctl);
    159 dev_type_stop(gtmpscstop);
    160 dev_type_tty(gtmpsctty);
    161 dev_type_poll(gtmpscpoll);
    162 
    163 const struct cdevsw gtmpsc_cdevsw = {
    164 	gtmpscopen, gtmpscclose, gtmpscread, gtmpscwrite, gtmpscioctl,
    165 	gtmpscstop, gtmpsctty, gtmpscpoll, nommap, ttykqfilter, D_TTY
    166 };
    167 
    168 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
    169     gtmpscmatch, gtmpscattach, NULL, NULL);
    170 
    171 
    172 STATIC uint32_t sdma_imask;		/* soft copy of SDMA IMASK reg */
    173 STATIC struct cnm_state gtmpsc_cnm_state;
    174 
    175 #ifdef KGDB
    176 static int gtmpsc_kgdb_addr;
    177 static int gtmpsc_kgdb_attached;
    178 
    179 STATIC int      gtmpsc_kgdb_getc(void *);
    180 STATIC void     gtmpsc_kgdb_putc(void *, int);
    181 #endif /* KGDB */
    182 
    183 #ifdef MPSC_CONSOLE
    184 /*
    185  * hacks for console initialization
    186  * which happens prior to autoconfig "attach"
    187  *
    188  * XXX Assumes PAGE_SIZE is a constant!
    189  */
    190 gtmpsc_softc_t gtmpsc_cn_softc;
    191 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
    192 
    193 
    194 static struct consdev gtmpsc_consdev = {
    195 	NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
    196 	NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
    197 };
    198 #endif
    199 
    200 
    201 #define GT_MPSC_READ(sc, o) \
    202 	bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
    203 #define GT_MPSC_WRITE(sc, o, v) \
    204 	bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
    205 #define GT_SDMA_READ(sc, o) \
    206 	bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
    207 #define GT_SDMA_WRITE(sc, o, v) \
    208 	bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
    209 
    210 
    211 /* ARGSUSED */
    212 STATIC int
    213 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
    214 {
    215 	struct marvell_attach_args *mva = aux;
    216 
    217 	if (strcmp(mva->mva_name, match->cf_name) != 0)
    218 		return 0;
    219 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
    220 		return 0;
    221 
    222 	mva->mva_size = GTMPSC_SIZE;
    223 	return 1;
    224 }
    225 
    226 /* ARGSUSED */
    227 STATIC void
    228 gtmpscattach(device_t parent, device_t self, void *aux)
    229 {
    230 	struct gtmpsc_softc *sc = device_private(self);
    231 	struct marvell_attach_args *mva = aux;
    232 	bus_dma_segment_t segs;
    233 	struct tty *tp;
    234 	int rsegs, err, unit;
    235 	void *kva;
    236 
    237 	aprint_naive("\n");
    238 	aprint_normal(": Multi-Protocol Serial Controller\n");
    239 
    240 	if (mva->mva_unit != MVA_UNIT_DEFAULT)
    241 		unit = mva->mva_unit;
    242 	else
    243 		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
    244 
    245 #ifdef MPSC_CONSOLE
    246 	if (cn_tab == &gtmpsc_consdev &&
    247 	    cn_tab->cn_dev == makedev(0, unit)) {
    248 		gtmpsc_cn_softc.sc_dev = self;
    249 		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
    250 		sc->sc_flags = GTMPSC_CONSOLE;
    251 	} else
    252 #endif
    253 	{
    254 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
    255 		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
    256 			aprint_error_dev(self, "Cannot map MPSC registers\n");
    257 			return;
    258 		}
    259 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
    260 		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
    261 			aprint_error_dev(self, "Cannot map SDMA registers\n");
    262 			return;
    263 		}
    264 		sc->sc_dev = self;
    265 		sc->sc_unit = unit;
    266 		sc->sc_iot = mva->mva_iot;
    267 		sc->sc_dmat = mva->mva_dmat;
    268 
    269 		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
    270 		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
    271 		if (err) {
    272 			aprint_error_dev(sc->sc_dev,
    273 			    "bus_dmamem_alloc error 0x%x\n", err);
    274 			goto fail0;
    275 		}
    276 		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
    277 		    BUS_DMA_NOWAIT);
    278 		if (err) {
    279 			aprint_error_dev(sc->sc_dev,
    280 			    "bus_dmamem_map error 0x%x\n", err);
    281 			goto fail1;
    282 		}
    283 		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
    284 		sc->sc_poll_sdmapage = kva;
    285 
    286 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
    287 		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
    288 		   &sc->sc_txdma_map);
    289 		if (err != 0) {
    290 			aprint_error_dev(sc->sc_dev,
    291 			    "bus_dmamap_create error 0x%x\n", err);
    292 			goto fail2;
    293 		}
    294 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
    295 		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
    296 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
    297 		if (err != 0) {
    298 			aprint_error_dev(sc->sc_dev,
    299 			    "bus_dmamap_load tx error 0x%x\n", err);
    300 			goto fail3;
    301 		}
    302 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
    303 		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
    304 		   &sc->sc_rxdma_map);
    305 		if (err != 0) {
    306 			aprint_error_dev(sc->sc_dev,
    307 			    "bus_dmamap_create rx error 0x%x\n", err);
    308 			goto fail4;
    309 		}
    310 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
    311 		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
    312 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
    313 		if (err != 0) {
    314 			aprint_error_dev(sc->sc_dev,
    315 			    "bus_dmamap_load rx error 0x%x\n", err);
    316 			goto fail5;
    317 		}
    318 
    319 		sc->sc_brg = unit;		/* XXXXX */
    320 		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
    321 	}
    322 	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
    323 	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
    324 
    325 	sc->sc_rx_ready = 0;
    326 	sc->sc_tx_busy = 0;
    327 	sc->sc_tx_done = 0;
    328 	sc->sc_tx_stopped = 0;
    329 	sc->sc_heldchange = 0;
    330 
    331 	gtmpsc_txdesc_init(sc);
    332 	gtmpsc_rxdesc_init(sc);
    333 
    334 	sc->sc_tty = tp = ttymalloc();
    335 	tp->t_oproc = gtmpscstart;
    336 	tp->t_param = gtmpscparam;
    337 	tty_attach(tp);
    338 
    339 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
    340 
    341 	/*
    342 	 * clear any pending SDMA interrupts for this unit
    343 	 */
    344 	(void) gt_sdma_icause(device_parent(sc->sc_dev),
    345 	    SDMA_INTR_RXBUF(sc->sc_unit) |
    346 	    SDMA_INTR_RXERR(sc->sc_unit) |
    347 	    SDMA_INTR_TXBUF(sc->sc_unit) |
    348 	    SDMA_INTR_TXEND(sc->sc_unit));
    349 
    350 	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
    351 	if (sc->sc_si == NULL)
    352 		panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");
    353 
    354 	shutdownhook_establish(gtmpsc_shutdownhook, sc);
    355 
    356 	gtmpscinit_stop(sc);
    357 	gtmpscinit_start(sc);
    358 
    359 	if (sc->sc_flags & GTMPSC_CONSOLE) {
    360 		int maj;
    361 
    362 		/* locate the major number */
    363 		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
    364 
    365 		tp->t_dev = cn_tab->cn_dev =
    366 		    makedev(maj, device_unit(sc->sc_dev));
    367 
    368 		aprint_normal_dev(self, "console\n");
    369 	}
    370 
    371 #ifdef KGDB
    372 	/*
    373 	 * Allow kgdb to "take over" this port.  If this is
    374 	 * the kgdb device, it has exclusive use.
    375 	 */
    376 	if (sc->sc_unit == gtmpsckgdbport) {
    377 #ifdef MPSC_CONSOLE
    378 		if (sc->sc_unit == MPSC_CONSOLE) {
    379 			aprint_error_dev(self,
    380 			    "(kgdb): cannot share with console\n");
    381 			return;
    382 		}
    383 #endif
    384 
    385 		sc->sc_flags |= GTMPSC_KGDB;
    386 		aprint_normal_dev(self, "kgdb\n");
    387 
    388 		gtmpsc_txflush(sc);
    389 
    390 		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
    391 		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
    392 		gtmpsc_kgdb_attached = 1;
    393 		kgdb_connect(1);
    394 	}
    395 #endif /* KGDB */
    396 
    397 	return;
    398 
    399 
    400 fail5:
    401 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
    402 fail4:
    403 	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
    404 fail3:
    405 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
    406 fail2:
    407 	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
    408 fail1:
    409 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
    410 fail0:
    411 	return;
    412 }
    413 
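         /*
          * gtmpsc_intr - hardware interrupt from the SDMA; fetch the
          * interrupt cause and run the RX/TX handlers for each attached
          * channel.
          */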
    414 /* ARGSUSED */
    415 int
    416 gtmpsc_intr(void *arg)
    417 {
    418 	struct gt_softc *gt = (struct gt_softc *)arg;
    419 	struct gtmpsc_softc *sc;
    420 	uint32_t icause;
    421 	int i;
    422 
    423 	icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
    424 
    425 	for (i = 0; i < GTMPSC_NCHAN; i++) {
    426 		sc = device_lookup_private(&gtmpsc_cd, i);
    427 		if (sc == NULL)
    428 			continue;
    429 		mutex_spin_enter(&sc->sc_lock);
    430 		if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
    431 			gtmpsc_intr_rx(sc);
    432 			icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
    433 		}
    434 		if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
    435 			gtmpsc_intr_tx(sc);
    436 			icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
    437 		}
    438 		mutex_spin_exit(&sc->sc_lock);
    439 	}
    440 
    441 	return 1;
    442 }
    443 
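         /*
          * gtmpsc_softintr - soft interrupt: feed received characters to the
          * line discipline and finish up a completed transmit.
          */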
    444 STATIC void
    445 gtmpsc_softintr(void *arg)
    446 {
    447 	struct gtmpsc_softc *sc = arg;
    448 	struct tty *tp = sc->sc_tty;
    449 	gtmpsc_pollrx_t *vrxp;
    450 	int code;
    451 	u_int cc;
    452 	u_char *get, *end, lsr;
    453 	int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
    454 
    455 	if (sc->sc_rx_ready) {
    456 		sc->sc_rx_ready = 0;
    457 
    458 		cc = sc->sc_rcvcnt;
    459 
    460 		/* If not yet open, drop the entire buffer content here */
    461 		if (!ISSET(tp->t_state, TS_ISOPEN))
    462 			cc = 0;
    463 
    464 		vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
    465 		end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
    466 		get = vrxp->rxbuf + sc->sc_roffset;
    467 		while (cc > 0) {
    468 			code = *get;
    469 			lsr = vrxp->rxdesc.sdma_csr;
    470 
    471 			if (ISSET(lsr,
    472 			    SDMA_CSR_RX_PE |
    473 			    SDMA_CSR_RX_FR |
    474 			    SDMA_CSR_RX_OR |
    475 			    SDMA_CSR_RX_BR)) {
    476 				if (ISSET(lsr, SDMA_CSR_RX_OR))
    477 					;	/* XXXXX not yet... */
    478 				if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
    479 					SET(code, TTY_FE);
    480 				if (ISSET(lsr, SDMA_CSR_RX_PE))
    481 					SET(code, TTY_PE);
    482 			}
    483 
    484 			if ((*rint)(code, tp) == -1) {
    485 				/*
    486 				 * The line discipline's buffer is out of space.
    487 				 */
    488 				/* XXXXX not yet... */
    489 			}
    490 			if (++get >= end) {
    491 				/* cleanup this descriptor, and return to DMA */
    492 				CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
    493 				sc->sc_rcvrx =
     494 				    (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
    495 				vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
    496 				end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
    497 				get = vrxp->rxbuf + sc->sc_roffset;
    498 			}
    499 			cc--;
    500 		}
    501 	}
    502 	if (sc->sc_tx_done) {
    503 		sc->sc_tx_done = 0;
    504 		CLR(tp->t_state, TS_BUSY);
    505 		if (ISSET(tp->t_state, TS_FLUSH))
    506 		    CLR(tp->t_state, TS_FLUSH);
    507 		else
    508 		    ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
    509 		(*tp->t_linesw->l_start)(tp);
    510 	}
    511 }
    512 
    513 int
    514 gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
    515 {
    516 	struct gtmpsc_softc *sc;
    517 	int unit = GTMPSCUNIT(dev);
    518 	struct tty *tp;
    519 	int s;
    520 	int error;
    521 
    522 	sc = device_lookup_private(&gtmpsc_cd, unit);
    523 	if (!sc)
    524 		return ENXIO;
    525 #ifdef KGDB
    526 	/*
    527 	 * If this is the kgdb port, no other use is permitted.
    528 	 */
    529 	if (sc->sc_flags & GTMPSC_KGDB)
    530 		return EBUSY;
    531 #endif
    532 	tp = sc->sc_tty;
    533 	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
    534 		return EBUSY;
    535 
    536 	s = spltty();
    537 
    538 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
    539 		struct termios t;
    540 
    541 		tp->t_dev = dev;
    542 
    543 		mutex_spin_enter(&sc->sc_lock);
    544 
    545 		/* Turn on interrupts. */
    546 		sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
    547 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
    548 
    549 		/* Clear PPS capture state on first open. */
    550 		mutex_spin_enter(&timecounter_lock);
    551 		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
    552 		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
    553 		pps_init(&sc->sc_pps_state);
    554 		mutex_spin_exit(&timecounter_lock);
    555 
    556 		mutex_spin_exit(&sc->sc_lock);
    557 
    558 		if (sc->sc_flags & GTMPSC_CONSOLE) {
    559 			t.c_ospeed = sc->sc_baudrate;
    560 			t.c_cflag = sc->sc_cflag;
    561 		} else {
    562 			t.c_ospeed = TTYDEF_SPEED;
    563 			t.c_cflag = TTYDEF_CFLAG;
    564 		}
    565 		t.c_ispeed = t.c_ospeed;
    566 
    567 		/* Make sure gtmpscparam() will do something. */
    568 		tp->t_ospeed = 0;
    569 		(void) gtmpscparam(tp, &t);
    570 		tp->t_iflag = TTYDEF_IFLAG;
    571 		tp->t_oflag = TTYDEF_OFLAG;
    572 		tp->t_lflag = TTYDEF_LFLAG;
    573 		ttychars(tp);
    574 		ttsetwater(tp);
    575 
    576 		mutex_spin_enter(&sc->sc_lock);
    577 
    578 		/* Clear the input/output ring */
    579 		sc->sc_rcvcnt = 0;
    580 		sc->sc_roffset = 0;
    581 		sc->sc_rcvrx = 0;
    582 		sc->sc_rcvdrx = 0;
    583 		sc->sc_nexttx = 0;
    584 		sc->sc_lasttx = 0;
    585 
    586 		/*
    587 		 * enable SDMA receive
    588 		 */
    589 		GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
    590 
    591 		mutex_spin_exit(&sc->sc_lock);
    592 	}
    593 	splx(s);
    594 	error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
    595 	if (error)
    596 		goto bad;
    597 
    598 	error = (*tp->t_linesw->l_open)(dev, tp);
    599 	if (error)
    600 		goto bad;
    601 
    602 	return 0;
    603 
    604 bad:
    605 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
    606 		/*
    607 		 * We failed to open the device, and nobody else had it opened.
    608 		 * Clean up the state as appropriate.
    609 		 */
    610 		gtmpscshutdown(sc);
    611 	}
    612 
    613 	return error;
    614 }
    615 
    616 int
    617 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
    618 {
    619 	int unit = GTMPSCUNIT(dev);
    620 	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
    621 	struct tty *tp = sc->sc_tty;
    622 
    623 	if (!ISSET(tp->t_state, TS_ISOPEN))
    624 		return 0;
    625 
    626 	(*tp->t_linesw->l_close)(tp, flag);
    627 	ttyclose(tp);
    628 
    629 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
    630 		/*
    631 		 * Although we got a last close, the device may still be in
    632 		 * use; e.g. if this was the dialout node, and there are still
    633 		 * processes waiting for carrier on the non-dialout node.
    634 		 */
    635 		gtmpscshutdown(sc);
    636 	}
    637 
    638 	return 0;
    639 }
    640 
    641 int
    642 gtmpscread(dev_t dev, struct uio *uio, int flag)
    643 {
    644 	struct gtmpsc_softc *sc =
    645 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
    646 	struct tty *tp = sc->sc_tty;
    647 
    648 	return (*tp->t_linesw->l_read)(tp, uio, flag);
    649 }
    650 
    651 int
    652 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
    653 {
    654 	struct gtmpsc_softc *sc =
    655 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
    656 	struct tty *tp = sc->sc_tty;
    657 
    658 	return (*tp->t_linesw->l_write)(tp, uio, flag);
    659 }
    660 
    661 int
    662 gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    663 {
    664 	struct gtmpsc_softc *sc =
    665 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
    666 	struct tty *tp = sc->sc_tty;
    667 	int error;
    668 
    669 	error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
    670 	if (error != EPASSTHROUGH)
    671 		return error;
    672 
    673 	error = ttioctl(tp, cmd, data, flag, l);
    674 	if (error != EPASSTHROUGH)
    675 		return error;
    676 
    677 	error = 0;
    678 	switch (cmd) {
    679 	case TIOCSFLAGS:
    680 		error = kauth_authorize_device_tty(l->l_cred,
    681 		    KAUTH_DEVICE_TTY_PRIVSET, tp);
    682 		if (error)
    683 			return error;
    684 		break;
    685 	default:
    686 		/* nothing */
    687 		break;
    688 	}
    689 
    690 	mutex_spin_enter(&sc->sc_lock);
    691 
    692 	switch (cmd) {
    693 	case PPS_IOC_CREATE:
    694 	case PPS_IOC_DESTROY:
    695 	case PPS_IOC_GETPARAMS:
    696 	case PPS_IOC_SETPARAMS:
    697 	case PPS_IOC_GETCAP:
    698 	case PPS_IOC_FETCH:
    699 #ifdef PPS_SYNC
    700 	case PPS_IOC_KCBIND:
    701 #endif
    702 		mutex_spin_enter(&timecounter_lock);
    703 		error = pps_ioctl(cmd, data, &sc->sc_pps_state);
    704 		mutex_spin_exit(&timecounter_lock);
    705 		break;
    706 
    707 	case TIOCDCDTIMESTAMP:	/* XXX old, overloaded  API used by xntpd v3 */
    708 		mutex_spin_enter(&timecounter_lock);
    709 #ifndef PPS_TRAILING_EDGE
    710 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
    711 		    &sc->sc_pps_state.ppsinfo.assert_timestamp);
    712 #else
    713 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
    714 		    &sc->sc_pps_state.ppsinfo.clear_timestamp);
    715 #endif
    716 		mutex_spin_exit(&timecounter_lock);
    717 		break;
    718 
    719 	default:
    720 		error = EPASSTHROUGH;
    721 		break;
    722 	}
    723 
    724 	mutex_spin_exit(&sc->sc_lock);
    725 
    726 	return error;
    727 }
    728 
    729 void
    730 gtmpscstop(struct tty *tp, int flag)
    731 {
    732 }
    733 
    734 struct tty *
    735 gtmpsctty(dev_t dev)
    736 {
    737 	struct gtmpsc_softc *sc =
    738 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
    739 
    740 	return sc->sc_tty;
    741 }
    742 
    743 int
    744 gtmpscpoll(dev_t dev, int events, struct lwp *l)
    745 {
    746 	struct gtmpsc_softc *sc =
    747 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
    748 	struct tty *tp = sc->sc_tty;
    749 
    750 	return (*tp->t_linesw->l_poll)(tp, events, l);
    751 }
    752 
    753 
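         /*
          * gtmpscstart - tty output start routine: grab the first contiguous
          * chunk of the output queue and hand it to gtmpsc_write().
          */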
    754 STATIC void
    755 gtmpscstart(struct tty *tp)
    756 {
    757 	struct gtmpsc_softc *sc;
    758 	unsigned char *tba;
    759 	unsigned int unit;
    760 	int s, tbc;
    761 
    762 	unit = GTMPSCUNIT(tp->t_dev);
    763 	sc = device_lookup_private(&gtmpsc_cd, unit);
    764 	if (sc == NULL)
    765 		return;
    766 
    767 	s = spltty();
    768 	if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
    769 		goto out;
    770 	if (sc->sc_tx_stopped)
    771 		goto out;
    772 	if (!ttypull(tp))
    773 		goto out;
    774 
    775 	/* Grab the first contiguous region of buffer space. */
    776 	tba = tp->t_outq.c_cf;
    777 	tbc = ndqb(&tp->t_outq, 0);
    778 
    779 	mutex_spin_enter(&sc->sc_lock);
    780 
    781 	sc->sc_tba = tba;
    782 	sc->sc_tbc = tbc;
    783 
    784 	sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
    785 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
    786 	SET(tp->t_state, TS_BUSY);
    787 	sc->sc_tx_busy = 1;
    788 	gtmpsc_write(sc);
    789 
    790 	mutex_spin_exit(&sc->sc_lock);
    791 out:
    792 	splx(s);
    793 }
    794 
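         /*
          * gtmpscparam - tty parameter routine: validate the requested speed,
          * update the stored baudrate/cflag and reload the channel registers
          * (deferred until the transmitter is idle if output is in progress).
          */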
    795 STATIC int
    796 gtmpscparam(struct tty *tp, struct termios *t)
    797 {
    798 	struct gtmpsc_softc *sc =
    799 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
    800 
    801 	/* Check requested parameters. */
    802 	if (compute_cdv(t->c_ospeed) < 0)
    803 		return EINVAL;
    804 	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
    805 		return EINVAL;
    806 
    807 	/*
    808 	 * If there were no changes, don't do anything.  This avoids dropping
    809 	 * input and improves performance when all we did was frob things like
    810 	 * VMIN and VTIME.
    811 	 */
    812 	if (tp->t_ospeed == t->c_ospeed &&
    813 	    tp->t_cflag == t->c_cflag)
    814 		return 0;
    815 
    816 	mutex_spin_enter(&sc->sc_lock);
    817 
    818 	/* And copy to tty. */
    819 	tp->t_ispeed = 0;
    820 	tp->t_ospeed = t->c_ospeed;
    821 	tp->t_cflag = t->c_cflag;
    822 
    823 	sc->sc_baudrate = t->c_ospeed;
    824 
    825 	if (!sc->sc_heldchange) {
    826 		if (sc->sc_tx_busy) {
    827 			sc->sc_heldtbc = sc->sc_tbc;
    828 			sc->sc_tbc = 0;
    829 			sc->sc_heldchange = 1;
    830 		} else
    831 			gtmpsc_loadchannelregs(sc);
    832 	}
    833 
    834 	mutex_spin_exit(&sc->sc_lock);
    835 
    836 	/* Fake carrier on */
    837 	(void) (*tp->t_linesw->l_modem)(tp, 1);
    838 
    839 	return 0;
    840 }
    841 
    842 void
    843 gtmpsc_shutdownhook(void *arg)
    844 {
    845 	gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
    846 
    847 	gtmpsc_txflush(sc);
    848 }
    849 
    850 /*
     851  * Convert cflag (CS[5678] and CSTOPB) to MPCR bits.
    852  */
    853 STATIC uint32_t
    854 cflag2mpcr(tcflag_t cflag)
    855 {
    856 	uint32_t mpcr = 0;
    857 
    858 	switch (ISSET(cflag, CSIZE)) {
    859 	case CS5:
    860 		SET(mpcr, GTMPSC_MPCR_CL_5);
    861 		break;
    862 	case CS6:
    863 		SET(mpcr, GTMPSC_MPCR_CL_6);
    864 		break;
    865 	case CS7:
    866 		SET(mpcr, GTMPSC_MPCR_CL_7);
    867 		break;
    868 	case CS8:
    869 		SET(mpcr, GTMPSC_MPCR_CL_8);
    870 		break;
    871 	}
    872 	if (ISSET(cflag, CSTOPB))
    873 		SET(mpcr, GTMPSC_MPCR_SBL_2);
    874 
    875 	return mpcr;
    876 }
    877 
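         /*
          * gtmpsc_intr_rx - walk the RX descriptor ring, account for newly
          * filled descriptors and schedule the soft interrupt when data is
          * ready.
          */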
     878 STATIC __inline void
    879 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
    880 {
    881 	gtmpsc_pollrx_t *vrxp;
    882 	uint32_t csr;
    883 	int kick, ix;
    884 
    885 	kick = 0;
    886 
    887 	/* already handled in gtmpsc_common_getc() */
    888 	if (sc->sc_rcvdrx == sc->sc_rcvrx)
    889 		return;
    890 
    891 	ix = sc->sc_rcvdrx;
    892 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
     893 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
    894 	    ix * sizeof(gtmpsc_pollrx_t),
    895 	    sizeof(sdma_desc_t),
    896 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    897 	csr = vrxp->rxdesc.sdma_csr;
    898 	while (!(csr & SDMA_CSR_RX_OWN)) {
    899 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
    900 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
    901 		    sizeof(vrxp->rxbuf),
    902 		    BUS_DMASYNC_POSTREAD);
    903 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
    904 		if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
    905 			int cn_trapped = 0;
    906 
    907 			cn_check_magic(sc->sc_tty->t_dev,
    908 			    CNC_BREAK, gtmpsc_cnm_state);
    909 			if (cn_trapped)
    910 				continue;
    911 #if defined(KGDB) && !defined(DDB)
    912 			if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
    913 				kgdb_connect(1);
    914 				continue;
    915 			}
    916 #endif
    917 		}
    918 
    919 		sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
    920 		kick = 1;
    921 
     922 		ix = (ix + 1) % GTMPSC_NRXDESC;
    923 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
     924 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
    925 		    ix * sizeof(gtmpsc_pollrx_t),
    926 		    sizeof(sdma_desc_t),
    927 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    928 		csr = vrxp->rxdesc.sdma_csr;
    929 	}
     930 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
    931 	    ix * sizeof(gtmpsc_pollrx_t),
    932 	    sizeof(sdma_desc_t),
    933 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    934 
    935 	if (kick) {
    936 		sc->sc_rcvdrx = ix;
    937 		sc->sc_rx_ready = 1;
    938 		softint_schedule(sc->sc_si);
    939 	}
    940 }
    941 
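         /*
          * gtmpsc_intr_tx - reclaim completed TX descriptors, apply any held
          * parameter change and queue the next chunk of output.
          */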
    942 STATIC __inline void
    943 gtmpsc_intr_tx(struct gtmpsc_softc *sc)
    944 {
    945 	gtmpsc_polltx_t *vtxp;
    946 	uint32_t csr;
    947 	int ix;
    948 
    949 	/*
    950 	 * If we've delayed a parameter change, do it now,
    951 	 * and restart output.
    952 	 */
    953 	if (sc->sc_heldchange) {
    954 		gtmpsc_loadchannelregs(sc);
    955 		sc->sc_heldchange = 0;
    956 		sc->sc_tbc = sc->sc_heldtbc;
    957 		sc->sc_heldtbc = 0;
    958 	}
    959 
    960 	/* Clean-up TX descriptors and buffers */
    961 	ix = sc->sc_lasttx;
    962 	while (ix != sc->sc_nexttx) {
    963 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
    964 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
    965 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
    966 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    967 		csr = vtxp->txdesc.sdma_csr;
    968 		if (csr & SDMA_CSR_TX_OWN) {
    969 			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
    970 			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
    971 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    972 			break;
    973 		}
    974 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
    975 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
    976 		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
    977 		ix = (ix + 1) % GTMPSC_NTXDESC;
    978 	}
    979 	sc->sc_lasttx = ix;
    980 
    981 	/* Output the next chunk of the contiguous buffer */
    982 	gtmpsc_write(sc);
    983 	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
    984 		sc->sc_tx_busy = 0;
    985 		sc->sc_tx_done = 1;
    986 		softint_schedule(sc->sc_si);
    987 		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
    988 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
    989 	}
    990 }
    991 
    992 /*
    993  * gtmpsc_write - write a buffer into the hardware
    994  */
    995 STATIC void
    996 gtmpsc_write(struct gtmpsc_softc *sc)
    997 {
    998 	gtmpsc_polltx_t *vtxp;
    999 	uint32_t sdcm, ix;
   1000 	int kick, n;
   1001 
   1002 	kick = 0;
   1003 	while (sc->sc_tbc > 0 && sc->sc_nexttx != sc->sc_lasttx) {
   1004 		n = min(sc->sc_tbc, GTMPSC_TXBUFSZ);
   1005 
   1006 		ix = sc->sc_nexttx;
   1007 		sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
   1008 
   1009 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
   1010 
   1011 		memcpy(vtxp->txbuf, sc->sc_tba, n);
   1012 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1013 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
   1014 		    sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
   1015 
   1016 		vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
   1017 		vtxp->txdesc.sdma_csr =
   1018 		    SDMA_CSR_TX_L	|
   1019 		    SDMA_CSR_TX_F	|
   1020 		    SDMA_CSR_TX_EI	|
   1021 		    SDMA_CSR_TX_OWN;
   1022 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1023 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
   1024 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1025 
   1026 		sc->sc_tbc -= n;
   1027 		sc->sc_tba += n;
   1028 		kick = 1;
   1029 	}
   1030 	if (kick) {
   1031 		/*
   1032 		 * now kick some SDMA
   1033 		 */
   1034 		sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
   1035 		if ((sdcm & SDMA_SDCM_TXD) == 0)
   1036 			GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
   1037 	}
   1038 }
   1039 
   1040 /*
   1041  * gtmpsc_txflush - wait for output to drain
   1042  */
   1043 STATIC void
   1044 gtmpsc_txflush(gtmpsc_softc_t *sc)
   1045 {
   1046 	gtmpsc_polltx_t *vtxp;
   1047 	int ix, limit = 4000000;	/* 4 seconds */
   1048 
   1049 	ix = sc->sc_nexttx - 1;
   1050 	if (ix < 0)
   1051 		ix = GTMPSC_NTXDESC - 1;
   1052 
   1053 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
   1054 	while (limit > 0) {
   1055 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1056 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
   1057 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1058 		if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
   1059 			break;
   1060 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1061 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
   1062 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1063 		DELAY(1);
   1064 		limit -= 1;
   1065 	}
   1066 }
   1067 
   1068 /*
   1069  * gtmpsc_rxdesc_init - set up RX descriptor ring
   1070  */
   1071 STATIC void
   1072 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
   1073 {
   1074 	gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
   1075 	sdma_desc_t *dp;
   1076 	int i;
   1077 
   1078 	first_prxp = prxp =
   1079 	    (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
   1080 	vrxp = sc->sc_poll_sdmapage->rx;
    1081 	for (i = 0; i < GTMPSC_NRXDESC - 1; i++) {
   1082 		dp = &vrxp->rxdesc;
   1083 		dp->sdma_csr =
   1084 		    SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
   1085 		dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
   1086 		dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
   1087 		vrxp++;
   1088 		prxp++;
   1089 		dp->sdma_next = (uint32_t)&prxp->rxdesc;
   1090 
   1091 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1092 		    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
   1093 		    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
   1094 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1095 		    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
   1096 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1097 	}
   1098 	dp = &vrxp->rxdesc;
   1099 	dp->sdma_csr =
   1100 	    SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
   1101 	dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
   1102 	dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
   1103 	dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
   1104 
   1105 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1106 	    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
   1107 	    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
   1108 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1109 	    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
   1110 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1111 
   1112 	sc->sc_rcvcnt = 0;
   1113 	sc->sc_roffset = 0;
   1114 	sc->sc_rcvrx = 0;
   1115 	sc->sc_rcvdrx = 0;
   1116 }
   1117 
   1118 /*
   1119  * gtmpsc_txdesc_init - set up TX descriptor ring
   1120  */
   1121 STATIC void
   1122 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
   1123 {
   1124 	gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
   1125 	sdma_desc_t *dp;
   1126 	int i;
   1127 
   1128 	first_ptxp = ptxp =
   1129 	    (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
   1130 	vtxp = sc->sc_poll_sdmapage->tx;
    1131 	for (i = 0; i < GTMPSC_NTXDESC - 1; i++) {
   1132 		dp = &vtxp->txdesc;
   1133 		dp->sdma_csr = 0;
   1134 		dp->sdma_cnt = 0;
   1135 		dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
   1136 		vtxp++;
   1137 		ptxp++;
   1138 		dp->sdma_next = (uint32_t)&ptxp->txdesc;
   1139 	}
   1140 	dp = &vtxp->txdesc;
   1141 	dp->sdma_csr = 0;
   1142 	dp->sdma_cnt = 0;
   1143 	dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
   1144 	dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
   1145 
   1146 	sc->sc_nexttx = 0;
   1147 	sc->sc_lasttx = 0;
   1148 }
   1149 
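         /*
          * gtmpscinit_stop - abort MPSC reception and SDMA activity so the
          * channel is quiescent before (re)initialization.
          */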
   1150 STATIC void
   1151 gtmpscinit_stop(struct gtmpsc_softc *sc)
   1152 {
   1153 	uint32_t csr;
   1154 	int timo = 10000;	/* XXXX */
   1155 
   1156 	/* Abort MPSC Rx (aborting Tx messes things up) */
   1157 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
   1158 
   1159 	/* abort SDMA RX and stop TX for MPSC unit */
   1160 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
   1161 
   1162 	/* poll for SDMA RX abort completion */
   1163 	for (; timo > 0; timo--) {
   1164 		csr = GT_SDMA_READ(sc, SDMA_SDCM);
   1165 		if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
   1166 			break;
   1167 		DELAY(50);
   1168 	}
   1169 }
   1170 
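         /*
          * gtmpscinit_start - point the SDMA at the descriptor rings, program
          * the channel registers and put the MPSC in UART mode.
          */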
   1171 STATIC void
   1172 gtmpscinit_start(struct gtmpsc_softc *sc)
   1173 {
   1174 
   1175 	/*
   1176 	 * Set pointers of current/first descriptor of TX to SDMA register.
   1177 	 */
   1178 	GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
   1179 	GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
   1180 
   1181 	/*
    1182 	 * Set pointer of current descriptor of RX to SDMA register.
   1183 	 */
   1184 	GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
   1185 
   1186 	/*
   1187 	 * initialize SDMA unit Configuration Register
   1188 	 */
   1189 	GT_SDMA_WRITE(sc, SDMA_SDC,
   1190 	    SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);
   1191 
   1192 	gtmpsc_loadchannelregs(sc);
   1193 
   1194 	/*
   1195 	 * set MPSC LO and HI port config registers for GTMPSC unit
   1196  	 */
   1197 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
   1198 	    GTMPSC_MMCR_LO_MODE_UART	|
   1199 	    GTMPSC_MMCR_LO_ET		|
   1200 	    GTMPSC_MMCR_LO_ER		|
   1201 	    GTMPSC_MMCR_LO_NLM);
   1202 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
   1203 	    GTMPSC_MMCR_HI_TCDV_DEFAULT	|
   1204 	    GTMPSC_MMCR_HI_RDW		|
   1205 	    GTMPSC_MMCR_HI_RCDV_DEFAULT);
   1206 
   1207 	/*
    1208 	 * tell the MPSC receiver to enter hunt mode
   1209 	 */
   1210 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
   1211 }
   1212 
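         /*
          * gtmpscshutdown - called after the last close: drop the (faked)
          * carrier and mask this unit's RX interrupt.
          */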
   1213 STATIC void
   1214 gtmpscshutdown(struct gtmpsc_softc *sc)
   1215 {
   1216 	struct tty *tp;
   1217 
   1218 #ifdef KGDB
    1219 	if ((sc->sc_flags & GTMPSC_KGDB) != 0)
   1220 		return;
   1221 #endif
   1222 	tp = sc->sc_tty;
   1223 	mutex_spin_enter(&sc->sc_lock);
   1224 	/* Fake carrier off */
   1225 	(void) (*tp->t_linesw->l_modem)(tp, 0);
   1226 	sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
   1227 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
   1228 	mutex_spin_exit(&sc->sc_lock);
   1229 }
   1230 
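         /*
          * gtmpsc_loadchannelregs - program the baud rate generator, the
          * maximum-idle register and the protocol configuration from the
          * current baudrate and cflag.
          */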
   1231 STATIC void
   1232 gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
   1233 {
   1234 
   1235 	if (sc->sc_dev != NULL)
   1236 		gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
   1237 	    	    GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
   1238 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
   1239 
   1240 	/*
   1241 	 * set MPSC Protocol configuration register for GTMPSC unit
   1242 	 */
   1243 	GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
   1244 }
   1245 
   1246 
   1247 #ifdef MPSC_CONSOLE
   1248 /*
   1249  * Following are all routines needed for MPSC to act as console
   1250  */
   1251 STATIC int
   1252 gtmpsccngetc(dev_t dev)
   1253 {
   1254 
   1255 	return gtmpsc_common_getc(&gtmpsc_cn_softc);
   1256 }
   1257 
   1258 STATIC void
   1259 gtmpsccnputc(dev_t dev, int c)
   1260 {
   1261 
   1262 	gtmpsc_common_putc(&gtmpsc_cn_softc, c);
   1263 }
   1264 
   1265 STATIC void
   1266 gtmpsccnpollc(dev_t dev, int on)
   1267 {
   1268 }
   1269 
   1270 STATIC void
   1271 gtmpsccnhalt(dev_t dev)
   1272 {
   1273 	gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
   1274 	uint32_t csr;
   1275 
   1276 	/*
   1277 	 * flush TX buffers
   1278 	 */
   1279 	gtmpsc_txflush(sc);
   1280 
   1281 	/*
   1282 	 * stop MPSC unit RX
   1283 	 */
   1284 	csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
   1285 	csr &= ~GTMPSC_CHR2_EH;
   1286 	csr |= GTMPSC_CHR2_RXABORT;
   1287 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
   1288 
   1289 	DELAY(GTMPSC_RESET_DELAY);
   1290 
   1291 	/*
   1292 	 * abort SDMA RX for MPSC unit
   1293 	 */
   1294 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
   1295 }
   1296 
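         /*
          * gtmpsccnattach - attach the MPSC as the system console.  Expected
          * to be called from machine-dependent console initialization; maps
          * the registers, sets up the polled DMA rings, prints a banner and
          * installs gtmpsc_consdev as cn_tab.
          */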
   1297 int
   1298 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
   1299 	       int unit, int brg, int speed, tcflag_t tcflag)
   1300 {
   1301 	struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
   1302 	int i, res;
   1303 	const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
   1304 
   1305 	res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
   1306 	if (res != 0)
   1307 		return res;
   1308 
   1309 	gtmpscinit_stop(sc);
   1310 	gtmpscinit_start(sc);
   1311 
   1312 	/*
   1313 	 * enable SDMA receive
   1314 	 */
   1315 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
   1316 
   1317 	for (i = 0; i < sizeof(cp); i++) {
   1318 		if (*(cp + i) == 0)
   1319 			break;
   1320 		gtmpsc_common_putc(sc, *(cp + i));
   1321 	}
   1322 
   1323 	cn_tab = &gtmpsc_consdev;
   1324 	cn_init_magic(&gtmpsc_cnm_state);
   1325 
   1326 	return 0;
   1327 }
   1328 
   1329 /*
    1330  * gtmpsc_hackinit - hacks required to support GTMPSC console
   1331  */
   1332 STATIC int
   1333 gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
   1334 		bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
   1335 		int baudrate, tcflag_t tcflag)
   1336 {
   1337 	gtmpsc_poll_sdma_t *cn_dmapage =
   1338 	    (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
   1339 	int error;
   1340 
   1341 	DPRINTF(("hackinit\n"));
   1342 
   1343 	memset(sc, 0, sizeof(struct gtmpsc_softc));
   1344 	error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
   1345 	    &sc->sc_mpsch);
   1346 	if (error != 0)
   1347 		goto fail0;
   1348 
   1349 	error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
   1350 	    &sc->sc_sdmah);
   1351 	if (error != 0)
   1352 		goto fail1;
   1353 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
   1354 	   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
   1355 	if (error != 0)
   1356 		goto fail2;
   1357 	error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
   1358 	    sizeof(gtmpsc_polltx_t), NULL,
   1359 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
   1360 	if (error != 0)
   1361 		goto fail3;
   1362 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
   1363 	   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
   1364 	   &sc->sc_rxdma_map);
   1365 	if (error != 0)
   1366 		goto fail4;
   1367 	error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
   1368 	    sizeof(gtmpsc_pollrx_t), NULL,
   1369 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
   1370 	if (error != 0)
   1371 		goto fail5;
   1372 
   1373 	sc->sc_iot = iot;
   1374 	sc->sc_dmat = dmat;
   1375 	sc->sc_poll_sdmapage = cn_dmapage;
   1376 	sc->sc_brg = brg;
   1377 	sc->sc_baudrate = baudrate;
   1378 	sc->sc_cflag = tcflag;
   1379 
   1380 	gtmpsc_txdesc_init(sc);
   1381 	gtmpsc_rxdesc_init(sc);
   1382 
   1383 	return 0;
   1384 
   1385 fail5:
   1386 	bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
   1387 fail4:
   1388 	bus_dmamap_unload(dmat, sc->sc_txdma_map);
   1389 fail3:
   1390 	bus_dmamap_destroy(dmat, sc->sc_txdma_map);
   1391 fail2:
   1392 	bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
   1393 fail1:
   1394 	bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
   1395 fail0:
   1396 	return error;
   1397 }
   1398 #endif	/* MPSC_CONSOLE */
   1399 
   1400 #ifdef KGDB
   1401 STATIC int
   1402 gtmpsc_kgdb_getc(void *arg)
   1403 {
   1404 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
   1405 
   1406 	return gtmpsc_common_getc(sc);
   1407 }
   1408 
   1409 STATIC void
   1410 gtmpsc_kgdb_putc(void *arg, int c)
   1411 {
   1412 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
   1413 
   1414 	return gtmpsc_common_putc(sc, c);
   1415 }
   1416 #endif /* KGDB */
   1417 
   1418 #if defined(MPSC_CONSOLE) || defined(KGDB)
   1419 /*
   1420  * gtmpsc_common_getc - polled console read
   1421  *
    1422  *	We copy data from the DMA buffers into a buffer in the softc
    1423  *	to reduce descriptor ownership turnaround time; the MPSC can
    1424  *	crater if it wraps the descriptor rings, which happens
    1425  *	asynchronously and is throttled only by line speed.
   1426  */
   1427 STATIC int
   1428 gtmpsc_common_getc(struct gtmpsc_softc *sc)
   1429 {
   1430 	gtmpsc_pollrx_t *vrxp;
   1431 	uint32_t csr;
   1432 	int ix, ch, wdog_interval = 0;
   1433 
   1434 	if (!cold)
   1435 		mutex_spin_enter(&sc->sc_lock);
   1436 
   1437 	ix = sc->sc_rcvdrx;
   1438 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
   1439 	while (sc->sc_rcvcnt == 0) {
   1440 		/* Wait receive */
   1441 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1442 		    ix * sizeof(gtmpsc_pollrx_t),
   1443 		    sizeof(sdma_desc_t),
   1444 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1445 		csr = vrxp->rxdesc.sdma_csr;
   1446 		if (csr & SDMA_CSR_RX_OWN) {
   1447 			GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
   1448 			    GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
   1449 			if (wdog_interval++ % 32)
   1450 				gt_watchdog_service();
   1451 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1452 			    ix * sizeof(gtmpsc_pollrx_t),
   1453 			    sizeof(sdma_desc_t),
   1454 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1455 			DELAY(50);
   1456 			continue;
   1457 		}
   1458 		if (csr & SDMA_CSR_RX_ES)
   1459 			aprint_error_dev(sc->sc_dev,
   1460 			    "RX error, rxdesc csr 0x%x\n", csr);
   1461 
   1462 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
   1463 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
   1464 		    sizeof(vrxp->rxbuf),
   1465 		    BUS_DMASYNC_POSTREAD);
   1466 
   1467 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
   1468 		sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
   1469 		sc->sc_roffset = 0;
   1470 		sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
   1471 
   1472 		if (sc->sc_rcvcnt == 0) {
   1473 			/* cleanup this descriptor, and return to DMA */
   1474 			CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
   1475 			sc->sc_rcvrx = sc->sc_rcvdrx;
   1476 		}
   1477 
   1478 		ix = sc->sc_rcvdrx;
   1479 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
   1480 	}
   1481 	ch = vrxp->rxbuf[sc->sc_roffset++];
   1482 	sc->sc_rcvcnt--;
   1483 
   1484 	if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
   1485 		/* cleanup this descriptor, and return to DMA */
   1486 		CLEANUP_AND_RETURN_RXDMA(sc, ix);
   1487 		sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
   1488 	}
   1489 
   1490 	gt_watchdog_service();
   1491 
   1492 	if (!cold)
   1493 		mutex_spin_exit(&sc->sc_lock);
   1494 	return ch;
   1495 }
   1496 
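         /*
          * gtmpsc_common_putc - polled console putc: place the character in
          * the next free TX descriptor, kick the SDMA and wait for completion.
          */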
   1497 STATIC void
   1498 gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
   1499 {
   1500 	gtmpsc_polltx_t *vtxp;
   1501 	int ix;
   1502 	const int nc = 1;
   1503 
   1504 	/* Get a DMA descriptor */
   1505 	if (!cold)
   1506 		mutex_spin_enter(&sc->sc_lock);
   1507 	ix = sc->sc_nexttx;
   1508 	sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
   1509 	if (sc->sc_nexttx == sc->sc_lasttx) {
   1510 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
   1511 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
   1512 	}
   1513 	if (!cold)
   1514 		mutex_spin_exit(&sc->sc_lock);
   1515 
   1516 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
   1517 	vtxp->txbuf[0] = c;
   1518 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1519 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
   1520 	    sizeof(vtxp->txbuf),
   1521 	    BUS_DMASYNC_PREWRITE);
   1522 
   1523 	vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
   1524 	vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
   1525 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1526 	    ix * sizeof(gtmpsc_polltx_t),
   1527 	    sizeof(sdma_desc_t),
   1528 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1529 
   1530 	if (!cold)
   1531 		mutex_spin_enter(&sc->sc_lock);
   1532 	/*
   1533 	 * now kick some SDMA
   1534 	 */
   1535 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
   1536 
   1537 	while (sc->sc_lasttx != sc->sc_nexttx) {
   1538 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
   1539 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
   1540 	}
   1541 	if (!cold)
   1542 		mutex_spin_exit(&sc->sc_lock);
   1543 }
   1544 
   1545 /*
    1546  * gtmpsc_common_putc_wait_complete - wait for a TX descriptor to complete
   1547  */
   1548 STATIC void
   1549 gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
   1550 {
   1551 	gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
   1552 	uint32_t csr;
   1553 	int wdog_interval = 0;
   1554 
   1555 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1556 	    ix * sizeof(gtmpsc_polltx_t),
   1557 	    sizeof(sdma_desc_t),
   1558 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1559 	csr = vtxp->txdesc.sdma_csr;
   1560 	while (csr & SDMA_CSR_TX_OWN) {
   1561 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1562 		    ix * sizeof(gtmpsc_polltx_t),
   1563 		    sizeof(sdma_desc_t),
   1564 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1565 		DELAY(40);
   1566 		if (wdog_interval++ % 32)
   1567 			gt_watchdog_service();
   1568 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1569 		    ix * sizeof(gtmpsc_polltx_t),
   1570 		    sizeof(sdma_desc_t),
   1571 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1572 		csr = vtxp->txdesc.sdma_csr;
   1573 	}
   1574 	if (csr & SDMA_CSR_TX_ES)
   1575 		aprint_error_dev(sc->sc_dev,
   1576 		    "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
   1577 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
   1578 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
   1579 	    sizeof(vtxp->txbuf),
   1580 	    BUS_DMASYNC_POSTWRITE);
   1581 }
   1582 #endif	/* defined(MPSC_CONSOLE) || defined(KGDB) */
   1583