1 1.37.4.1 rmind /* $NetBSD: gtmpsc.c,v 1.37.4.2 2011/03/05 20:53:26 rmind Exp $ */
2 1.1 matt /*
3 1.37.4.1 rmind * Copyright (c) 2009 KIYOHARA Takashi
4 1.1 matt * All rights reserved.
5 1.1 matt *
6 1.1 matt * Redistribution and use in source and binary forms, with or without
7 1.1 matt * modification, are permitted provided that the following conditions
8 1.1 matt * are met:
9 1.1 matt * 1. Redistributions of source code must retain the above copyright
10 1.1 matt * notice, this list of conditions and the following disclaimer.
11 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
12 1.1 matt * notice, this list of conditions and the following disclaimer in the
13 1.1 matt * documentation and/or other materials provided with the distribution.
14 1.1 matt *
15 1.37.4.1 rmind * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 1.37.4.1 rmind * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 1.37.4.1 rmind * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 1.37.4.1 rmind * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 1.37.4.1 rmind * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 1.37.4.1 rmind * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 1.37.4.1 rmind * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 1.37.4.1 rmind * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 1.37.4.1 rmind * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 1.37.4.1 rmind * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
26 1.1 matt */
27 1.1 matt /*
28 1.37.4.1 rmind * mpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
29 1.1 matt */
30 1.9 lukem
31 1.9 lukem #include <sys/cdefs.h>
32 1.37.4.2 rmind __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.37.4.2 2011/03/05 20:53:26 rmind Exp $");
33 1.1 matt
34 1.1 matt #include "opt_kgdb.h"
35 1.1 matt
36 1.1 matt #include <sys/param.h>
37 1.37.4.1 rmind #include <sys/bus.h>
38 1.1 matt #include <sys/conf.h>
39 1.1 matt #include <sys/device.h>
40 1.37.4.1 rmind #include <sys/fcntl.h>
41 1.37.4.1 rmind #include <sys/intr.h>
42 1.18 he #include <sys/kauth.h>
43 1.37.4.1 rmind #include <sys/kernel.h>
44 1.37.4.1 rmind #include <sys/mutex.h>
45 1.1 matt #include <sys/proc.h>
46 1.1 matt #include <sys/systm.h>
47 1.37.4.1 rmind #include <sys/timepps.h>
48 1.1 matt #include <sys/tty.h>
49 1.1 matt #ifdef KGDB
50 1.1 matt #include <sys/kgdb.h>
51 1.1 matt #endif
52 1.6 thorpej
53 1.1 matt #include <dev/cons.h>
54 1.1 matt
55 1.1 matt #include <dev/marvell/gtreg.h>
56 1.1 matt #include <dev/marvell/gtvar.h>
57 1.37.4.1 rmind #include <dev/marvell/gtbrgreg.h>
58 1.37.4.1 rmind #include <dev/marvell/gtbrgvar.h>
59 1.1 matt #include <dev/marvell/gtsdmareg.h>
60 1.37.4.1 rmind #include <dev/marvell/gtsdmavar.h>
61 1.37.4.1 rmind #include <dev/marvell/gtmpscreg.h>
62 1.1 matt #include <dev/marvell/gtmpscvar.h>
63 1.37.4.1 rmind #include <dev/marvell/marvellreg.h>
64 1.37.4.1 rmind #include <dev/marvell/marvellvar.h>
65 1.37.4.1 rmind
66 1.37.4.1 rmind #include "gtmpsc.h"
67 1.37.4.1 rmind #include "ioconf.h"
68 1.37.4.1 rmind #include "locators.h"
69 1.1 matt
70 1.5 matt /*
71 1.5 matt * Wait 2 characters time for RESET_DELAY
72 1.5 matt */
73 1.5 matt #define GTMPSC_RESET_DELAY (2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
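/*
 * Illustrative arithmetic only: if GT_MPSC_DEFAULT_BAUD_RATE is assumed
 * to be 9600, the macro above works out to
 *
 *	2 * 8 * 1000000 / 9600 == 1666 microseconds
 *
 * i.e. roughly two 8-bit character times on the wire.
 */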
74 1.1 matt
75 1.1 matt
76 1.1 matt #if defined(DEBUG)
77 1.5 matt unsigned int gtmpsc_debug = 0;
78 1.1 matt # define STATIC
79 1.37.4.1 rmind # define DPRINTF(x) do { if (gtmpsc_debug) printf x ; } while (0)
80 1.1 matt #else
81 1.1 matt # define STATIC static
82 1.1 matt # define DPRINTF(x)
83 1.1 matt #endif
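/*
 * DPRINTF takes a doubly-parenthesized argument list so the whole call
 * compiles away when DEBUG is not defined; a typical (illustrative) use:
 *
 *	DPRINTF(("gtmpsc%d: rx csr 0x%x\n", sc->sc_unit, csr));
 */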
84 1.1 matt
85 1.1 matt #define GTMPSCUNIT_MASK 0x7ffff
86 1.1 matt #define GTMPSCDIALOUT_MASK 0x80000
87 1.1 matt
88 1.1 matt #define GTMPSCUNIT(x) (minor(x) & GTMPSCUNIT_MASK)
89 1.1 matt #define GTMPSCDIALOUT(x) (minor(x) & GTMPSCDIALOUT_MASK)
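/*
 * Worked example (illustrative): minor number 0x80001 decodes to unit 1
 * with the dial-out bit set, i.e. GTMPSCUNIT() == 1 and GTMPSCDIALOUT() != 0.
 */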
90 1.1 matt
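/*
 * CLEANUP_AND_RETURN_RXDMA re-arms one RX descriptor: it hands ownership
 * back to the SDMA engine, restores the buffer-size field, and syncs both
 * the data buffer and the descriptor so the hardware sees the update.
 */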
91 1.37.4.1 rmind #define CLEANUP_AND_RETURN_RXDMA(sc, ix) \
92 1.37.4.1 rmind do { \
93 1.37.4.1 rmind gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
94 1.37.4.1 rmind \
95 1.37.4.1 rmind _vrxp->rxdesc.sdma_csr = \
96 1.37.4.1 rmind SDMA_CSR_RX_L | \
97 1.37.4.1 rmind SDMA_CSR_RX_F | \
98 1.37.4.1 rmind SDMA_CSR_RX_OWN | \
99 1.37.4.1 rmind SDMA_CSR_RX_EI; \
100 1.37.4.1 rmind _vrxp->rxdesc.sdma_cnt = \
101 1.37.4.1 rmind GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT; \
102 1.37.4.1 rmind bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map, \
103 1.37.4.1 rmind (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t), \
104 1.37.4.1 rmind sizeof(_vrxp->rxbuf), \
105 1.37.4.1 rmind BUS_DMASYNC_PREREAD); \
106 1.37.4.1 rmind bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map, \
107 1.37.4.1 rmind (ix) * sizeof(gtmpsc_pollrx_t), \
108 1.37.4.1 rmind sizeof(sdma_desc_t), \
109 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
110 1.37.4.1 rmind } while (0)
111 1.37.4.1 rmind
112 1.37.4.1 rmind
113 1.37 cegger STATIC int gtmpscmatch(device_t, cfdata_t, void *);
114 1.37 cegger STATIC void gtmpscattach(device_t, device_t, void *);
115 1.37.4.1 rmind
116 1.37.4.1 rmind STATIC void gtmpsc_softintr(void *);
117 1.37.4.1 rmind
118 1.2 matt STATIC void gtmpscstart(struct tty *);
119 1.2 matt STATIC int gtmpscparam(struct tty *, struct termios *);
120 1.1 matt
121 1.37.4.1 rmind STATIC void gtmpsc_shutdownhook(void *);
122 1.1 matt
123 1.37.4.1 rmind STATIC uint32_t cflag2mpcr(tcflag_t);
124 1.37.4.1 rmind STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
125 1.37.4.1 rmind STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
126 1.37.4.1 rmind STATIC void gtmpsc_write(struct gtmpsc_softc *);
127 1.37.4.1 rmind STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
128 1.37.4.1 rmind STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
129 1.37.4.1 rmind STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
130 1.37.4.1 rmind STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
131 1.37.4.1 rmind STATIC void gtmpscinit_start(struct gtmpsc_softc *);
132 1.37.4.1 rmind STATIC void gtmpscshutdown(struct gtmpsc_softc *);
133 1.37.4.1 rmind STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
134 1.1 matt
135 1.37.4.1 rmind #ifdef MPSC_CONSOLE
136 1.37.4.1 rmind STATIC int gtmpsccngetc(dev_t);
137 1.37.4.1 rmind STATIC void gtmpsccnputc(dev_t, int);
138 1.37.4.1 rmind STATIC void gtmpsccnpollc(dev_t, int);
139 1.37.4.1 rmind STATIC void gtmpsccnhalt(dev_t);
140 1.2 matt
141 1.37.4.1 rmind STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
142 1.37.4.1 rmind bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
143 1.37.4.1 rmind #endif
144 1.37.4.1 rmind
145 1.37.4.1 rmind #if defined(MPSC_CONSOLE) || defined(KGDB)
146 1.37.4.1 rmind STATIC int gtmpsc_common_getc(struct gtmpsc_softc *);
147 1.37.4.1 rmind STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
148 1.37.4.1 rmind STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
149 1.37.4.1 rmind #endif
150 1.1 matt
151 1.1 matt dev_type_open(gtmpscopen);
152 1.1 matt dev_type_close(gtmpscclose);
153 1.1 matt dev_type_read(gtmpscread);
154 1.1 matt dev_type_write(gtmpscwrite);
155 1.1 matt dev_type_ioctl(gtmpscioctl);
156 1.1 matt dev_type_stop(gtmpscstop);
157 1.1 matt dev_type_tty(gtmpsctty);
158 1.1 matt dev_type_poll(gtmpscpoll);
159 1.1 matt
160 1.1 matt const struct cdevsw gtmpsc_cdevsw = {
161 1.1 matt gtmpscopen, gtmpscclose, gtmpscread, gtmpscwrite, gtmpscioctl,
162 1.1 matt gtmpscstop, gtmpsctty, gtmpscpoll, nommap, ttykqfilter, D_TTY
163 1.1 matt };
164 1.1 matt
165 1.37.4.1 rmind CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
166 1.1 matt gtmpscmatch, gtmpscattach, NULL, NULL);
167 1.1 matt
168 1.1 matt
169 1.37.4.1 rmind STATIC uint32_t sdma_imask; /* soft copy of SDMA IMASK reg */
170 1.37.4.1 rmind STATIC struct cnm_state gtmpsc_cnm_state;
171 1.1 matt
172 1.1 matt #ifdef KGDB
173 1.1 matt static int gtmpsc_kgdb_addr;
174 1.1 matt static int gtmpsc_kgdb_attached;
175 1.1 matt
176 1.2 matt STATIC int gtmpsc_kgdb_getc(void *);
177 1.2 matt STATIC void gtmpsc_kgdb_putc(void *, int);
178 1.1 matt #endif /* KGDB */
179 1.1 matt
180 1.37.4.1 rmind #ifdef MPSC_CONSOLE
181 1.1 matt /*
182 1.1 matt * hacks for console initialization
183 1.1 matt * which happens prior to autoconfig "attach"
184 1.6 thorpej *
185 1.6 thorpej * XXX Assumes PAGE_SIZE is a constant!
186 1.1 matt */
187 1.37.4.1 rmind gtmpsc_softc_t gtmpsc_cn_softc;
188 1.37.4.1 rmind STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
189 1.1 matt
190 1.1 matt
191 1.37.4.1 rmind static struct consdev gtmpsc_consdev = {
192 1.37.4.1 rmind NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
193 1.37.4.1 rmind NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
194 1.37.4.1 rmind };
195 1.37.4.1 rmind #endif
196 1.1 matt
197 1.1 matt
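/*
 * Register accessors: sc_mpsch and sc_sdmah are the MPSC and SDMA
 * register subregions mapped in gtmpscattach() or gtmpsc_hackinit().
 */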
198 1.37.4.1 rmind #define GT_MPSC_READ(sc, o) \
199 1.37.4.1 rmind bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
200 1.37.4.1 rmind #define GT_MPSC_WRITE(sc, o, v) \
201 1.37.4.1 rmind bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
202 1.37.4.1 rmind #define GT_SDMA_READ(sc, o) \
203 1.37.4.1 rmind bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
204 1.37.4.1 rmind #define GT_SDMA_WRITE(sc, o, v) \
205 1.37.4.1 rmind bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
206 1.1 matt
207 1.1 matt
208 1.37.4.1 rmind /* ARGSUSED */
209 1.1 matt STATIC int
210 1.37.4.1 rmind gtmpscmatch(device_t parent, cfdata_t match, void *aux)
211 1.1 matt {
212 1.37.4.1 rmind struct marvell_attach_args *mva = aux;
213 1.1 matt
214 1.37.4.1 rmind if (strcmp(mva->mva_name, match->cf_name) != 0)
215 1.1 matt return 0;
216 1.37.4.2 rmind if (mva->mva_offset == MVA_OFFSET_DEFAULT)
217 1.37.4.1 rmind return 0;
218 1.1 matt
219 1.37.4.1 rmind mva->mva_size = GTMPSC_SIZE;
220 1.37.4.1 rmind return 1;
221 1.1 matt }
222 1.1 matt
223 1.37.4.1 rmind /* ARGSUSED */
224 1.1 matt STATIC void
225 1.37 cegger gtmpscattach(device_t parent, device_t self, void *aux)
226 1.1 matt {
227 1.16 thorpej struct gtmpsc_softc *sc = device_private(self);
228 1.37.4.1 rmind struct marvell_attach_args *mva = aux;
229 1.37.4.1 rmind bus_dma_segment_t segs;
230 1.1 matt struct tty *tp;
231 1.37.4.1 rmind int rsegs, err, unit;
232 1.23 christos void *kva;
233 1.1 matt
234 1.37.4.1 rmind aprint_naive("\n");
235 1.37.4.1 rmind aprint_normal(": Multi-Protocol Serial Controller\n");
236 1.3 matt
237 1.37.4.2 rmind if (mva->mva_unit != MVA_UNIT_DEFAULT)
238 1.37.4.1 rmind unit = mva->mva_unit;
239 1.37.4.1 rmind else
240 1.37.4.1 rmind unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
241 1.1 matt
242 1.37.4.1 rmind #ifdef MPSC_CONSOLE
243 1.37.4.1 rmind if (cn_tab == &gtmpsc_consdev &&
244 1.37.4.1 rmind cn_tab->cn_dev == makedev(0, unit)) {
245 1.37.4.1 rmind gtmpsc_cn_softc.sc_dev = self;
246 1.37.4.1 rmind memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
247 1.37.4.1 rmind sc->sc_flags = GTMPSC_CONSOLE;
248 1.37.4.1 rmind } else
249 1.1 matt #endif
250 1.37.4.1 rmind {
251 1.37.4.1 rmind if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
252 1.37.4.1 rmind mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
253 1.37.4.1 rmind aprint_error_dev(self, "Cannot map MPSC registers\n");
254 1.37.4.1 rmind return;
255 1.37.4.1 rmind }
256 1.37.4.1 rmind if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
257 1.37.4.1 rmind GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
258 1.37.4.1 rmind aprint_error_dev(self, "Cannot map SDMA registers\n");
259 1.37.4.1 rmind return;
260 1.37.4.1 rmind }
261 1.37.4.1 rmind sc->sc_dev = self;
262 1.37.4.1 rmind sc->sc_unit = unit;
263 1.37.4.1 rmind sc->sc_iot = mva->mva_iot;
264 1.37.4.1 rmind sc->sc_dmat = mva->mva_dmat;
265 1.37.4.1 rmind
266 1.37.4.1 rmind err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
267 1.37.4.1 rmind &segs, 1, &rsegs, BUS_DMA_NOWAIT);
268 1.37.4.1 rmind if (err) {
269 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
270 1.37.4.1 rmind "bus_dmamem_alloc error 0x%x\n", err);
271 1.37.4.1 rmind goto fail0;
272 1.37.4.1 rmind }
273 1.37.4.1 rmind err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
274 1.37.4.1 rmind BUS_DMA_NOWAIT);
275 1.37.4.1 rmind if (err) {
276 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
277 1.37.4.1 rmind "bus_dmamem_map error 0x%x\n", err);
278 1.37.4.1 rmind goto fail1;
279 1.37.4.1 rmind }
280 1.37.4.1 rmind memset(kva, 0, PAGE_SIZE); /* paranoid/superfluous */
281 1.37.4.1 rmind sc->sc_poll_sdmapage = kva;
282 1.1 matt
283 1.37.4.1 rmind err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
284 1.37.4.1 rmind sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
285 1.37.4.1 rmind &sc->sc_txdma_map);
286 1.37.4.1 rmind if (err != 0) {
287 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
288 1.37.4.1 rmind "bus_dmamap_create error 0x%x\n", err);
289 1.37.4.1 rmind goto fail2;
290 1.37.4.1 rmind }
291 1.37.4.1 rmind err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
292 1.37.4.1 rmind sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
293 1.37.4.1 rmind NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
294 1.37.4.1 rmind if (err != 0) {
295 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
296 1.37.4.1 rmind "bus_dmamap_load tx error 0x%x\n", err);
297 1.37.4.1 rmind goto fail3;
298 1.37.4.1 rmind }
299 1.37.4.1 rmind err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
300 1.37.4.1 rmind sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
301 1.37.4.1 rmind &sc->sc_rxdma_map);
302 1.37.4.1 rmind if (err != 0) {
303 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
304 1.37.4.1 rmind "bus_dmamap_create rx error 0x%x\n", err);
305 1.37.4.1 rmind goto fail4;
306 1.37.4.1 rmind }
307 1.37.4.1 rmind err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
308 1.37.4.1 rmind sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
309 1.37.4.1 rmind NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
310 1.37.4.1 rmind if (err != 0) {
311 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
312 1.37.4.1 rmind "bus_dmamap_load rx error 0x%x\n", err);
313 1.37.4.1 rmind goto fail5;
314 1.37.4.1 rmind }
315 1.4 matt
316 1.37.4.1 rmind sc->sc_brg = unit; /* XXXXX */
317 1.37.4.1 rmind sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
318 1.4 matt }
319 1.37.4.1 rmind aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
320 1.37.4.1 rmind GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
321 1.1 matt
322 1.37.4.1 rmind sc->sc_rx_ready = 0;
323 1.37.4.1 rmind sc->sc_tx_busy = 0;
324 1.37.4.1 rmind sc->sc_tx_done = 0;
325 1.37.4.1 rmind sc->sc_tx_stopped = 0;
326 1.37.4.1 rmind sc->sc_heldchange = 0;
327 1.1 matt
328 1.37.4.1 rmind gtmpsc_txdesc_init(sc);
329 1.37.4.1 rmind gtmpsc_rxdesc_init(sc);
330 1.1 matt
331 1.37.4.1 rmind sc->sc_tty = tp = ttymalloc();
332 1.1 matt tp->t_oproc = gtmpscstart;
333 1.1 matt tp->t_param = gtmpscparam;
334 1.1 matt tty_attach(tp);
335 1.1 matt
336 1.37.4.1 rmind mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
337 1.37.4.1 rmind
338 1.37.4.1 rmind /*
339 1.37.4.1 rmind * clear any pending SDMA interrupts for this unit
340 1.37.4.1 rmind */
341 1.37.4.1 rmind (void) gt_sdma_icause(device_parent(sc->sc_dev),
342 1.37.4.1 rmind SDMA_INTR_RXBUF(sc->sc_unit) |
343 1.37.4.1 rmind SDMA_INTR_RXERR(sc->sc_unit) |
344 1.37.4.1 rmind SDMA_INTR_TXBUF(sc->sc_unit) |
345 1.37.4.1 rmind SDMA_INTR_TXEND(sc->sc_unit));
346 1.1 matt
347 1.24 ad sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
348 1.3 matt if (sc->sc_si == NULL)
349 1.24 ad panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");
350 1.1 matt
351 1.1 matt shutdownhook_establish(gtmpsc_shutdownhook, sc);
352 1.1 matt
353 1.37.4.1 rmind gtmpscinit_stop(sc);
354 1.37.4.1 rmind gtmpscinit_start(sc);
355 1.5 matt
356 1.37.4.1 rmind if (sc->sc_flags & GTMPSC_CONSOLE) {
357 1.37.4.1 rmind int maj;
358 1.37.4.1 rmind
359 1.37.4.1 rmind /* locate the major number */
360 1.37.4.1 rmind maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
361 1.37.4.1 rmind
362 1.37.4.1 rmind tp->t_dev = cn_tab->cn_dev =
363 1.37.4.1 rmind makedev(maj, device_unit(sc->sc_dev));
364 1.37.4.1 rmind
365 1.37.4.1 rmind aprint_normal_dev(self, "console\n");
366 1.37.4.1 rmind }
367 1.1 matt
368 1.1 matt #ifdef KGDB
369 1.1 matt /*
370 1.1 matt * Allow kgdb to "take over" this port. If this is
371 1.1 matt * the kgdb device, it has exclusive use.
372 1.1 matt */
373 1.37.4.1 rmind if (sc->sc_unit == gtmpsckgdbport) {
374 1.37.4.1 rmind #ifdef MPSC_CONSOLE
375 1.37.4.1 rmind if (sc->sc_unit == MPSC_CONSOLE) {
376 1.37.4.1 rmind aprint_error_dev(self,
377 1.37.4.1 rmind "(kgdb): cannot share with console\n");
378 1.1 matt return;
379 1.1 matt }
380 1.37.4.1 rmind #endif
381 1.37.4.1 rmind
382 1.37.4.1 rmind sc->sc_flags |= GTMPSC_KGDB;
383 1.37.4.1 rmind aprint_normal_dev(self, "kgdb\n");
384 1.37.4.1 rmind
385 1.37.4.1 rmind gtmpsc_txflush(sc);
386 1.1 matt
387 1.1 matt kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
388 1.37.4.1 rmind kgdb_dev = 123; /* unneeded, only to satisfy some tests */
389 1.1 matt gtmpsc_kgdb_attached = 1;
390 1.1 matt kgdb_connect(1);
391 1.1 matt }
392 1.1 matt #endif /* KGDB */
393 1.37.4.1 rmind
394 1.37.4.1 rmind return;
395 1.37.4.1 rmind
396 1.37.4.1 rmind
397 1.37.4.1 rmind fail5:
398 1.37.4.1 rmind bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
399 1.37.4.1 rmind fail4:
400 1.37.4.1 rmind bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
401 1.37.4.1 rmind fail3:
402 1.37.4.1 rmind bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
403 1.37.4.1 rmind fail2:
404 1.37.4.1 rmind bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
405 1.37.4.1 rmind fail1:
406 1.37.4.1 rmind bus_dmamem_free(sc->sc_dmat, &segs, 1);
407 1.37.4.1 rmind fail0:
408 1.37.4.1 rmind return;
409 1.37.4.1 rmind }
410 1.37.4.1 rmind
411 1.37.4.1 rmind /* ARGSUSED */
412 1.37.4.1 rmind int
413 1.37.4.1 rmind gtmpsc_intr(void *arg)
414 1.37.4.1 rmind {
415 1.37.4.1 rmind struct gt_softc *gt = (struct gt_softc *)arg;
416 1.37.4.1 rmind struct gtmpsc_softc *sc;
417 1.37.4.1 rmind uint32_t icause;
418 1.37.4.1 rmind int i;
419 1.37.4.1 rmind
420 1.37.4.1 rmind icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
421 1.37.4.1 rmind
422 1.37.4.1 rmind for (i = 0; i < GTMPSC_NCHAN; i++) {
423 1.37.4.1 rmind sc = device_lookup_private(&gtmpsc_cd, i);
424 1.37.4.1 rmind if (sc == NULL)
425 1.37.4.1 rmind continue;
426 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
427 1.37.4.1 rmind if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
428 1.37.4.1 rmind gtmpsc_intr_rx(sc);
429 1.37.4.1 rmind icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
430 1.37.4.1 rmind }
431 1.37.4.1 rmind if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
432 1.37.4.1 rmind gtmpsc_intr_tx(sc);
433 1.37.4.1 rmind icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
434 1.37.4.1 rmind }
435 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
436 1.37.4.1 rmind }
437 1.37.4.1 rmind
438 1.37.4.1 rmind return 1;
439 1.1 matt }
440 1.1 matt
441 1.1 matt STATIC void
442 1.37.4.1 rmind gtmpsc_softintr(void *arg)
443 1.1 matt {
444 1.37.4.1 rmind struct gtmpsc_softc *sc = arg;
445 1.37.4.1 rmind struct tty *tp = sc->sc_tty;
446 1.37.4.1 rmind gtmpsc_pollrx_t *vrxp;
447 1.37.4.1 rmind int code;
448 1.37.4.1 rmind u_int cc;
449 1.37.4.1 rmind u_char *get, *end, lsr;
450 1.37.4.1 rmind int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
451 1.37.4.1 rmind
452 1.37.4.1 rmind if (sc->sc_rx_ready) {
453 1.37.4.1 rmind sc->sc_rx_ready = 0;
454 1.37.4.1 rmind
455 1.37.4.1 rmind cc = sc->sc_rcvcnt;
456 1.37.4.1 rmind
457 1.37.4.1 rmind /* If not yet open, drop the entire buffer content here */
458 1.37.4.1 rmind if (!ISSET(tp->t_state, TS_ISOPEN))
459 1.37.4.1 rmind cc = 0;
460 1.37.4.1 rmind
461 1.37.4.1 rmind vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
462 1.37.4.1 rmind end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
463 1.37.4.1 rmind get = vrxp->rxbuf + sc->sc_roffset;
464 1.37.4.1 rmind while (cc > 0) {
465 1.37.4.1 rmind code = *get;
466 1.37.4.1 rmind lsr = vrxp->rxdesc.sdma_csr;
467 1.37.4.1 rmind
468 1.37.4.1 rmind if (ISSET(lsr,
469 1.37.4.1 rmind SDMA_CSR_RX_PE |
470 1.37.4.1 rmind SDMA_CSR_RX_FR |
471 1.37.4.1 rmind SDMA_CSR_RX_OR |
472 1.37.4.1 rmind SDMA_CSR_RX_BR)) {
473 1.37.4.1 rmind if (ISSET(lsr, SDMA_CSR_RX_OR))
474 1.37.4.1 rmind ; /* XXXXX not yet... */
475 1.37.4.1 rmind if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
476 1.37.4.1 rmind SET(code, TTY_FE);
477 1.37.4.1 rmind if (ISSET(lsr, SDMA_CSR_RX_PE))
478 1.37.4.1 rmind SET(code, TTY_PE);
479 1.37.4.1 rmind }
480 1.1 matt
481 1.37.4.1 rmind if ((*rint)(code, tp) == -1) {
482 1.37.4.1 rmind /*
483 1.37.4.1 rmind * The line discipline's buffer is out of space.
484 1.37.4.1 rmind */
485 1.37.4.1 rmind /* XXXXX not yet... */
486 1.37.4.1 rmind }
487 1.37.4.1 rmind if (++get >= end) {
488 1.37.4.1 rmind /* cleanup this descriptor, and return to DMA */
489 1.37.4.1 rmind CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
490 1.37.4.1 rmind sc->sc_rcvrx =
491 1.37.4.1 rmind (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
492 1.37.4.1 rmind vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
493 1.37.4.1 rmind end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
494 1.37.4.1 rmind get = vrxp->rxbuf + sc->sc_roffset;
495 1.37.4.1 rmind }
496 1.37.4.1 rmind cc--;
497 1.37.4.1 rmind }
498 1.37.4.1 rmind }
499 1.37.4.1 rmind if (sc->sc_tx_done) {
500 1.37.4.1 rmind sc->sc_tx_done = 0;
501 1.37.4.1 rmind CLR(tp->t_state, TS_BUSY);
502 1.37.4.1 rmind if (ISSET(tp->t_state, TS_FLUSH))
503 1.37.4.1 rmind CLR(tp->t_state, TS_FLUSH);
504 1.37.4.1 rmind else
505 1.37.4.1 rmind ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
506 1.37.4.1 rmind (*tp->t_linesw->l_start)(tp);
507 1.37.4.1 rmind }
508 1.1 matt }
509 1.1 matt
510 1.1 matt int
511 1.13 christos gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
512 1.1 matt {
513 1.1 matt struct gtmpsc_softc *sc;
514 1.1 matt int unit = GTMPSCUNIT(dev);
515 1.1 matt struct tty *tp;
516 1.1 matt int s;
517 1.1 matt int error;
518 1.10 perry
519 1.30 cegger sc = device_lookup_private(&gtmpsc_cd, unit);
520 1.1 matt if (!sc)
521 1.1 matt return ENXIO;
522 1.1 matt #ifdef KGDB
523 1.1 matt /*
524 1.1 matt * If this is the kgdb port, no other use is permitted.
525 1.1 matt */
526 1.37.4.1 rmind if (sc->sc_flags & GTMPSC_KGDB)
527 1.37.4.1 rmind return EBUSY;
528 1.1 matt #endif
529 1.37.4.1 rmind tp = sc->sc_tty;
530 1.21 elad if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
531 1.37.4.1 rmind return EBUSY;
532 1.1 matt
533 1.1 matt s = spltty();
534 1.1 matt
535 1.37.4.1 rmind if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
536 1.1 matt struct termios t;
537 1.1 matt
538 1.1 matt tp->t_dev = dev;
539 1.37.4.1 rmind
540 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
541 1.37.4.1 rmind
542 1.37.4.1 rmind /* Turn on interrupts. */
543 1.37.4.1 rmind sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
544 1.37.4.1 rmind gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
545 1.37.4.1 rmind
546 1.37.4.1 rmind /* Clear PPS capture state on first open. */
547 1.37.4.1 rmind mutex_spin_enter(&timecounter_lock);
548 1.37.4.1 rmind memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
549 1.37.4.1 rmind sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
550 1.37.4.1 rmind pps_init(&sc->sc_pps_state);
551 1.37.4.1 rmind mutex_spin_exit(&timecounter_lock);
552 1.37.4.1 rmind
553 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
554 1.37.4.1 rmind
555 1.37.4.1 rmind if (sc->sc_flags & GTMPSC_CONSOLE) {
556 1.37.4.1 rmind t.c_ospeed = sc->sc_baudrate;
557 1.37.4.1 rmind t.c_cflag = sc->sc_cflag;
558 1.37.4.1 rmind } else {
559 1.37.4.1 rmind t.c_ospeed = TTYDEF_SPEED;
560 1.37.4.1 rmind t.c_cflag = TTYDEF_CFLAG;
561 1.37.4.1 rmind }
562 1.37.4.1 rmind t.c_ispeed = t.c_ospeed;
563 1.37.4.1 rmind
564 1.1 matt /* Make sure gtmpscparam() will do something. */
565 1.1 matt tp->t_ospeed = 0;
566 1.1 matt (void) gtmpscparam(tp, &t);
567 1.1 matt tp->t_iflag = TTYDEF_IFLAG;
568 1.1 matt tp->t_oflag = TTYDEF_OFLAG;
569 1.1 matt tp->t_lflag = TTYDEF_LFLAG;
570 1.1 matt ttychars(tp);
571 1.1 matt ttsetwater(tp);
572 1.37.4.1 rmind
573 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
574 1.37.4.1 rmind
575 1.37.4.1 rmind /* Clear the input/output ring */
576 1.37.4.1 rmind sc->sc_rcvcnt = 0;
577 1.37.4.1 rmind sc->sc_roffset = 0;
578 1.37.4.1 rmind sc->sc_rcvrx = 0;
579 1.37.4.1 rmind sc->sc_rcvdrx = 0;
580 1.37.4.1 rmind sc->sc_nexttx = 0;
581 1.37.4.1 rmind sc->sc_lasttx = 0;
582 1.37.4.1 rmind
583 1.37.4.1 rmind /*
584 1.37.4.1 rmind * enable SDMA receive
585 1.37.4.1 rmind */
586 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
587 1.37.4.1 rmind
588 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
589 1.1 matt }
590 1.1 matt splx(s);
591 1.1 matt error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
592 1.1 matt if (error)
593 1.1 matt goto bad;
594 1.1 matt
595 1.1 matt error = (*tp->t_linesw->l_open)(dev, tp);
596 1.1 matt if (error)
597 1.1 matt goto bad;
598 1.1 matt
599 1.37.4.1 rmind return 0;
600 1.1 matt
601 1.1 matt bad:
602 1.1 matt if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
603 1.1 matt /*
604 1.1 matt * We failed to open the device, and nobody else had it opened.
605 1.1 matt * Clean up the state as appropriate.
606 1.1 matt */
607 1.1 matt gtmpscshutdown(sc);
608 1.1 matt }
609 1.1 matt
610 1.37.4.1 rmind return error;
611 1.1 matt }
612 1.1 matt
613 1.1 matt int
614 1.13 christos gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
615 1.1 matt {
616 1.1 matt int unit = GTMPSCUNIT(dev);
617 1.30 cegger struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
618 1.37.4.1 rmind struct tty *tp = sc->sc_tty;
619 1.1 matt
620 1.37.4.1 rmind if (!ISSET(tp->t_state, TS_ISOPEN))
621 1.37.4.1 rmind return 0;
622 1.1 matt
623 1.1 matt (*tp->t_linesw->l_close)(tp, flag);
624 1.1 matt ttyclose(tp);
625 1.37.4.1 rmind
626 1.1 matt if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
627 1.1 matt /*
628 1.1 matt * Although we got a last close, the device may still be in
629 1.1 matt * use; e.g. if this was the dialout node, and there are still
630 1.1 matt * processes waiting for carrier on the non-dialout node.
631 1.1 matt */
632 1.1 matt gtmpscshutdown(sc);
633 1.1 matt }
634 1.1 matt
635 1.37.4.1 rmind return 0;
636 1.1 matt }
637 1.1 matt
638 1.1 matt int
639 1.1 matt gtmpscread(dev_t dev, struct uio *uio, int flag)
640 1.1 matt {
641 1.37.4.1 rmind struct gtmpsc_softc *sc =
642 1.37.4.1 rmind device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
643 1.37.4.1 rmind struct tty *tp = sc->sc_tty;
644 1.10 perry
645 1.1 matt return (*tp->t_linesw->l_read)(tp, uio, flag);
646 1.1 matt }
647 1.1 matt
648 1.1 matt int
649 1.1 matt gtmpscwrite(dev_t dev, struct uio *uio, int flag)
650 1.1 matt {
651 1.37.4.1 rmind struct gtmpsc_softc *sc =
652 1.37.4.1 rmind device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
653 1.37.4.1 rmind struct tty *tp = sc->sc_tty;
654 1.10 perry
655 1.1 matt return (*tp->t_linesw->l_write)(tp, uio, flag);
656 1.1 matt }
657 1.1 matt
658 1.1 matt int
659 1.23 christos gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
660 1.1 matt {
661 1.37.4.1 rmind struct gtmpsc_softc *sc =
662 1.37.4.1 rmind device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
663 1.37.4.1 rmind struct tty *tp = sc->sc_tty;
664 1.1 matt int error;
665 1.10 perry
666 1.37.4.1 rmind error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
667 1.37.4.1 rmind if (error != EPASSTHROUGH)
668 1.1 matt return error;
669 1.37.4.1 rmind
670 1.37.4.1 rmind error = ttioctl(tp, cmd, data, flag, l);
671 1.37.4.1 rmind if (error != EPASSTHROUGH)
672 1.1 matt return error;
673 1.37.4.1 rmind
674 1.37.4.1 rmind error = 0;
675 1.37.4.1 rmind switch (cmd) {
676 1.37.4.1 rmind case TIOCSFLAGS:
677 1.37.4.1 rmind error = kauth_authorize_device_tty(l->l_cred,
678 1.37.4.1 rmind KAUTH_DEVICE_TTY_PRIVSET, tp);
679 1.37.4.1 rmind if (error)
680 1.37.4.1 rmind return error;
681 1.37.4.1 rmind break;
682 1.37.4.1 rmind default:
683 1.37.4.1 rmind /* nothing */
684 1.37.4.1 rmind break;
685 1.37.4.1 rmind }
686 1.37.4.1 rmind
687 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
688 1.37.4.1 rmind
689 1.37.4.1 rmind switch (cmd) {
690 1.37.4.1 rmind case PPS_IOC_CREATE:
691 1.37.4.1 rmind case PPS_IOC_DESTROY:
692 1.37.4.1 rmind case PPS_IOC_GETPARAMS:
693 1.37.4.1 rmind case PPS_IOC_SETPARAMS:
694 1.37.4.1 rmind case PPS_IOC_GETCAP:
695 1.37.4.1 rmind case PPS_IOC_FETCH:
696 1.37.4.1 rmind #ifdef PPS_SYNC
697 1.37.4.1 rmind case PPS_IOC_KCBIND:
698 1.37.4.1 rmind #endif
699 1.37.4.1 rmind mutex_spin_enter(&timecounter_lock);
700 1.37.4.1 rmind error = pps_ioctl(cmd, data, &sc->sc_pps_state);
701 1.37.4.1 rmind mutex_spin_exit(&timecounter_lock);
702 1.37.4.1 rmind break;
703 1.37.4.1 rmind
704 1.37.4.1 rmind case TIOCDCDTIMESTAMP: /* XXX old, overloaded API used by xntpd v3 */
705 1.37.4.1 rmind mutex_spin_enter(&timecounter_lock);
706 1.37.4.1 rmind #ifndef PPS_TRAILING_EDGE
707 1.37.4.1 rmind TIMESPEC_TO_TIMEVAL((struct timeval *)data,
708 1.37.4.1 rmind &sc->sc_pps_state.ppsinfo.assert_timestamp);
709 1.37.4.1 rmind #else
710 1.37.4.1 rmind TIMESPEC_TO_TIMEVAL((struct timeval *)data,
711 1.37.4.1 rmind &sc->sc_pps_state.ppsinfo.clear_timestamp);
712 1.37.4.1 rmind #endif
713 1.37.4.1 rmind mutex_spin_exit(&timecounter_lock);
714 1.37.4.1 rmind break;
715 1.37.4.1 rmind
716 1.37.4.1 rmind default:
717 1.37.4.1 rmind error = EPASSTHROUGH;
718 1.37.4.1 rmind break;
719 1.37.4.1 rmind }
720 1.37.4.1 rmind
721 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
722 1.37.4.1 rmind
723 1.37.4.1 rmind return error;
724 1.37.4.1 rmind }
725 1.37.4.1 rmind
726 1.37.4.1 rmind void
727 1.37.4.1 rmind gtmpscstop(struct tty *tp, int flag)
728 1.37.4.1 rmind {
729 1.1 matt }
730 1.1 matt
731 1.1 matt struct tty *
732 1.1 matt gtmpsctty(dev_t dev)
733 1.1 matt {
734 1.37.4.1 rmind struct gtmpsc_softc *sc =
735 1.37.4.1 rmind device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
736 1.1 matt
737 1.37.4.1 rmind return sc->sc_tty;
738 1.1 matt }
739 1.1 matt
740 1.37.4.1 rmind int
741 1.37.4.1 rmind gtmpscpoll(dev_t dev, int events, struct lwp *l)
742 1.1 matt {
743 1.37.4.1 rmind struct gtmpsc_softc *sc =
744 1.37.4.1 rmind device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
745 1.37.4.1 rmind struct tty *tp = sc->sc_tty;
746 1.37.4.1 rmind
747 1.37.4.1 rmind return (*tp->t_linesw->l_poll)(tp, events, l);
748 1.1 matt }
749 1.1 matt
750 1.37.4.1 rmind
751 1.1 matt STATIC void
752 1.1 matt gtmpscstart(struct tty *tp)
753 1.1 matt {
754 1.4 matt struct gtmpsc_softc *sc;
755 1.1 matt unsigned char *tba;
756 1.1 matt unsigned int unit;
757 1.37.4.1 rmind int s, tbc;
758 1.1 matt
759 1.1 matt unit = GTMPSCUNIT(tp->t_dev);
760 1.30 cegger sc = device_lookup_private(&gtmpsc_cd, unit);
761 1.1 matt if (sc == NULL)
762 1.1 matt return;
763 1.1 matt
764 1.1 matt s = spltty();
765 1.37.4.1 rmind if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
766 1.1 matt goto out;
767 1.1 matt if (sc->sc_tx_stopped)
768 1.1 matt goto out;
769 1.27 ad if (!ttypull(tp))
770 1.27 ad goto out;
771 1.1 matt
772 1.1 matt /* Grab the first contiguous region of buffer space. */
773 1.1 matt tba = tp->t_outq.c_cf;
774 1.1 matt tbc = ndqb(&tp->t_outq, 0);
775 1.1 matt
776 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
777 1.1 matt
778 1.1 matt sc->sc_tba = tba;
779 1.1 matt sc->sc_tbc = tbc;
780 1.37.4.1 rmind
781 1.37.4.1 rmind sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
782 1.37.4.1 rmind gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
783 1.37.4.1 rmind SET(tp->t_state, TS_BUSY);
784 1.1 matt sc->sc_tx_busy = 1;
785 1.37.4.1 rmind gtmpsc_write(sc);
786 1.1 matt
787 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
788 1.1 matt out:
789 1.1 matt splx(s);
790 1.1 matt }
791 1.1 matt
792 1.1 matt STATIC int
793 1.1 matt gtmpscparam(struct tty *tp, struct termios *t)
794 1.1 matt {
795 1.37.4.1 rmind struct gtmpsc_softc *sc =
796 1.37.4.1 rmind device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
797 1.1 matt
798 1.1 matt /* Check requested parameters. */
799 1.37.4.1 rmind if (compute_cdv(t->c_ospeed) < 0)
800 1.37.4.1 rmind return EINVAL;
801 1.1 matt if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
802 1.37.4.1 rmind return EINVAL;
803 1.1 matt
804 1.1 matt /*
805 1.1 matt * If there were no changes, don't do anything. This avoids dropping
806 1.1 matt * input and improves performance when all we did was frob things like
807 1.1 matt * VMIN and VTIME.
808 1.1 matt */
809 1.1 matt if (tp->t_ospeed == t->c_ospeed &&
810 1.1 matt tp->t_cflag == t->c_cflag)
811 1.37.4.1 rmind return 0;
812 1.1 matt
813 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
814 1.1 matt
815 1.1 matt /* And copy to tty. */
816 1.1 matt tp->t_ispeed = 0;
817 1.1 matt tp->t_ospeed = t->c_ospeed;
818 1.1 matt tp->t_cflag = t->c_cflag;
819 1.1 matt
820 1.37.4.1 rmind sc->sc_baudrate = t->c_ospeed;
821 1.37.4.1 rmind
822 1.1 matt if (!sc->sc_heldchange) {
823 1.1 matt if (sc->sc_tx_busy) {
824 1.1 matt sc->sc_heldtbc = sc->sc_tbc;
825 1.1 matt sc->sc_tbc = 0;
826 1.1 matt sc->sc_heldchange = 1;
827 1.1 matt } else
828 1.1 matt gtmpsc_loadchannelregs(sc);
829 1.1 matt }
830 1.1 matt
831 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
832 1.1 matt
833 1.1 matt /* Fake carrier on */
834 1.1 matt (void) (*tp->t_linesw->l_modem)(tp, 1);
835 1.1 matt
836 1.1 matt return 0;
837 1.1 matt }
838 1.1 matt
839 1.37.4.1 rmind void
840 1.37.4.1 rmind gtmpsc_shutdownhook(void *arg)
841 1.1 matt {
842 1.37.4.1 rmind gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
843 1.1 matt
844 1.37.4.1 rmind gtmpsc_txflush(sc);
845 1.1 matt }
846 1.1 matt
847 1.37.4.1 rmind /*
848 1.37.4.1 rmind * Convert cflag (CS[5678] and CSTOPB) into MPCR bits.
849 1.37.4.1 rmind */
850 1.37.4.1 rmind STATIC uint32_t
851 1.37.4.1 rmind cflag2mpcr(tcflag_t cflag)
852 1.1 matt {
853 1.37.4.1 rmind uint32_t mpcr = 0;
854 1.1 matt
855 1.37.4.1 rmind switch (ISSET(cflag, CSIZE)) {
856 1.37.4.1 rmind case CS5:
857 1.37.4.1 rmind SET(mpcr, GTMPSC_MPCR_CL_5);
858 1.37.4.1 rmind break;
859 1.37.4.1 rmind case CS6:
860 1.37.4.1 rmind SET(mpcr, GTMPSC_MPCR_CL_6);
861 1.37.4.1 rmind break;
862 1.37.4.1 rmind case CS7:
863 1.37.4.1 rmind SET(mpcr, GTMPSC_MPCR_CL_7);
864 1.37.4.1 rmind break;
865 1.37.4.1 rmind case CS8:
866 1.37.4.1 rmind SET(mpcr, GTMPSC_MPCR_CL_8);
867 1.37.4.1 rmind break;
868 1.1 matt }
869 1.37.4.1 rmind if (ISSET(cflag, CSTOPB))
870 1.37.4.1 rmind SET(mpcr, GTMPSC_MPCR_SBL_2);
871 1.1 matt
872 1.37.4.1 rmind return mpcr;
873 1.1 matt }
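/*
 * For example (illustrative), a typical 8N1 cflag containing CS8 yields
 * GTMPSC_MPCR_CL_8 with no stop-bit flag, while CS7 | CSTOPB yields
 * GTMPSC_MPCR_CL_7 | GTMPSC_MPCR_SBL_2.
 */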
874 1.1 matt
875 1.3 matt STATIC void
876 1.37.4.1 rmind gtmpsc_intr_rx(struct gtmpsc_softc *sc)
877 1.1 matt {
878 1.37.4.1 rmind gtmpsc_pollrx_t *vrxp;
879 1.37.4.1 rmind uint32_t csr;
880 1.37.4.1 rmind int kick, ix;
881 1.37.4.1 rmind
882 1.37.4.1 rmind kick = 0;
883 1.37.4.1 rmind
884 1.37.4.1 rmind /* already handled in gtmpsc_common_getc() */
885 1.37.4.1 rmind if (sc->sc_rcvdrx == sc->sc_rcvrx)
886 1.37.4.1 rmind return;
887 1.37.4.1 rmind
888 1.37.4.1 rmind ix = sc->sc_rcvdrx;
889 1.37.4.1 rmind vrxp = &sc->sc_poll_sdmapage->rx[ix];
890 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
891 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t),
892 1.37.4.1 rmind sizeof(sdma_desc_t),
893 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
894 1.37.4.1 rmind csr = vrxp->rxdesc.sdma_csr;
895 1.37.4.1 rmind while (!(csr & SDMA_CSR_RX_OWN)) {
896 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
897 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
898 1.37.4.1 rmind sizeof(vrxp->rxbuf),
899 1.37.4.1 rmind BUS_DMASYNC_POSTREAD);
900 1.37.4.1 rmind vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
901 1.37.4.1 rmind if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
902 1.37.4.1 rmind int cn_trapped = 0;
903 1.37.4.1 rmind
904 1.37.4.1 rmind cn_check_magic(sc->sc_tty->t_dev,
905 1.37.4.1 rmind CNC_BREAK, gtmpsc_cnm_state);
906 1.37.4.1 rmind if (cn_trapped)
907 1.37.4.1 rmind continue;
908 1.37.4.1 rmind #if defined(KGDB) && !defined(DDB)
909 1.37.4.1 rmind if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
910 1.37.4.1 rmind kgdb_connect(1);
911 1.37.4.1 rmind continue;
912 1.37.4.1 rmind }
913 1.37.4.1 rmind #endif
914 1.3 matt }
915 1.37.4.1 rmind
916 1.37.4.1 rmind sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
917 1.37.4.1 rmind kick = 1;
918 1.37.4.1 rmind
919 1.37.4.1 rmind ix = (ix + 1) % GTMPSC_NRXDESC;
920 1.37.4.1 rmind vrxp = &sc->sc_poll_sdmapage->rx[ix];
921 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
922 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t),
923 1.37.4.1 rmind sizeof(sdma_desc_t),
924 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
925 1.37.4.1 rmind csr = vrxp->rxdesc.sdma_csr;
926 1.37.4.1 rmind }
927 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
928 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t),
929 1.37.4.1 rmind sizeof(sdma_desc_t),
930 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
931 1.37.4.1 rmind
932 1.37.4.1 rmind if (kick) {
933 1.37.4.1 rmind sc->sc_rcvdrx = ix;
934 1.37.4.1 rmind sc->sc_rx_ready = 1;
935 1.37.4.1 rmind softint_schedule(sc->sc_si);
936 1.37.4.1 rmind }
937 1.1 matt }
938 1.1 matt
939 1.37.4.1 rmind STATIC __inline void
940 1.37.4.1 rmind gtmpsc_intr_tx(struct gtmpsc_softc *sc)
941 1.1 matt {
942 1.37.4.1 rmind gtmpsc_polltx_t *vtxp;
943 1.37.4.1 rmind uint32_t csr;
944 1.37.4.1 rmind int ix;
945 1.1 matt
946 1.37.4.1 rmind /*
947 1.37.4.1 rmind * If we've delayed a parameter change, do it now,
948 1.37.4.1 rmind * and restart output.
949 1.37.4.1 rmind */
950 1.37.4.1 rmind if (sc->sc_heldchange) {
951 1.37.4.1 rmind gtmpsc_loadchannelregs(sc);
952 1.37.4.1 rmind sc->sc_heldchange = 0;
953 1.37.4.1 rmind sc->sc_tbc = sc->sc_heldtbc;
954 1.37.4.1 rmind sc->sc_heldtbc = 0;
955 1.37.4.1 rmind }
956 1.37.4.1 rmind
957 1.37.4.1 rmind /* Clean-up TX descriptors and buffers */
958 1.37.4.1 rmind ix = sc->sc_lasttx;
959 1.37.4.1 rmind while (ix != sc->sc_nexttx) {
960 1.37.4.1 rmind vtxp = &sc->sc_poll_sdmapage->tx[ix];
961 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
962 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
963 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
964 1.37.4.1 rmind csr = vtxp->txdesc.sdma_csr;
965 1.37.4.1 rmind if (csr & SDMA_CSR_TX_OWN) {
966 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
967 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
968 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
969 1.37.4.1 rmind break;
970 1.37.4.1 rmind }
971 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
972 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
973 1.37.4.1 rmind sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
974 1.37.4.1 rmind ix = (ix + 1) % GTMPSC_NTXDESC;
975 1.37.4.1 rmind }
976 1.37.4.1 rmind sc->sc_lasttx = ix;
977 1.37.4.1 rmind
978 1.37.4.1 rmind /* Output the next chunk of the contiguous buffer */
979 1.37.4.1 rmind gtmpsc_write(sc);
980 1.37.4.1 rmind if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
981 1.37.4.1 rmind sc->sc_tx_busy = 0;
982 1.37.4.1 rmind sc->sc_tx_done = 1;
983 1.37.4.1 rmind softint_schedule(sc->sc_si);
984 1.37.4.1 rmind sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
985 1.37.4.1 rmind gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
986 1.37.4.1 rmind }
987 1.1 matt }
988 1.1 matt
989 1.1 matt /*
990 1.37.4.1 rmind * gtmpsc_write - write a buffer into the hardware
991 1.1 matt */
992 1.1 matt STATIC void
993 1.37.4.1 rmind gtmpsc_write(struct gtmpsc_softc *sc)
994 1.1 matt {
995 1.37.4.1 rmind gtmpsc_polltx_t *vtxp;
996 1.37.4.1 rmind uint32_t sdcm, ix;
997 1.37.4.1 rmind int kick, n;
998 1.1 matt
999 1.37.4.1 rmind kick = 0;
1000 1.37.4.1 rmind while (sc->sc_tbc > 0 && sc->sc_nexttx != sc->sc_lasttx) {
1001 1.37.4.1 rmind n = min(sc->sc_tbc, GTMPSC_TXBUFSZ);
1002 1.1 matt
1003 1.37.4.1 rmind ix = sc->sc_nexttx;
1004 1.37.4.1 rmind sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1005 1.1 matt
1006 1.37.4.1 rmind vtxp = &sc->sc_poll_sdmapage->tx[ix];
1007 1.1 matt
1008 1.37.4.1 rmind memcpy(vtxp->txbuf, sc->sc_tba, n);
1009 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1010 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1011 1.37.4.1 rmind sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1012 1.37.4.1 rmind
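/*
 * n is loaded into both halves of the count word: the high half (via
 * SDMA_TX_CNT_BCNT_SHIFT) is the byte count, and the low half appears to
 * be the shadow byte count used by the SDMA engine.
 */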
1013 1.37.4.1 rmind vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1014 1.37.4.1 rmind vtxp->txdesc.sdma_csr =
1015 1.37.4.1 rmind SDMA_CSR_TX_L |
1016 1.37.4.1 rmind SDMA_CSR_TX_F |
1017 1.37.4.1 rmind SDMA_CSR_TX_EI |
1018 1.37.4.1 rmind SDMA_CSR_TX_OWN;
1019 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1020 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1021 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1022 1.1 matt
1023 1.37.4.1 rmind sc->sc_tbc -= n;
1024 1.37.4.1 rmind sc->sc_tba += n;
1025 1.37.4.1 rmind kick = 1;
1026 1.37.4.1 rmind }
1027 1.37.4.1 rmind if (kick) {
1028 1.37.4.1 rmind /*
1029 1.37.4.1 rmind * now kick some SDMA
1030 1.37.4.1 rmind */
1031 1.37.4.1 rmind sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1032 1.37.4.1 rmind if ((sdcm & SDMA_SDCM_TXD) == 0)
1033 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1034 1.37.4.1 rmind }
1035 1.1 matt }
1036 1.1 matt
1037 1.1 matt /*
1038 1.1 matt * gtmpsc_txflush - wait for output to drain
1039 1.1 matt */
1040 1.1 matt STATIC void
1041 1.1 matt gtmpsc_txflush(gtmpsc_softc_t *sc)
1042 1.1 matt {
1043 1.1 matt gtmpsc_polltx_t *vtxp;
1044 1.37.4.1 rmind int ix, limit = 4000000; /* 4 seconds */
1045 1.1 matt
1046 1.37.4.1 rmind ix = sc->sc_nexttx - 1;
1047 1.1 matt if (ix < 0)
1048 1.1 matt ix = GTMPSC_NTXDESC - 1;
1049 1.1 matt
1050 1.37.4.1 rmind vtxp = &sc->sc_poll_sdmapage->tx[ix];
1051 1.1 matt while (limit > 0) {
1052 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1053 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1054 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1055 1.37.4.1 rmind if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1056 1.1 matt break;
1057 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1058 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1059 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1060 1.37.4.1 rmind DELAY(1);
1061 1.37.4.1 rmind limit -= 1;
1062 1.10 perry }
1063 1.1 matt }
1064 1.1 matt
1065 1.37.4.1 rmind /*
1066 1.37.4.1 rmind * gtmpsc_rxdesc_init - set up RX descriptor ring
1067 1.37.4.1 rmind */
1068 1.1 matt STATIC void
1069 1.37.4.1 rmind gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1070 1.1 matt {
1071 1.37.4.1 rmind gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1072 1.37.4.1 rmind sdma_desc_t *dp;
1073 1.37.4.1 rmind int i;
1074 1.1 matt
1075 1.37.4.1 rmind first_prxp = prxp =
1076 1.37.4.1 rmind (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1077 1.37.4.1 rmind vrxp = sc->sc_poll_sdmapage->rx;
1078 1.37.4.1 rmind for (i = 0; i < GTMPSC_NRXDESC; i++) {
1079 1.37.4.1 rmind dp = &vrxp->rxdesc;
1080 1.37.4.1 rmind dp->sdma_csr =
1081 1.37.4.1 rmind SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1082 1.37.4.1 rmind dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1083 1.37.4.1 rmind dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1084 1.37.4.1 rmind vrxp++;
1085 1.37.4.1 rmind prxp++;
1086 1.37.4.1 rmind dp->sdma_next = (uint32_t)&prxp->rxdesc;
1087 1.37.4.1 rmind
1088 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1089 1.37.4.1 rmind i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1090 1.37.4.1 rmind sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1091 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1092 1.37.4.1 rmind i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1093 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1094 1.37.4.1 rmind }
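/*
 * The last RX descriptor is filled in outside the loop so its next
 * pointer can link back to the first descriptor and close the ring.
 */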
1095 1.37.4.1 rmind dp = &vrxp->rxdesc;
1096 1.37.4.1 rmind dp->sdma_csr =
1097 1.37.4.1 rmind SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1098 1.37.4.1 rmind dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1099 1.37.4.1 rmind dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1100 1.37.4.1 rmind dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1101 1.37.4.1 rmind
1102 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1103 1.37.4.1 rmind i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1104 1.37.4.1 rmind sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1105 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1106 1.37.4.1 rmind i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1107 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1108 1.37.4.1 rmind
1109 1.37.4.1 rmind sc->sc_rcvcnt = 0;
1110 1.37.4.1 rmind sc->sc_roffset = 0;
1111 1.37.4.1 rmind sc->sc_rcvrx = 0;
1112 1.37.4.1 rmind sc->sc_rcvdrx = 0;
1113 1.1 matt }
1114 1.1 matt
1115 1.37.4.1 rmind /*
1116 1.37.4.1 rmind * gtmpsc_txdesc_init - set up TX descriptor ring
1117 1.37.4.1 rmind */
1118 1.1 matt STATIC void
1119 1.37.4.1 rmind gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1120 1.1 matt {
1121 1.37.4.1 rmind gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1122 1.37.4.1 rmind sdma_desc_t *dp;
1123 1.37.4.1 rmind int i;
1124 1.1 matt
1125 1.37.4.1 rmind first_ptxp = ptxp =
1126 1.37.4.1 rmind (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1127 1.37.4.1 rmind vtxp = sc->sc_poll_sdmapage->tx;
1128 1.37.4.1 rmind for (i = 0; i < GTMPSC_NTXDESC; i++) {
1129 1.37.4.1 rmind dp = &vtxp->txdesc;
1130 1.37.4.1 rmind dp->sdma_csr = 0;
1131 1.37.4.1 rmind dp->sdma_cnt = 0;
1132 1.37.4.1 rmind dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1133 1.37.4.1 rmind vtxp++;
1134 1.37.4.1 rmind ptxp++;
1135 1.37.4.1 rmind dp->sdma_next = (uint32_t)&ptxp->txdesc;
1136 1.1 matt }
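/* The last TX descriptor links back to the first, closing the ring. */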
1137 1.37.4.1 rmind dp = &vtxp->txdesc;
1138 1.37.4.1 rmind dp->sdma_csr = 0;
1139 1.37.4.1 rmind dp->sdma_cnt = 0;
1140 1.37.4.1 rmind dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1141 1.37.4.1 rmind dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1142 1.1 matt
1143 1.37.4.1 rmind sc->sc_nexttx = 0;
1144 1.37.4.1 rmind sc->sc_lasttx = 0;
1145 1.37.4.1 rmind }
1146 1.1 matt
1147 1.37.4.1 rmind STATIC void
1148 1.37.4.1 rmind gtmpscinit_stop(struct gtmpsc_softc *sc)
1149 1.37.4.1 rmind {
1150 1.37.4.1 rmind uint32_t csr;
1151 1.37.4.1 rmind int timo = 10000; /* XXXX */
1152 1.7 scw
1153 1.37.4.1 rmind /* Abort MPSC Rx (aborting Tx messes things up) */
1154 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1155 1.7 scw
1156 1.37.4.1 rmind /* abort SDMA RX and stop TX for MPSC unit */
1157 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1158 1.1 matt
1159 1.37.4.1 rmind /* poll for SDMA RX abort completion */
1160 1.37.4.1 rmind for (; timo > 0; timo--) {
1161 1.37.4.1 rmind csr = GT_SDMA_READ(sc, SDMA_SDCM);
1162 1.37.4.1 rmind if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1163 1.1 matt break;
1164 1.5 matt DELAY(50);
1165 1.1 matt }
1166 1.1 matt }
1167 1.1 matt
1168 1.1 matt STATIC void
1169 1.37.4.1 rmind gtmpscinit_start(struct gtmpsc_softc *sc)
1170 1.1 matt {
1171 1.1 matt
1172 1.1 matt /*
1173 1.37.4.1 rmind * Set pointers of current/first descriptor of TX to SDMA register.
1174 1.1 matt */
1175 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1176 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1177 1.1 matt
1178 1.1 matt /*
1179 1.37.4.1 rmind * Set pointer of current descriptor of RX to SDMA register.
1180 1.1 matt */
1181 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
1182 1.1 matt
1183 1.1 matt /*
1184 1.1 matt * initialize SDMA unit Configuration Register
1185 1.1 matt */
1186 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDC,
1187 1.37.4.1 rmind SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);
1188 1.10 perry
1189 1.1 matt gtmpsc_loadchannelregs(sc);
1190 1.1 matt
1191 1.1 matt /*
1192 1.1 matt * set MPSC LO and HI port config registers for GTMPSC unit
1193 1.1 matt */
1194 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
1195 1.37.4.1 rmind GTMPSC_MMCR_LO_MODE_UART |
1196 1.37.4.1 rmind GTMPSC_MMCR_LO_ET |
1197 1.37.4.1 rmind GTMPSC_MMCR_LO_ER |
1198 1.37.4.1 rmind GTMPSC_MMCR_LO_NLM);
1199 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
1200 1.37.4.1 rmind GTMPSC_MMCR_HI_TCDV_DEFAULT |
1201 1.37.4.1 rmind GTMPSC_MMCR_HI_RDW |
1202 1.37.4.1 rmind GTMPSC_MMCR_HI_RCDV_DEFAULT);
1203 1.1 matt
1204 1.1 matt /*
1205 1.1 matt * tell MPSC receive the Enter Hunt
1206 1.1 matt */
1207 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
1208 1.37.4.1 rmind }
1209 1.1 matt
1210 1.37.4.1 rmind STATIC void
1211 1.37.4.1 rmind gtmpscshutdown(struct gtmpsc_softc *sc)
1212 1.37.4.1 rmind {
1213 1.37.4.1 rmind struct tty *tp;
1214 1.1 matt
1215 1.37.4.1 rmind #ifdef KGDB
1216 1.37.4.1 rmind if ((sc->sc_flags & GTMPSC_KGDB) != 0)
1217 1.37.4.1 rmind return;
1218 1.37.4.1 rmind #endif
1219 1.37.4.1 rmind tp = sc->sc_tty;
1220 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
1221 1.37.4.1 rmind /* Fake carrier off */
1222 1.37.4.1 rmind (void) (*tp->t_linesw->l_modem)(tp, 0);
1223 1.37.4.1 rmind sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1224 1.37.4.1 rmind gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1225 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
1226 1.1 matt }
1227 1.1 matt
1228 1.1 matt STATIC void
1229 1.37.4.1 rmind gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
1230 1.1 matt {
1231 1.1 matt
1232 1.37.4.1 rmind if (sc->sc_dev != NULL)
1233 1.37.4.1 rmind gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
1234 1.37.4.1 rmind GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
1235 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
1236 1.37.4.1 rmind
1237 1.37.4.1 rmind /*
1238 1.37.4.1 rmind * set MPSC Protocol configuration register for GTMPSC unit
1239 1.37.4.1 rmind */
1240 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
1241 1.1 matt }
1242 1.1 matt
1243 1.37.4.1 rmind
1244 1.37.4.1 rmind #ifdef MPSC_CONSOLE
1245 1.1 matt /*
1246 1.37.4.1 rmind * Following are all routines needed for MPSC to act as console
1247 1.1 matt */
1248 1.37.4.1 rmind STATIC int
1249 1.1 matt gtmpsccngetc(dev_t dev)
1250 1.1 matt {
1251 1.1 matt
1252 1.37.4.1 rmind return gtmpsc_common_getc(&gtmpsc_cn_softc);
1253 1.1 matt }
1254 1.1 matt
1255 1.37.4.1 rmind STATIC void
1256 1.1 matt gtmpsccnputc(dev_t dev, int c)
1257 1.1 matt {
1258 1.4 matt
1259 1.37.4.1 rmind gtmpsc_common_putc(&gtmpsc_cn_softc, c);
1260 1.1 matt }
1261 1.1 matt
1262 1.37.4.1 rmind STATIC void
1263 1.1 matt gtmpsccnpollc(dev_t dev, int on)
1264 1.1 matt {
1265 1.1 matt }
1266 1.1 matt
1267 1.37.4.1 rmind STATIC void
1268 1.37.4.1 rmind gtmpsccnhalt(dev_t dev)
1269 1.3 matt {
1270 1.37.4.1 rmind gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
1271 1.37.4.1 rmind uint32_t csr;
1272 1.3 matt
1273 1.37.4.1 rmind /*
1274 1.37.4.1 rmind * flush TX buffers
1275 1.37.4.1 rmind */
1276 1.37.4.1 rmind gtmpsc_txflush(sc);
1277 1.4 matt
1278 1.37.4.1 rmind /*
1279 1.37.4.1 rmind * stop MPSC unit RX
1280 1.37.4.1 rmind */
1281 1.37.4.1 rmind csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
1282 1.37.4.1 rmind csr &= ~GTMPSC_CHR2_EH;
1283 1.37.4.1 rmind csr |= GTMPSC_CHR2_RXABORT;
1284 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
1285 1.3 matt
1286 1.37.4.1 rmind DELAY(GTMPSC_RESET_DELAY);
1287 1.1 matt
1288 1.37.4.1 rmind /*
1289 1.37.4.1 rmind * abort SDMA RX for MPSC unit
1290 1.37.4.1 rmind */
1291 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
1292 1.1 matt }
1293 1.1 matt
1294 1.37.4.1 rmind int
1295 1.37.4.1 rmind gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1296 1.37.4.1 rmind int unit, int brg, int speed, tcflag_t tcflag)
1297 1.1 matt {
1298 1.37.4.1 rmind struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1299 1.37.4.1 rmind int i, res;
1300 1.37.4.1 rmind const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1301 1.37.4.1 rmind
1302 1.37.4.1 rmind res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1303 1.37.4.1 rmind if (res != 0)
1304 1.37.4.1 rmind return res;
1305 1.1 matt
1306 1.37.4.1 rmind gtmpscinit_stop(sc);
1307 1.37.4.1 rmind gtmpscinit_start(sc);
1308 1.1 matt
1309 1.37.4.1 rmind /*
1310 1.37.4.1 rmind * enable SDMA receive
1311 1.37.4.1 rmind */
1312 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1313 1.1 matt
1314 1.37.4.1 rmind for (i = 0; i < sizeof(cp); i++) {
1315 1.37.4.1 rmind if (*(cp + i) == 0)
1316 1.1 matt break;
1317 1.37.4.1 rmind gtmpsc_common_putc(sc, *(cp + i));
1318 1.1 matt }
1319 1.1 matt
1320 1.37.4.1 rmind cn_tab = &gtmpsc_consdev;
1321 1.37.4.1 rmind cn_init_magic(&gtmpsc_cnm_state);
1322 1.1 matt
1323 1.37.4.1 rmind return 0;
1324 1.1 matt }
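/*
 * A board's consinit() would typically call this before autoconfiguration,
 * along the lines of (illustrative values only; the tags, base address and
 * cflag depend on the port):
 *
 *	gtmpsccnattach(iot, dmat, gt_base, 0, 0, 9600,
 *	    (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8);
 */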
1325 1.1 matt
1326 1.1 matt /*
1327 1.37.4.1 rmind * gtmpsc_hackinit - hacks required to support GTMPSC console
1328 1.1 matt */
1329 1.37.4.1 rmind STATIC int
1330 1.37.4.1 rmind gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
1331 1.37.4.1 rmind bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
1332 1.37.4.1 rmind int baudrate, tcflag_t tcflag)
1333 1.1 matt {
1334 1.37.4.1 rmind gtmpsc_poll_sdma_t *cn_dmapage =
1335 1.37.4.1 rmind (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
1336 1.37.4.1 rmind int error;
1337 1.1 matt
1338 1.37.4.1 rmind DPRINTF(("hackinit\n"));
1339 1.1 matt
1340 1.37.4.1 rmind memset(sc, 0, sizeof(struct gtmpsc_softc));
1341 1.37.4.1 rmind error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
1342 1.37.4.1 rmind &sc->sc_mpsch);
1343 1.37.4.1 rmind if (error != 0)
1344 1.37.4.1 rmind goto fail0;
1345 1.37.4.1 rmind
1346 1.37.4.1 rmind error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
1347 1.37.4.1 rmind &sc->sc_sdmah);
1348 1.37.4.1 rmind if (error != 0)
1349 1.37.4.1 rmind goto fail1;
1350 1.37.4.1 rmind error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
1351 1.37.4.1 rmind sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
1352 1.37.4.1 rmind if (error != 0)
1353 1.37.4.1 rmind goto fail2;
1354 1.37.4.1 rmind error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
1355 1.37.4.1 rmind sizeof(gtmpsc_polltx_t), NULL,
1356 1.37.4.1 rmind BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1357 1.37.4.1 rmind if (error != 0)
1358 1.37.4.1 rmind goto fail3;
1359 1.37.4.1 rmind error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
1360 1.37.4.1 rmind sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
1361 1.37.4.1 rmind &sc->sc_rxdma_map);
1362 1.37.4.1 rmind if (error != 0)
1363 1.37.4.1 rmind goto fail4;
1364 1.37.4.1 rmind error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
1365 1.37.4.1 rmind sizeof(gtmpsc_pollrx_t), NULL,
1366 1.37.4.1 rmind BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1367 1.37.4.1 rmind if (error != 0)
1368 1.37.4.1 rmind goto fail5;
1369 1.37.4.1 rmind
1370 1.37.4.1 rmind sc->sc_iot = iot;
1371 1.37.4.1 rmind sc->sc_dmat = dmat;
1372 1.37.4.1 rmind sc->sc_poll_sdmapage = cn_dmapage;
1373 1.37.4.1 rmind sc->sc_brg = brg;
1374 1.37.4.1 rmind sc->sc_baudrate = baudrate;
1375 1.37.4.1 rmind sc->sc_cflag = tcflag;
1376 1.1 matt
1377 1.37.4.1 rmind gtmpsc_txdesc_init(sc);
1378 1.37.4.1 rmind gtmpsc_rxdesc_init(sc);
1379 1.1 matt
1380 1.37.4.1 rmind return 0;
1381 1.1 matt
1382 1.37.4.1 rmind fail5:
1383 1.37.4.1 rmind bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
1384 1.37.4.1 rmind fail4:
1385 1.37.4.1 rmind bus_dmamap_unload(dmat, sc->sc_txdma_map);
1386 1.37.4.1 rmind fail3:
1387 1.37.4.1 rmind bus_dmamap_destroy(dmat, sc->sc_txdma_map);
1388 1.37.4.1 rmind fail2:
1389 1.37.4.1 rmind bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
1390 1.37.4.1 rmind fail1:
1391 1.37.4.1 rmind bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
1392 1.37.4.1 rmind fail0:
1393 1.37.4.1 rmind return error;
1394 1.1 matt }
1395 1.37.4.1 rmind #endif /* MPSC_CONSOLE */
1396 1.1 matt
1397 1.1 matt #ifdef KGDB
1398 1.1 matt STATIC int
1399 1.32 dsl gtmpsc_kgdb_getc(void *arg)
1400 1.1 matt {
1401 1.37.4.1 rmind struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1402 1.1 matt
1403 1.37.4.1 rmind return gtmpsc_common_getc(sc);
1404 1.1 matt }
1405 1.1 matt
1406 1.1 matt STATIC void
1407 1.32 dsl gtmpsc_kgdb_putc(void *arg, int c)
1408 1.1 matt {
1409 1.37.4.1 rmind struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1410 1.1 matt
1411 1.37.4.1 rmind gtmpsc_common_putc(sc, c);
1412 1.1 matt }
1413 1.37.4.1 rmind #endif /* KGDB */
1414 1.1 matt
1415 1.37.4.1 rmind #if defined(MPSC_CONSOLE) || defined(KGDB)
1416 1.37.4.1 rmind /*
1417 1.37.4.1 rmind * gtmpsc_common_getc - polled console read
1418 1.37.4.1 rmind *
1419 1.37.4.1 rmind * We copy data from the DMA buffers into a buffer in the softc
1420 1.37.4.1 rmind * to reduce descriptor ownership turnaround time; the MPSC can
1421 1.37.4.1 rmind * crater if it wraps the descriptor ring, which fills asynchronously
1422 1.37.4.1 rmind * and is throttled only by line speed.
1423 1.37.4.1 rmind */
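/*
 * Polled-RX bookkeeping, as inferred from the code below: sc_rcvdrx
 * indexes the descriptor currently being drained, sc_rcvrx the oldest
 * descriptor not yet returned to the SDMA engine, sc_roffset the next
 * byte within the current buffer, and sc_rcvcnt the bytes still pending.
 */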
1424 1.37.4.1 rmind STATIC int
1425 1.37.4.1 rmind gtmpsc_common_getc(struct gtmpsc_softc *sc)
1426 1.1 matt {
1427 1.37.4.1 rmind gtmpsc_pollrx_t *vrxp;
1428 1.37.4.1 rmind uint32_t csr;
1429 1.37.4.1 rmind int ix, ch, wdog_interval = 0;
1430 1.1 matt
1431 1.37.4.1 rmind if (!cold)
1432 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
1433 1.1 matt
1434 1.37.4.1 rmind ix = sc->sc_rcvdrx;
1435 1.37.4.1 rmind vrxp = &sc->sc_poll_sdmapage->rx[ix];
1436 1.37.4.1 rmind while (sc->sc_rcvcnt == 0) {
1437 1.37.4.1 rmind 		/* Wait until the SDMA engine completes a receive descriptor */
1438 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1439 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t),
1440 1.37.4.1 rmind sizeof(sdma_desc_t),
1441 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1442 1.37.4.1 rmind csr = vrxp->rxdesc.sdma_csr;
1443 1.37.4.1 rmind if (csr & SDMA_CSR_RX_OWN) {
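			/*
			 * The descriptor is still owned by the SDMA engine.
			 * Judging by the CHR2 bit names, the write below
			 * (enter hunt / close receive descriptor) nudges the
			 * MPSC to close a partially filled descriptor so
			 * polled input is not stuck waiting for a full buffer.
			 */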
1444 1.37.4.1 rmind GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
1445 1.37.4.1 rmind GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
1446 1.37.4.1 rmind if (wdog_interval++ % 32)
1447 1.37.4.1 rmind gt_watchdog_service();
1448 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1449 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t),
1450 1.37.4.1 rmind sizeof(sdma_desc_t),
1451 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1452 1.37.4.1 rmind DELAY(50);
1453 1.37.4.1 rmind continue;
1454 1.37.4.1 rmind }
1455 1.37.4.1 rmind if (csr & SDMA_CSR_RX_ES)
1456 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
1457 1.37.4.1 rmind "RX error, rxdesc csr 0x%x\n", csr);
1458 1.1 matt
1459 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1460 1.37.4.1 rmind ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1461 1.37.4.1 rmind sizeof(vrxp->rxbuf),
1462 1.37.4.1 rmind BUS_DMASYNC_POSTREAD);
1463 1.37.4.1 rmind
1464 1.37.4.1 rmind vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
1465 1.37.4.1 rmind sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
1466 1.37.4.1 rmind sc->sc_roffset = 0;
1467 1.37.4.1 rmind sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
1468 1.37.4.1 rmind
1469 1.37.4.1 rmind if (sc->sc_rcvcnt == 0) {
1470 1.37.4.1 rmind /* cleanup this descriptor, and return to DMA */
1471 1.37.4.1 rmind CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
1472 1.37.4.1 rmind sc->sc_rcvrx = sc->sc_rcvdrx;
1473 1.37.4.1 rmind }
1474 1.1 matt
1475 1.37.4.1 rmind ix = sc->sc_rcvdrx;
1476 1.37.4.1 rmind vrxp = &sc->sc_poll_sdmapage->rx[ix];
1477 1.37.4.1 rmind }
1478 1.37.4.1 rmind ch = vrxp->rxbuf[sc->sc_roffset++];
1479 1.37.4.1 rmind sc->sc_rcvcnt--;
1480 1.1 matt
1481 1.37.4.1 rmind if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
1482 1.37.4.1 rmind /* cleanup this descriptor, and return to DMA */
1483 1.37.4.1 rmind CLEANUP_AND_RETURN_RXDMA(sc, ix);
1484 1.37.4.1 rmind sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
1485 1.37.4.1 rmind }
1486 1.1 matt
1487 1.37.4.1 rmind gt_watchdog_service();
1488 1.1 matt
1489 1.37.4.1 rmind if (!cold)
1490 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
1491 1.37.4.1 rmind return ch;
1492 1.1 matt }
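/*
 * Sketch of a typical polled-console wrapper around the routine above; the
 * actual cngetc glue lives elsewhere in this file, and the function and
 * softc names here are assumptions:
 *
 *	int
 *	gtmpsccngetc(dev_t dev)
 *	{
 *
 *		return gtmpsc_common_getc(&gtmpsc_cn_softc);
 *	}
 */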
1493 1.1 matt
1494 1.37.4.1 rmind STATIC void
1495 1.37.4.1 rmind gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
1496 1.1 matt {
1497 1.37.4.1 rmind gtmpsc_polltx_t *vtxp;
1498 1.37.4.1 rmind int ix;
1499 1.37.4.1 rmind const int nc = 1;
1500 1.1 matt
1501 1.37.4.1 rmind 	/* Get a free TX descriptor; if the ring is full, wait for the oldest to finish */
1502 1.37.4.1 rmind if (!cold)
1503 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
1504 1.37.4.1 rmind ix = sc->sc_nexttx;
1505 1.37.4.1 rmind sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1506 1.37.4.1 rmind if (sc->sc_nexttx == sc->sc_lasttx) {
1507 1.37.4.1 rmind gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1508 1.37.4.1 rmind sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1509 1.37.4.1 rmind }
1510 1.37.4.1 rmind if (!cold)
1511 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
1512 1.1 matt
1513 1.37.4.1 rmind vtxp = &sc->sc_poll_sdmapage->tx[ix];
1514 1.37.4.1 rmind vtxp->txbuf[0] = c;
1515 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1516 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1517 1.37.4.1 rmind sizeof(vtxp->txbuf),
1518 1.37.4.1 rmind BUS_DMASYNC_PREWRITE);
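	/*
	 * The payload has been synced before ownership is handed over:
	 * sdma_cnt below apparently carries the byte count plus a shadow
	 * copy of it, and the CSR write marks the descriptor first/last and
	 * sets the OWN bit, after which the descriptor itself is synced and
	 * the transmit-demand kick is issued.
	 */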
1519 1.37.4.1 rmind
1520 1.37.4.1 rmind vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
1521 1.37.4.1 rmind vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
1522 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1523 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t),
1524 1.37.4.1 rmind sizeof(sdma_desc_t),
1525 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1526 1.1 matt
1527 1.37.4.1 rmind if (!cold)
1528 1.37.4.1 rmind mutex_spin_enter(&sc->sc_lock);
1529 1.37.4.1 rmind 	/*
1530 1.37.4.1 rmind 	 * Kick the SDMA engine: demand transmit on this channel.
1531 1.37.4.1 rmind 	 */
1532 1.37.4.1 rmind GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
1533 1.1 matt
1534 1.37.4.1 rmind while (sc->sc_lasttx != sc->sc_nexttx) {
1535 1.37.4.1 rmind gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1536 1.37.4.1 rmind sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1537 1.1 matt }
1538 1.37.4.1 rmind if (!cold)
1539 1.37.4.1 rmind mutex_spin_exit(&sc->sc_lock);
1540 1.1 matt }
1541 1.1 matt
1542 1.37.4.1 rmind /*
1543 1.37.4.1 rmind  * gtmpsc_common_putc_wait_complete - wait for a TX descriptor to complete
1544 1.37.4.1 rmind  */
1545 1.37.4.1 rmind STATIC void
1546 1.37.4.1 rmind gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
1547 1.1 matt {
1548 1.37.4.1 rmind gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
1549 1.37.4.1 rmind uint32_t csr;
1550 1.37.4.1 rmind int wdog_interval = 0;
1551 1.37.4.1 rmind
1552 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1553 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t),
1554 1.37.4.1 rmind sizeof(sdma_desc_t),
1555 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1556 1.37.4.1 rmind csr = vtxp->txdesc.sdma_csr;
1557 1.37.4.1 rmind while (csr & SDMA_CSR_TX_OWN) {
1558 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1559 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t),
1560 1.37.4.1 rmind sizeof(sdma_desc_t),
1561 1.37.4.1 rmind BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1562 1.37.4.1 rmind DELAY(40);
1563 1.37.4.1 rmind if (wdog_interval++ % 32)
1564 1.37.4.1 rmind gt_watchdog_service();
1565 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1566 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t),
1567 1.37.4.1 rmind sizeof(sdma_desc_t),
1568 1.37.4.1 rmind BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1569 1.37.4.1 rmind csr = vtxp->txdesc.sdma_csr;
1570 1.37.4.1 rmind }
1571 1.37.4.1 rmind if (csr & SDMA_CSR_TX_ES)
1572 1.37.4.1 rmind aprint_error_dev(sc->sc_dev,
1573 1.37.4.1 rmind "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
1574 1.37.4.1 rmind bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1575 1.37.4.1 rmind ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1576 1.37.4.1 rmind sizeof(vtxp->txbuf),
1577 1.37.4.1 rmind BUS_DMASYNC_POSTWRITE);
1578 1.1 matt }
1579 1.37.4.1 rmind #endif /* defined(MPSC_CONSOLE) || defined(KGDB) */
1580