1 /* $NetBSD: gtmpsc.c,v 1.43.14.1 2014/08/20 00:03:39 tls Exp $ */
2 /*
3 * Copyright (c) 2009 KIYOHARA Takashi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 /*
28 * gtmpsc.c - Multi-Protocol Serial Controller driver; supports UART mode only
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.43.14.1 2014/08/20 00:03:39 tls Exp $");
33
34 #include "opt_kgdb.h"
35
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/intr.h>
42 #include <sys/kauth.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/systm.h>
47 #include <sys/timepps.h>
48 #include <sys/tty.h>
49 #ifdef KGDB
50 #include <sys/kgdb.h>
51 #endif
52
53 #include <dev/cons.h>
54
55 #include <dev/marvell/gtreg.h>
56 #include <dev/marvell/gtvar.h>
57 #include <dev/marvell/gtbrgreg.h>
58 #include <dev/marvell/gtbrgvar.h>
59 #include <dev/marvell/gtsdmareg.h>
60 #include <dev/marvell/gtsdmavar.h>
61 #include <dev/marvell/gtmpscreg.h>
62 #include <dev/marvell/gtmpscvar.h>
63 #include <dev/marvell/marvellreg.h>
64 #include <dev/marvell/marvellvar.h>
65
66 #include "gtmpsc.h"
67 #include "ioconf.h"
68 #include "locators.h"
69
70 /*
71 * Wait two character times (in microseconds) for RESET_DELAY
72 */
73 #define GTMPSC_RESET_DELAY (2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
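/*
 * Worked example (a sketch, assuming GT_MPSC_DEFAULT_BAUD_RATE is 115200):
 * each character is approximated as 8 bit times, so the delay is
 *   2 * 8 * 1000000 / 115200 ~= 138 microseconds,
 * which is the value DELAY() receives in gtmpsccnhalt() below.
 */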
74
75
76 #if defined(DEBUG)
77 unsigned int gtmpsc_debug = 0;
78 # define STATIC
79 # define DPRINTF(x) do { if (gtmpsc_debug) printf x ; } while (0)
80 #else
81 # define STATIC static
82 # define DPRINTF(x)
83 #endif
84
85 #define GTMPSCUNIT_MASK 0x7ffff
86 #define GTMPSCDIALOUT_MASK 0x80000
87
88 #define GTMPSCUNIT(x) (minor(x) & GTMPSCUNIT_MASK)
89 #define GTMPSCDIALOUT(x) (minor(x) & GTMPSCDIALOUT_MASK)
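/*
 * Example: a minor number of (GTMPSCDIALOUT_MASK | 1) selects unit 1 through
 * the dialout node, while a plain minor of 1 selects the same unit's tty node.
 */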
90
91 #define CLEANUP_AND_RETURN_RXDMA(sc, ix) \
92 do { \
93 gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
94 \
95 _vrxp->rxdesc.sdma_csr = \
96 SDMA_CSR_RX_L | \
97 SDMA_CSR_RX_F | \
98 SDMA_CSR_RX_OWN | \
99 SDMA_CSR_RX_EI; \
100 _vrxp->rxdesc.sdma_cnt = \
101 GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT; \
102 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map, \
103 (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t), \
104 sizeof(_vrxp->rxbuf), \
105 BUS_DMASYNC_PREREAD); \
106 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map, \
107 (ix) * sizeof(gtmpsc_pollrx_t), \
108 sizeof(sdma_desc_t), \
109 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
110 } while (0)
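/*
 * Each gtmpsc_pollrx_t/gtmpsc_polltx_t entry holds an SDMA descriptor followed
 * by its data buffer, all within the single DMA page allocated in
 * gtmpscattach() (or the static gtmpsc_cn_dmapage used by the console hack);
 * the bus_dmamap_sync() offsets above and throughout the driver index into
 * that page accordingly (descriptor at ix * sizeof(entry), buffer right after).
 * CLEANUP_AND_RETURN_RXDMA() reinitializes a consumed RX descriptor and hands
 * it back to the SDMA engine by setting the OWN bit again.
 */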
111
112
113 STATIC int gtmpscmatch(device_t, cfdata_t, void *);
114 STATIC void gtmpscattach(device_t, device_t, void *);
115
116 STATIC void gtmpsc_softintr(void *);
117
118 STATIC void gtmpscstart(struct tty *);
119 STATIC int gtmpscparam(struct tty *, struct termios *);
120
121 STATIC void gtmpsc_shutdownhook(void *);
122
123 STATIC uint32_t cflag2mpcr(tcflag_t);
124 STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
125 STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
126 STATIC void gtmpsc_write(struct gtmpsc_softc *);
127 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
128 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
129 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
130 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
131 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
132 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
133 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
134
135 #ifdef MPSC_CONSOLE
136 STATIC int gtmpsccngetc(dev_t);
137 STATIC void gtmpsccnputc(dev_t, int);
138 STATIC void gtmpsccnpollc(dev_t, int);
139 STATIC void gtmpsccnhalt(dev_t);
140
141 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
142 bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
143 #endif
144
145 #if defined(MPSC_CONSOLE) || defined(KGDB)
146 STATIC int gtmpsc_common_getc(struct gtmpsc_softc *);
147 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
148 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
149 #endif
150
151 dev_type_open(gtmpscopen);
152 dev_type_close(gtmpscclose);
153 dev_type_read(gtmpscread);
154 dev_type_write(gtmpscwrite);
155 dev_type_ioctl(gtmpscioctl);
156 dev_type_stop(gtmpscstop);
157 dev_type_tty(gtmpsctty);
158 dev_type_poll(gtmpscpoll);
159
160 const struct cdevsw gtmpsc_cdevsw = {
161 .d_open = gtmpscopen,
162 .d_close = gtmpscclose,
163 .d_read = gtmpscread,
164 .d_write = gtmpscwrite,
165 .d_ioctl = gtmpscioctl,
166 .d_stop = gtmpscstop,
167 .d_tty = gtmpsctty,
168 .d_poll = gtmpscpoll,
169 .d_mmap = nommap,
170 .d_kqfilter = ttykqfilter,
171 .d_discard = nodiscard,
172 .d_flag = D_TTY
173 };
174
175 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
176 gtmpscmatch, gtmpscattach, NULL, NULL);
177
178
179 STATIC uint32_t sdma_imask; /* soft copy of SDMA IMASK reg */
180 STATIC struct cnm_state gtmpsc_cnm_state;
181
182 #ifdef KGDB
183 static int gtmpsc_kgdb_addr;
184 static int gtmpsc_kgdb_attached;
185
186 STATIC int gtmpsc_kgdb_getc(void *);
187 STATIC void gtmpsc_kgdb_putc(void *, int);
188 #endif /* KGDB */
189
190 #ifdef MPSC_CONSOLE
191 /*
192 * hacks for console initialization
193 * which happens prior to autoconfig "attach"
194 *
195 * XXX Assumes PAGE_SIZE is a constant!
196 */
197 gtmpsc_softc_t gtmpsc_cn_softc;
198 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
199
200
201 static struct consdev gtmpsc_consdev = {
202 NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
203 NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
204 };
205 #endif
206
207
208 #define GT_MPSC_READ(sc, o) \
209 bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
210 #define GT_MPSC_WRITE(sc, o, v) \
211 bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
212 #define GT_SDMA_READ(sc, o) \
213 bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
214 #define GT_SDMA_WRITE(sc, o, v) \
215 bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
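/*
 * Register access helpers: sc_mpsch and sc_sdmah are the per-channel MPSC and
 * SDMA subregions set up in gtmpscattach() (or bus_space_map()ed directly by
 * gtmpsc_hackinit() for the early console), so "o" is an offset within the
 * respective register block.
 */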
216
217
218 /* ARGSUSED */
219 STATIC int
220 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
221 {
222 struct marvell_attach_args *mva = aux;
223
224 if (strcmp(mva->mva_name, match->cf_name) != 0)
225 return 0;
226 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
227 return 0;
228
229 mva->mva_size = GTMPSC_SIZE;
230 return 1;
231 }
232
233 /* ARGSUSED */
234 STATIC void
235 gtmpscattach(device_t parent, device_t self, void *aux)
236 {
237 struct gtmpsc_softc *sc = device_private(self);
238 struct marvell_attach_args *mva = aux;
239 bus_dma_segment_t segs;
240 struct tty *tp;
241 int rsegs, err, unit;
242 void *kva;
243
244 aprint_naive("\n");
245 aprint_normal(": Multi-Protocol Serial Controller\n");
246
247 if (mva->mva_unit != MVA_UNIT_DEFAULT)
248 unit = mva->mva_unit;
249 else
250 unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
251
252 #ifdef MPSC_CONSOLE
253 if (cn_tab == &gtmpsc_consdev &&
254 cn_tab->cn_dev == makedev(0, unit)) {
255 gtmpsc_cn_softc.sc_dev = self;
256 memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
257 sc->sc_flags = GTMPSC_CONSOLE;
258 } else
259 #endif
260 {
261 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
262 mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
263 aprint_error_dev(self, "Cannot map MPSC registers\n");
264 return;
265 }
266 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
267 GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
268 aprint_error_dev(self, "Cannot map SDMA registers\n");
269 return;
270 }
271 sc->sc_dev = self;
272 sc->sc_unit = unit;
273 sc->sc_iot = mva->mva_iot;
274 sc->sc_dmat = mva->mva_dmat;
275
276 err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
277 &segs, 1, &rsegs, BUS_DMA_NOWAIT);
278 if (err) {
279 aprint_error_dev(sc->sc_dev,
280 "bus_dmamem_alloc error 0x%x\n", err);
281 goto fail0;
282 }
283 err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
284 BUS_DMA_NOWAIT);
285 if (err) {
286 aprint_error_dev(sc->sc_dev,
287 "bus_dmamem_map error 0x%x\n", err);
288 goto fail1;
289 }
290 memset(kva, 0, PAGE_SIZE); /* paranoid/superfluous */
291 sc->sc_poll_sdmapage = kva;
292
293 err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
294 sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
295 &sc->sc_txdma_map);
296 if (err != 0) {
297 aprint_error_dev(sc->sc_dev,
298 "bus_dmamap_create error 0x%x\n", err);
299 goto fail2;
300 }
301 err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
302 sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
303 NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
304 if (err != 0) {
305 aprint_error_dev(sc->sc_dev,
306 "bus_dmamap_load tx error 0x%x\n", err);
307 goto fail3;
308 }
309 err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
310 sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
311 &sc->sc_rxdma_map);
312 if (err != 0) {
313 aprint_error_dev(sc->sc_dev,
314 "bus_dmamap_create rx error 0x%x\n", err);
315 goto fail4;
316 }
317 err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
318 sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
319 NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
320 if (err != 0) {
321 aprint_error_dev(sc->sc_dev,
322 "bus_dmamap_load rx error 0x%x\n", err);
323 goto fail5;
324 }
325
326 sc->sc_brg = unit; /* XXXXX */
327 sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
328 }
329 aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
330 GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
331
332 sc->sc_rx_ready = 0;
333 sc->sc_tx_busy = 0;
334 sc->sc_tx_done = 0;
335 sc->sc_tx_stopped = 0;
336 sc->sc_heldchange = 0;
337
338 gtmpsc_txdesc_init(sc);
339 gtmpsc_rxdesc_init(sc);
340
341 sc->sc_tty = tp = tty_alloc();
342 tp->t_oproc = gtmpscstart;
343 tp->t_param = gtmpscparam;
344 tty_attach(tp);
345
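/*
 * sc_lock is shared with the hard interrupt handler gtmpsc_intr() and the
 * tty entry points, so it is a spin mutex, initialized here at IPL_HIGH.
 */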
346 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
347
348 /*
349 * clear any pending SDMA interrupts for this unit
350 */
351 (void) gt_sdma_icause(device_parent(sc->sc_dev),
352 SDMA_INTR_RXBUF(sc->sc_unit) |
353 SDMA_INTR_RXERR(sc->sc_unit) |
354 SDMA_INTR_TXBUF(sc->sc_unit) |
355 SDMA_INTR_TXEND(sc->sc_unit));
356
357 sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
358 if (sc->sc_si == NULL)
359 panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");
360
361 shutdownhook_establish(gtmpsc_shutdownhook, sc);
362
363 gtmpscinit_stop(sc);
364 gtmpscinit_start(sc);
365
366 if (sc->sc_flags & GTMPSC_CONSOLE) {
367 int maj;
368
369 /* locate the major number */
370 maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
371
372 tp->t_dev = cn_tab->cn_dev =
373 makedev(maj, device_unit(sc->sc_dev));
374
375 aprint_normal_dev(self, "console\n");
376 }
377
378 #ifdef KGDB
379 /*
380 * Allow kgdb to "take over" this port. If this is
381 * the kgdb device, it has exclusive use.
382 */
383 if (sc->sc_unit == gtmpsckgdbport) {
384 #ifdef MPSC_CONSOLE
385 if (sc->sc_unit == MPSC_CONSOLE) {
386 aprint_error_dev(self,
387 "(kgdb): cannot share with console\n");
388 return;
389 }
390 #endif
391
392 sc->sc_flags |= GTMPSC_KGDB;
393 aprint_normal_dev(self, "kgdb\n");
394
395 gtmpsc_txflush(sc);
396
397 kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
398 kgdb_dev = 123; /* unneeded, only to satisfy some tests */
399 gtmpsc_kgdb_attached = 1;
400 kgdb_connect(1);
401 }
402 #endif /* KGDB */
403
404 return;
405
406
407 fail5:
408 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
409 fail4:
410 bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
411 fail3:
412 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
413 fail2:
414 bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
415 fail1:
416 bus_dmamem_free(sc->sc_dmat, &segs, 1);
417 fail0:
418 return;
419 }
420
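/*
 * gtmpsc_intr - hard interrupt handler for the shared SDMA interrupt.
 * The parent GT device is assumed to register this handler; it reads and
 * acknowledges the causes currently enabled in sdma_imask and dispatches
 * RX/TX processing for every attached channel whose cause bits are set.
 */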
421 /* ARGSUSED */
422 int
423 gtmpsc_intr(void *arg)
424 {
425 struct gt_softc *gt = (struct gt_softc *)arg;
426 struct gtmpsc_softc *sc;
427 uint32_t icause;
428 int i;
429
430 icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
431
432 for (i = 0; i < GTMPSC_NCHAN; i++) {
433 sc = device_lookup_private(&gtmpsc_cd, i);
434 if (sc == NULL)
435 continue;
436 mutex_spin_enter(&sc->sc_lock);
437 if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
438 gtmpsc_intr_rx(sc);
439 icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
440 }
441 if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
442 gtmpsc_intr_tx(sc);
443 icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
444 }
445 mutex_spin_exit(&sc->sc_lock);
446 }
447
448 return 1;
449 }
450
451 STATIC void
452 gtmpsc_softintr(void *arg)
453 {
454 struct gtmpsc_softc *sc = arg;
455 struct tty *tp = sc->sc_tty;
456 gtmpsc_pollrx_t *vrxp;
457 int code;
458 u_int cc;
459 u_char *get, *end, lsr;
460 int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
461
462 if (sc->sc_rx_ready) {
463 sc->sc_rx_ready = 0;
464
465 cc = sc->sc_rcvcnt;
466
467 /* If not yet open, drop the entire buffer content here */
468 if (!ISSET(tp->t_state, TS_ISOPEN))
469 cc = 0;
470
471 vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
472 end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
473 get = vrxp->rxbuf + sc->sc_roffset;
474 while (cc > 0) {
475 code = *get;
476 lsr = vrxp->rxdesc.sdma_csr;
477
478 if (ISSET(lsr,
479 SDMA_CSR_RX_PE |
480 SDMA_CSR_RX_FR |
481 SDMA_CSR_RX_OR |
482 SDMA_CSR_RX_BR)) {
483 if (ISSET(lsr, SDMA_CSR_RX_OR))
484 ; /* XXXXX not yet... */
485 if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
486 SET(code, TTY_FE);
487 if (ISSET(lsr, SDMA_CSR_RX_PE))
488 SET(code, TTY_PE);
489 }
490
491 if ((*rint)(code, tp) == -1) {
492 /*
493 * The line discipline's buffer is out of space.
494 */
495 /* XXXXX not yet... */
496 }
497 if (++get >= end) {
498 /* cleanup this descriptor, and return to DMA */
499 CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
500 sc->sc_rcvrx =
501 (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
502 vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
503 end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
504 get = vrxp->rxbuf + sc->sc_roffset;
505 }
506 cc--;
507 }
508 }
509 if (sc->sc_tx_done) {
510 sc->sc_tx_done = 0;
511 CLR(tp->t_state, TS_BUSY);
512 if (ISSET(tp->t_state, TS_FLUSH))
513 CLR(tp->t_state, TS_FLUSH);
514 else
515 ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
516 (*tp->t_linesw->l_start)(tp);
517 }
518 }
519
520 int
521 gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
522 {
523 struct gtmpsc_softc *sc;
524 int unit = GTMPSCUNIT(dev);
525 struct tty *tp;
526 int s;
527 int error;
528
529 sc = device_lookup_private(&gtmpsc_cd, unit);
530 if (!sc)
531 return ENXIO;
532 #ifdef KGDB
533 /*
534 * If this is the kgdb port, no other use is permitted.
535 */
536 if (sc->sc_flags & GTMPSC_KGDB)
537 return EBUSY;
538 #endif
539 tp = sc->sc_tty;
540 if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
541 return EBUSY;
542
543 s = spltty();
544
545 if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
546 struct termios t;
547
548 tp->t_dev = dev;
549
550 mutex_spin_enter(&sc->sc_lock);
551
552 /* Turn on interrupts. */
553 sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
554 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
555
556 /* Clear PPS capture state on first open. */
557 mutex_spin_enter(&timecounter_lock);
558 memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
559 sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
560 pps_init(&sc->sc_pps_state);
561 mutex_spin_exit(&timecounter_lock);
562
563 mutex_spin_exit(&sc->sc_lock);
564
565 if (sc->sc_flags & GTMPSC_CONSOLE) {
566 t.c_ospeed = sc->sc_baudrate;
567 t.c_cflag = sc->sc_cflag;
568 } else {
569 t.c_ospeed = TTYDEF_SPEED;
570 t.c_cflag = TTYDEF_CFLAG;
571 }
572 t.c_ispeed = t.c_ospeed;
573
574 /* Make sure gtmpscparam() will do something. */
575 tp->t_ospeed = 0;
576 (void) gtmpscparam(tp, &t);
577 tp->t_iflag = TTYDEF_IFLAG;
578 tp->t_oflag = TTYDEF_OFLAG;
579 tp->t_lflag = TTYDEF_LFLAG;
580 ttychars(tp);
581 ttsetwater(tp);
582
583 mutex_spin_enter(&sc->sc_lock);
584
585 /* Clear the input/output ring */
586 sc->sc_rcvcnt = 0;
587 sc->sc_roffset = 0;
588 sc->sc_rcvrx = 0;
589 sc->sc_rcvdrx = 0;
590 sc->sc_nexttx = 0;
591 sc->sc_lasttx = 0;
592
593 /*
594 * enable SDMA receive
595 */
596 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
597
598 mutex_spin_exit(&sc->sc_lock);
599 }
600 splx(s);
601 error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
602 if (error)
603 goto bad;
604
605 error = (*tp->t_linesw->l_open)(dev, tp);
606 if (error)
607 goto bad;
608
609 return 0;
610
611 bad:
612 if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
613 /*
614 * We failed to open the device, and nobody else had it opened.
615 * Clean up the state as appropriate.
616 */
617 gtmpscshutdown(sc);
618 }
619
620 return error;
621 }
622
623 int
624 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
625 {
626 int unit = GTMPSCUNIT(dev);
627 struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
628 struct tty *tp = sc->sc_tty;
629
630 if (!ISSET(tp->t_state, TS_ISOPEN))
631 return 0;
632
633 (*tp->t_linesw->l_close)(tp, flag);
634 ttyclose(tp);
635
636 if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
637 /*
638 * Although we got a last close, the device may still be in
639 * use; e.g. if this was the dialout node, and there are still
640 * processes waiting for carrier on the non-dialout node.
641 */
642 gtmpscshutdown(sc);
643 }
644
645 return 0;
646 }
647
648 int
649 gtmpscread(dev_t dev, struct uio *uio, int flag)
650 {
651 struct gtmpsc_softc *sc =
652 device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
653 struct tty *tp = sc->sc_tty;
654
655 return (*tp->t_linesw->l_read)(tp, uio, flag);
656 }
657
658 int
659 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
660 {
661 struct gtmpsc_softc *sc =
662 device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
663 struct tty *tp = sc->sc_tty;
664
665 return (*tp->t_linesw->l_write)(tp, uio, flag);
666 }
667
668 int
669 gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
670 {
671 struct gtmpsc_softc *sc =
672 device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
673 struct tty *tp = sc->sc_tty;
674 int error;
675
676 error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
677 if (error != EPASSTHROUGH)
678 return error;
679
680 error = ttioctl(tp, cmd, data, flag, l);
681 if (error != EPASSTHROUGH)
682 return error;
683
684 error = 0;
685 switch (cmd) {
686 case TIOCSFLAGS:
687 error = kauth_authorize_device_tty(l->l_cred,
688 KAUTH_DEVICE_TTY_PRIVSET, tp);
689 if (error)
690 return error;
691 break;
692 default:
693 /* nothing */
694 break;
695 }
696
697 mutex_spin_enter(&sc->sc_lock);
698
699 switch (cmd) {
700 case PPS_IOC_CREATE:
701 case PPS_IOC_DESTROY:
702 case PPS_IOC_GETPARAMS:
703 case PPS_IOC_SETPARAMS:
704 case PPS_IOC_GETCAP:
705 case PPS_IOC_FETCH:
706 #ifdef PPS_SYNC
707 case PPS_IOC_KCBIND:
708 #endif
709 mutex_spin_enter(&timecounter_lock);
710 error = pps_ioctl(cmd, data, &sc->sc_pps_state);
711 mutex_spin_exit(&timecounter_lock);
712 break;
713
714 case TIOCDCDTIMESTAMP: /* XXX old, overloaded API used by xntpd v3 */
715 mutex_spin_enter(&timecounter_lock);
716 #ifndef PPS_TRAILING_EDGE
717 TIMESPEC_TO_TIMEVAL((struct timeval *)data,
718 &sc->sc_pps_state.ppsinfo.assert_timestamp);
719 #else
720 TIMESPEC_TO_TIMEVAL((struct timeval *)data,
721 &sc->sc_pps_state.ppsinfo.clear_timestamp);
722 #endif
723 mutex_spin_exit(&timecounter_lock);
724 break;
725
726 default:
727 error = EPASSTHROUGH;
728 break;
729 }
730
731 mutex_spin_exit(&sc->sc_lock);
732
733 return error;
734 }
735
736 void
737 gtmpscstop(struct tty *tp, int flag)
738 {
739 }
740
741 struct tty *
742 gtmpsctty(dev_t dev)
743 {
744 struct gtmpsc_softc *sc =
745 device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
746
747 return sc->sc_tty;
748 }
749
750 int
751 gtmpscpoll(dev_t dev, int events, struct lwp *l)
752 {
753 struct gtmpsc_softc *sc =
754 device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
755 struct tty *tp = sc->sc_tty;
756
757 return (*tp->t_linesw->l_poll)(tp, events, l);
758 }
759
760
761 STATIC void
762 gtmpscstart(struct tty *tp)
763 {
764 struct gtmpsc_softc *sc;
765 unsigned char *tba;
766 unsigned int unit;
767 int s, tbc;
768
769 unit = GTMPSCUNIT(tp->t_dev);
770 sc = device_lookup_private(&gtmpsc_cd, unit);
771 if (sc == NULL)
772 return;
773
774 s = spltty();
775 if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
776 goto out;
777 if (sc->sc_tx_stopped)
778 goto out;
779 if (!ttypull(tp))
780 goto out;
781
782 /* Grab the first contiguous region of buffer space. */
783 tba = tp->t_outq.c_cf;
784 tbc = ndqb(&tp->t_outq, 0);
785
786 mutex_spin_enter(&sc->sc_lock);
787
788 sc->sc_tba = tba;
789 sc->sc_tbc = tbc;
790
791 sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
792 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
793 SET(tp->t_state, TS_BUSY);
794 sc->sc_tx_busy = 1;
795 gtmpsc_write(sc);
796
797 mutex_spin_exit(&sc->sc_lock);
798 out:
799 splx(s);
800 }
801
802 STATIC int
803 gtmpscparam(struct tty *tp, struct termios *t)
804 {
805 struct gtmpsc_softc *sc =
806 device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
807
808 /* Check requested parameters. */
809 if (compute_cdv(t->c_ospeed) < 0)
810 return EINVAL;
811 if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
812 return EINVAL;
813
814 /*
815 * If there were no changes, don't do anything. This avoids dropping
816 * input and improves performance when all we did was frob things like
817 * VMIN and VTIME.
818 */
819 if (tp->t_ospeed == t->c_ospeed &&
820 tp->t_cflag == t->c_cflag)
821 return 0;
822
823 mutex_spin_enter(&sc->sc_lock);
824
825 /* And copy to tty. */
826 tp->t_ispeed = 0;
827 tp->t_ospeed = t->c_ospeed;
828 tp->t_cflag = t->c_cflag;
829
830 sc->sc_baudrate = t->c_ospeed;
831
832 if (!sc->sc_heldchange) {
833 if (sc->sc_tx_busy) {
834 sc->sc_heldtbc = sc->sc_tbc;
835 sc->sc_tbc = 0;
836 sc->sc_heldchange = 1;
837 } else
838 gtmpsc_loadchannelregs(sc);
839 }
840
841 mutex_spin_exit(&sc->sc_lock);
842
843 /* Fake carrier on */
844 (void) (*tp->t_linesw->l_modem)(tp, 1);
845
846 return 0;
847 }
848
849 void
850 gtmpsc_shutdownhook(void *arg)
851 {
852 gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
853
854 gtmpsc_txflush(sc);
855 }
856
857 /*
858 * Convert cflag (CS[5678] and CSTOPB) to MPCR bits.
859 */
860 STATIC uint32_t
861 cflag2mpcr(tcflag_t cflag)
862 {
863 uint32_t mpcr = 0;
864
865 switch (ISSET(cflag, CSIZE)) {
866 case CS5:
867 SET(mpcr, GTMPSC_MPCR_CL_5);
868 break;
869 case CS6:
870 SET(mpcr, GTMPSC_MPCR_CL_6);
871 break;
872 case CS7:
873 SET(mpcr, GTMPSC_MPCR_CL_7);
874 break;
875 case CS8:
876 SET(mpcr, GTMPSC_MPCR_CL_8);
877 break;
878 }
879 if (ISSET(cflag, CSTOPB))
880 SET(mpcr, GTMPSC_MPCR_SBL_2);
881
882 return mpcr;
883 }
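/*
 * Example for cflag2mpcr() above: CS8 with CSTOPB set yields
 * GTMPSC_MPCR_CL_8 | GTMPSC_MPCR_SBL_2.
 */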
884
885 STATIC void
886 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
887 {
888 gtmpsc_pollrx_t *vrxp;
889 uint32_t csr;
890 int kick, ix;
891
892 kick = 0;
893
894 /* already handled in gtmpsc_common_getc() */
895 if (sc->sc_rcvdrx == sc->sc_rcvrx)
896 return;
897
898 ix = sc->sc_rcvdrx;
899 vrxp = &sc->sc_poll_sdmapage->rx[ix];
900 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
901 ix * sizeof(gtmpsc_pollrx_t),
902 sizeof(sdma_desc_t),
903 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
904 csr = vrxp->rxdesc.sdma_csr;
905 while (!(csr & SDMA_CSR_RX_OWN)) {
906 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
907 ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
908 sizeof(vrxp->rxbuf),
909 BUS_DMASYNC_POSTREAD);
910 vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
911 if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
912 int cn_trapped = 0;
913
914 cn_check_magic(sc->sc_tty->t_dev,
915 CNC_BREAK, gtmpsc_cnm_state);
916 if (cn_trapped)
917 continue;
918 #if defined(KGDB) && !defined(DDB)
919 if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
920 kgdb_connect(1);
921 continue;
922 }
923 #endif
924 }
925
926 sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
927 kick = 1;
928
929 ix = (ix + 1) % GTMPSC_NRXDESC;
930 vrxp = &sc->sc_poll_sdmapage->rx[ix];
931 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
932 ix * sizeof(gtmpsc_pollrx_t),
933 sizeof(sdma_desc_t),
934 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
935 csr = vrxp->rxdesc.sdma_csr;
936 }
937 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
938 ix * sizeof(gtmpsc_pollrx_t),
939 sizeof(sdma_desc_t),
940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
941
942 if (kick) {
943 sc->sc_rcvdrx = ix;
944 sc->sc_rx_ready = 1;
945 softint_schedule(sc->sc_si);
946 }
947 }
948
949 STATIC __inline void
950 gtmpsc_intr_tx(struct gtmpsc_softc *sc)
951 {
952 gtmpsc_polltx_t *vtxp;
953 uint32_t csr;
954 int ix;
955
956 /*
957 * If we've delayed a parameter change, do it now,
958 * and restart output.
959 */
960 if (sc->sc_heldchange) {
961 gtmpsc_loadchannelregs(sc);
962 sc->sc_heldchange = 0;
963 sc->sc_tbc = sc->sc_heldtbc;
964 sc->sc_heldtbc = 0;
965 }
966
967 /* Clean-up TX descriptors and buffers */
968 ix = sc->sc_lasttx;
969 while (ix != sc->sc_nexttx) {
970 vtxp = &sc->sc_poll_sdmapage->tx[ix];
971 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
972 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
973 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
974 csr = vtxp->txdesc.sdma_csr;
975 if (csr & SDMA_CSR_TX_OWN) {
976 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
977 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
978 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
979 break;
980 }
981 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
982 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
983 sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
984 ix = (ix + 1) % GTMPSC_NTXDESC;
985 }
986 sc->sc_lasttx = ix;
987
988 /* Output the next chunk of the contiguous buffer */
989 gtmpsc_write(sc);
990 if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
991 sc->sc_tx_busy = 0;
992 sc->sc_tx_done = 1;
993 softint_schedule(sc->sc_si);
994 sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
995 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
996 }
997 }
998
999 /*
1000 * gtmpsc_write - write a buffer into the hardware
1001 */
1002 STATIC void
1003 gtmpsc_write(struct gtmpsc_softc *sc)
1004 {
1005 gtmpsc_polltx_t *vtxp;
1006 uint32_t sdcm, ix;
1007 int kick, n;
1008
1009 kick = 0;
1010 while (sc->sc_tbc > 0 && sc->sc_nexttx != sc->sc_lasttx) {
1011 n = min(sc->sc_tbc, GTMPSC_TXBUFSZ);
1012
1013 ix = sc->sc_nexttx;
1014 sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1015
1016 vtxp = &sc->sc_poll_sdmapage->tx[ix];
1017
1018 memcpy(vtxp->txbuf, sc->sc_tba, n);
1019 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1020 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1021 sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1022
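/*
 * Arm the descriptor: BCNT holds the byte count, L/F mark a single-buffer
 * frame, EI asks for an interrupt on completion, and OWN hands the
 * descriptor to the SDMA engine.
 */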
1023 vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1024 vtxp->txdesc.sdma_csr =
1025 SDMA_CSR_TX_L |
1026 SDMA_CSR_TX_F |
1027 SDMA_CSR_TX_EI |
1028 SDMA_CSR_TX_OWN;
1029 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1030 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1031 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1032
1033 sc->sc_tbc -= n;
1034 sc->sc_tba += n;
1035 kick = 1;
1036 }
1037 if (kick) {
1038 /*
1039 * now kick some SDMA
1040 */
1041 sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1042 if ((sdcm & SDMA_SDCM_TXD) == 0)
1043 GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1044 }
1045 }
1046
1047 /*
1048 * gtmpsc_txflush - wait for output to drain
1049 */
1050 STATIC void
1051 gtmpsc_txflush(gtmpsc_softc_t *sc)
1052 {
1053 gtmpsc_polltx_t *vtxp;
1054 int ix, limit = 4000000; /* 4 seconds */
1055
1056 ix = sc->sc_nexttx - 1;
1057 if (ix < 0)
1058 ix = GTMPSC_NTXDESC - 1;
1059
1060 vtxp = &sc->sc_poll_sdmapage->tx[ix];
1061 while (limit > 0) {
1062 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1063 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1064 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1065 if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1066 break;
1067 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1068 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1069 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1070 DELAY(1);
1071 limit -= 1;
1072 }
1073 }
1074
1075 /*
1076 * gtmpsc_rxdesc_init - set up RX descriptor ring
1077 */
1078 STATIC void
1079 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1080 {
1081 gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1082 sdma_desc_t *dp;
1083 int i;
1084
1085 first_prxp = prxp =
1086 (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1087 vrxp = sc->sc_poll_sdmapage->rx;
1088 for (i = 0; i < GTMPSC_NRXDESC; i++) {
1089 dp = &vrxp->rxdesc;
1090 dp->sdma_csr =
1091 SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1092 dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1093 dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1094 vrxp++;
1095 prxp++;
1096 dp->sdma_next = (uint32_t)&prxp->rxdesc;
1097
1098 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1099 i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1100 sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1101 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1102 i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1103 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1104 }
1105 dp = &vrxp->rxdesc;
1106 dp->sdma_csr =
1107 SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1108 dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1109 dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1110 dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1111
1112 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1113 i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1114 sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1115 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1116 i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1117 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1118
1119 sc->sc_rcvcnt = 0;
1120 sc->sc_roffset = 0;
1121 sc->sc_rcvrx = 0;
1122 sc->sc_rcvdrx = 0;
1123 }
1124
1125 /*
1126 * gtmpsc_txdesc_init - set up TX descriptor ring
1127 */
1128 STATIC void
1129 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1130 {
1131 gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1132 sdma_desc_t *dp;
1133 int i;
1134
1135 first_ptxp = ptxp =
1136 (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1137 vtxp = sc->sc_poll_sdmapage->tx;
1138 for (i = 0; i < GTMPSC_NTXDESC; i++) {
1139 dp = &vtxp->txdesc;
1140 dp->sdma_csr = 0;
1141 dp->sdma_cnt = 0;
1142 dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1143 vtxp++;
1144 ptxp++;
1145 dp->sdma_next = (uint32_t)&ptxp->txdesc;
1146 }
1147 dp = &vtxp->txdesc;
1148 dp->sdma_csr = 0;
1149 dp->sdma_cnt = 0;
1150 dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1151 dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1152
1153 sc->sc_nexttx = 0;
1154 sc->sc_lasttx = 0;
1155 }
1156
1157 STATIC void
1158 gtmpscinit_stop(struct gtmpsc_softc *sc)
1159 {
1160 uint32_t csr;
1161 int timo = 10000; /* XXXX */
1162
1163 /* Abort MPSC Rx (aborting Tx messes things up) */
1164 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1165
1166 /* abort SDMA RX and stop TX for MPSC unit */
1167 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1168
1169 /* poll for SDMA RX abort completion */
1170 for (; timo > 0; timo--) {
1171 csr = GT_SDMA_READ(sc, SDMA_SDCM);
1172 if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1173 break;
1174 DELAY(50);
1175 }
1176 }
1177
1178 STATIC void
1179 gtmpscinit_start(struct gtmpsc_softc *sc)
1180 {
1181
1182 /*
1183 * Set pointers of current/first descriptor of TX to SDMA register.
1184 */
1185 GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1186 GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1187
1188 /*
1189 * Set pointer of current descriptor of RX to SDMA register.
1190 */
1191 GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
1192
1193 /*
1194 * initialize SDMA unit Configuration Register
1195 */
1196 GT_SDMA_WRITE(sc, SDMA_SDC,
1197 SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);
1198
1199 gtmpsc_loadchannelregs(sc);
1200
1201 /*
1202 * set MPSC LO and HI port config registers for GTMPSC unit
1203 */
1204 GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
1205 GTMPSC_MMCR_LO_MODE_UART |
1206 GTMPSC_MMCR_LO_ET |
1207 GTMPSC_MMCR_LO_ER |
1208 GTMPSC_MMCR_LO_NLM);
1209 GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
1210 GTMPSC_MMCR_HI_TCDV_DEFAULT |
1211 GTMPSC_MMCR_HI_RDW |
1212 GTMPSC_MMCR_HI_RCDV_DEFAULT);
1213
1214 /*
1215 * tell the MPSC receiver to enter hunt mode
1216 */
1217 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
1218 }
1219
1220 STATIC void
1221 gtmpscshutdown(struct gtmpsc_softc *sc)
1222 {
1223 struct tty *tp;
1224
1225 #ifdef KGDB
1226 if ((sc->sc_flags & GTMPSC_KGDB) != 0)
1227 return;
1228 #endif
1229 tp = sc->sc_tty;
1230 mutex_spin_enter(&sc->sc_lock);
1231 /* Fake carrier off */
1232 (void) (*tp->t_linesw->l_modem)(tp, 0);
1233 sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1234 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1235 mutex_spin_exit(&sc->sc_lock);
1236 }
1237
1238 STATIC void
1239 gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
1240 {
1241
1242 if (sc->sc_dev != NULL)
1243 gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
1244 GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
1245 GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
1246
1247 /*
1248 * set MPSC Protocol configuration register for GTMPSC unit
1249 */
1250 GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
1251 }
1252
1253
1254 #ifdef MPSC_CONSOLE
1255 /*
1256 * Following are all routines needed for MPSC to act as console
1257 */
1258 STATIC int
1259 gtmpsccngetc(dev_t dev)
1260 {
1261
1262 return gtmpsc_common_getc(&gtmpsc_cn_softc);
1263 }
1264
1265 STATIC void
1266 gtmpsccnputc(dev_t dev, int c)
1267 {
1268
1269 gtmpsc_common_putc(&gtmpsc_cn_softc, c);
1270 }
1271
1272 STATIC void
1273 gtmpsccnpollc(dev_t dev, int on)
1274 {
1275 }
1276
1277 STATIC void
1278 gtmpsccnhalt(dev_t dev)
1279 {
1280 gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
1281 uint32_t csr;
1282
1283 /*
1284 * flush TX buffers
1285 */
1286 gtmpsc_txflush(sc);
1287
1288 /*
1289 * stop MPSC unit RX
1290 */
1291 csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
1292 csr &= ~GTMPSC_CHR2_EH;
1293 csr |= GTMPSC_CHR2_RXABORT;
1294 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
1295
1296 DELAY(GTMPSC_RESET_DELAY);
1297
1298 /*
1299 * abort SDMA RX for MPSC unit
1300 */
1301 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
1302 }
1303
1304 int
1305 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1306 int unit, int brg, int speed, tcflag_t tcflag)
1307 {
1308 struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1309 int i, res;
1310 const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1311
1312 res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1313 if (res != 0)
1314 return res;
1315
1316 gtmpscinit_stop(sc);
1317 gtmpscinit_start(sc);
1318
1319 /*
1320 * enable SDMA receive
1321 */
1322 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1323
1324 for (i = 0; i < sizeof(cp); i++) {
1325 if (*(cp + i) == 0)
1326 break;
1327 gtmpsc_common_putc(sc, *(cp + i));
1328 }
1329
1330 cn_tab = &gtmpsc_consdev;
1331 cn_init_magic(&gtmpsc_cnm_state);
1332
1333 return 0;
1334 }
1335
1336 /*
1337 * gtmpsc_hackinit - hacks required to support GTMPSC console
1338 */
1339 STATIC int
1340 gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
1341 bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
1342 int baudrate, tcflag_t tcflag)
1343 {
1344 gtmpsc_poll_sdma_t *cn_dmapage =
1345 (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
1346 int error;
1347
1348 DPRINTF(("hackinit\n"));
1349
1350 memset(sc, 0, sizeof(struct gtmpsc_softc));
1351 error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
1352 &sc->sc_mpsch);
1353 if (error != 0)
1354 goto fail0;
1355
1356 error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
1357 &sc->sc_sdmah);
1358 if (error != 0)
1359 goto fail1;
1360 error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
1361 sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
1362 if (error != 0)
1363 goto fail2;
1364 error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
1365 sizeof(gtmpsc_polltx_t), NULL,
1366 BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1367 if (error != 0)
1368 goto fail3;
1369 error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
1370 sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
1371 &sc->sc_rxdma_map);
1372 if (error != 0)
1373 goto fail4;
1374 error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
1375 sizeof(gtmpsc_pollrx_t), NULL,
1376 BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1377 if (error != 0)
1378 goto fail5;
1379
1380 sc->sc_iot = iot;
1381 sc->sc_dmat = dmat;
1382 sc->sc_poll_sdmapage = cn_dmapage;
1383 sc->sc_brg = brg;
1384 sc->sc_baudrate = baudrate;
1385 sc->sc_cflag = tcflag;
1386
1387 gtmpsc_txdesc_init(sc);
1388 gtmpsc_rxdesc_init(sc);
1389
1390 return 0;
1391
1392 fail5:
1393 bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
1394 fail4:
1395 bus_dmamap_unload(dmat, sc->sc_txdma_map);
1396 fail3:
1397 bus_dmamap_destroy(dmat, sc->sc_txdma_map);
1398 fail2:
1399 bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
1400 fail1:
1401 bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
1402 fail0:
1403 return error;
1404 }
1405 #endif /* MPSC_CONSOLE */
1406
1407 #ifdef KGDB
1408 STATIC int
1409 gtmpsc_kgdb_getc(void *arg)
1410 {
1411 struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1412
1413 return gtmpsc_common_getc(sc);
1414 }
1415
1416 STATIC void
1417 gtmpsc_kgdb_putc(void *arg, int c)
1418 {
1419 struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1420
1421 return gtmpsc_common_putc(sc, c);
1422 }
1423 #endif /* KGDB */
1424
1425 #if defined(MPSC_CONSOLE) || defined(KGDB)
1426 /*
1427 * gtmpsc_common_getc - polled console read
1428 *
1429 * We copy data from the DMA buffers into a buffer in the softc
1430 * to reduce descriptor ownership turnaround time; the MPSC can
1431 * crater if it wraps its descriptor ring, which advances
1432 * asynchronously and is throttled only by line speed.
1433 */
1434 STATIC int
1435 gtmpsc_common_getc(struct gtmpsc_softc *sc)
1436 {
1437 gtmpsc_pollrx_t *vrxp;
1438 uint32_t csr;
1439 int ix, ch, wdog_interval = 0;
1440
1441 if (!cold)
1442 mutex_spin_enter(&sc->sc_lock);
1443
1444 ix = sc->sc_rcvdrx;
1445 vrxp = &sc->sc_poll_sdmapage->rx[ix];
1446 while (sc->sc_rcvcnt == 0) {
1447 /* Wait for a completed receive descriptor */
1448 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1449 ix * sizeof(gtmpsc_pollrx_t),
1450 sizeof(sdma_desc_t),
1451 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1452 csr = vrxp->rxdesc.sdma_csr;
1453 if (csr & SDMA_CSR_RX_OWN) {
1454 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
1455 GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
1456 if (wdog_interval++ % 32)
1457 gt_watchdog_service();
1458 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1459 ix * sizeof(gtmpsc_pollrx_t),
1460 sizeof(sdma_desc_t),
1461 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1462 DELAY(50);
1463 continue;
1464 }
1465 if (csr & SDMA_CSR_RX_ES)
1466 aprint_error_dev(sc->sc_dev,
1467 "RX error, rxdesc csr 0x%x\n", csr);
1468
1469 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1470 ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1471 sizeof(vrxp->rxbuf),
1472 BUS_DMASYNC_POSTREAD);
1473
1474 vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
1475 sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
1476 sc->sc_roffset = 0;
1477 sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
1478
1479 if (sc->sc_rcvcnt == 0) {
1480 /* cleanup this descriptor, and return to DMA */
1481 CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
1482 sc->sc_rcvrx = sc->sc_rcvdrx;
1483 }
1484
1485 ix = sc->sc_rcvdrx;
1486 vrxp = &sc->sc_poll_sdmapage->rx[ix];
1487 }
1488 ch = vrxp->rxbuf[sc->sc_roffset++];
1489 sc->sc_rcvcnt--;
1490
1491 if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
1492 /* cleanup this descriptor, and return to DMA */
1493 CLEANUP_AND_RETURN_RXDMA(sc, ix);
1494 sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
1495 }
1496
1497 gt_watchdog_service();
1498
1499 if (!cold)
1500 mutex_spin_exit(&sc->sc_lock);
1501 return ch;
1502 }
1503
1504 STATIC void
1505 gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
1506 {
1507 gtmpsc_polltx_t *vtxp;
1508 int ix;
1509 const int nc = 1;
1510
1511 /* Get a DMA descriptor */
1512 if (!cold)
1513 mutex_spin_enter(&sc->sc_lock);
1514 ix = sc->sc_nexttx;
1515 sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1516 if (sc->sc_nexttx == sc->sc_lasttx) {
1517 gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1518 sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1519 }
1520 if (!cold)
1521 mutex_spin_exit(&sc->sc_lock);
1522
1523 vtxp = &sc->sc_poll_sdmapage->tx[ix];
1524 vtxp->txbuf[0] = c;
1525 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1526 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1527 sizeof(vtxp->txbuf),
1528 BUS_DMASYNC_PREWRITE);
1529
1530 vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
1531 vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
1532 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1533 ix * sizeof(gtmpsc_polltx_t),
1534 sizeof(sdma_desc_t),
1535 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1536
1537 if (!cold)
1538 mutex_spin_enter(&sc->sc_lock);
1539 /*
1540 * now kick some SDMA
1541 */
1542 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
1543
1544 while (sc->sc_lasttx != sc->sc_nexttx) {
1545 gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1546 sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1547 }
1548 if (!cold)
1549 mutex_spin_exit(&sc->sc_lock);
1550 }
1551
1552 /*
1553 * gtmpsc_common_putc_wait_complete - wait for a polled TX descriptor to complete
1554 */
1555 STATIC void
1556 gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
1557 {
1558 gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
1559 uint32_t csr;
1560 int wdog_interval = 0;
1561
1562 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1563 ix * sizeof(gtmpsc_polltx_t),
1564 sizeof(sdma_desc_t),
1565 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1566 csr = vtxp->txdesc.sdma_csr;
1567 while (csr & SDMA_CSR_TX_OWN) {
1568 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1569 ix * sizeof(gtmpsc_polltx_t),
1570 sizeof(sdma_desc_t),
1571 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1572 DELAY(40);
1573 if (wdog_interval++ % 32)
1574 gt_watchdog_service();
1575 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1576 ix * sizeof(gtmpsc_polltx_t),
1577 sizeof(sdma_desc_t),
1578 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1579 csr = vtxp->txdesc.sdma_csr;
1580 }
1581 if (csr & SDMA_CSR_TX_ES)
1582 aprint_error_dev(sc->sc_dev,
1583 "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
1584 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1585 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1586 sizeof(vtxp->txbuf),
1587 BUS_DMASYNC_POSTWRITE);
1588 }
1589 #endif /* defined(MPSC_CONSOLE) || defined(KGDB) */
1590