/* $NetBSD: gtmpsc.c,v 1.42 2010/11/14 03:49:53 uebayasi Exp $ */
2 /*
3 * Copyright (c) 2009 KIYOHARA Takashi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 /*
28 * mpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.42 2010/11/14 03:49:53 uebayasi Exp $");
33
34 #include "opt_kgdb.h"
35
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/intr.h>
42 #include <sys/kauth.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/systm.h>
47 #include <sys/timepps.h>
48 #include <sys/tty.h>
49 #ifdef KGDB
50 #include <sys/kgdb.h>
51 #endif
52
53 #include <dev/cons.h>
54
55 #include <dev/marvell/gtreg.h>
56 #include <dev/marvell/gtvar.h>
57 #include <dev/marvell/gtbrgreg.h>
58 #include <dev/marvell/gtbrgvar.h>
59 #include <dev/marvell/gtsdmareg.h>
60 #include <dev/marvell/gtsdmavar.h>
61 #include <dev/marvell/gtmpscreg.h>
62 #include <dev/marvell/gtmpscvar.h>
63 #include <dev/marvell/marvellreg.h>
64 #include <dev/marvell/marvellvar.h>
65
66 #include "gtmpsc.h"
67 #include "ioconf.h"
68 #include "locators.h"
69
70 /*
71 * Wait 2 characters time for RESET_DELAY
72 */
73 #define GTMPSC_RESET_DELAY (2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
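/*
 * For example, at 115200 baud this evaluates to 2*8*1000000/115200,
 * i.e. roughly 138 microseconds for two character times.
 */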
74
75
76 #if defined(DEBUG)
77 unsigned int gtmpsc_debug = 0;
78 # define STATIC
79 # define DPRINTF(x) do { if (gtmpsc_debug) printf x ; } while (0)
80 #else
81 # define STATIC static
82 # define DPRINTF(x)
83 #endif
84
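/*
 * The minor number carries the unit in the low 19 bits; bit 0x80000
 * marks the dialout device node.
 */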
85 #define GTMPSCUNIT_MASK 0x7ffff
86 #define GTMPSCDIALOUT_MASK 0x80000
87
88 #define GTMPSCUNIT(x) (minor(x) & GTMPSCUNIT_MASK)
89 #define GTMPSCDIALOUT(x) (minor(x) & GTMPSCDIALOUT_MASK)
90
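/*
 * CLEANUP_AND_RETURN_RXDMA: re-arm one RX descriptor after its contents
 * have been consumed -- reset the byte count, hand ownership back to the
 * SDMA engine (with the first/last/interrupt-enable bits set), and sync
 * both the buffer and the descriptor so the hardware sees the update.
 */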
91 #define CLEANUP_AND_RETURN_RXDMA(sc, ix) \
92 do { \
93 gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
94 \
95 _vrxp->rxdesc.sdma_csr = \
96 SDMA_CSR_RX_L | \
97 SDMA_CSR_RX_F | \
98 SDMA_CSR_RX_OWN | \
99 SDMA_CSR_RX_EI; \
100 _vrxp->rxdesc.sdma_cnt = \
101 GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT; \
102 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map, \
103 (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t), \
		    sizeof(_vrxp->rxbuf), \
105 BUS_DMASYNC_PREREAD); \
106 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map, \
107 (ix) * sizeof(gtmpsc_pollrx_t), \
108 sizeof(sdma_desc_t), \
109 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
	} while (0)
111
112
113 STATIC int gtmpscmatch(device_t, cfdata_t, void *);
114 STATIC void gtmpscattach(device_t, device_t, void *);
115
116 STATIC void gtmpsc_softintr(void *);
117
118 STATIC void gtmpscstart(struct tty *);
119 STATIC int gtmpscparam(struct tty *, struct termios *);
120
121 STATIC void gtmpsc_shutdownhook(void *);
122
123 STATIC uint32_t cflag2mpcr(tcflag_t);
124 STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
125 STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
126 STATIC void gtmpsc_write(struct gtmpsc_softc *);
127 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
128 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
129 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
130 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
131 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
132 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
133 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
134
135 #ifdef MPSC_CONSOLE
136 STATIC int gtmpsccngetc(dev_t);
137 STATIC void gtmpsccnputc(dev_t, int);
138 STATIC void gtmpsccnpollc(dev_t, int);
139 STATIC void gtmpsccnhalt(dev_t);
140
141 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
142 bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
143 #endif
144
145 #if defined(MPSC_CONSOLE) || defined(KGDB)
146 STATIC int gtmpsc_common_getc(struct gtmpsc_softc *);
147 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
148 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
149 #endif
150
151 dev_type_open(gtmpscopen);
152 dev_type_close(gtmpscclose);
153 dev_type_read(gtmpscread);
154 dev_type_write(gtmpscwrite);
155 dev_type_ioctl(gtmpscioctl);
156 dev_type_stop(gtmpscstop);
157 dev_type_tty(gtmpsctty);
158 dev_type_poll(gtmpscpoll);
159
160 const struct cdevsw gtmpsc_cdevsw = {
161 gtmpscopen, gtmpscclose, gtmpscread, gtmpscwrite, gtmpscioctl,
162 gtmpscstop, gtmpsctty, gtmpscpoll, nommap, ttykqfilter, D_TTY
163 };
164
165 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
166 gtmpscmatch, gtmpscattach, NULL, NULL);
167
168
169 STATIC uint32_t sdma_imask; /* soft copy of SDMA IMASK reg */
170 STATIC struct cnm_state gtmpsc_cnm_state;
171
172 #ifdef KGDB
173 static int gtmpsc_kgdb_addr;
174 static int gtmpsc_kgdb_attached;
175
176 STATIC int gtmpsc_kgdb_getc(void *);
177 STATIC void gtmpsc_kgdb_putc(void *, int);
178 #endif /* KGDB */
179
180 #ifdef MPSC_CONSOLE
181 /*
182 * hacks for console initialization
183 * which happens prior to autoconfig "attach"
184 *
185 * XXX Assumes PAGE_SIZE is a constant!
186 */
187 gtmpsc_softc_t gtmpsc_cn_softc;
188 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
189
190
191 static struct consdev gtmpsc_consdev = {
192 NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
193 NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
194 };
195 #endif
196
197
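/*
 * Register access helpers: sc_mpsch and sc_sdmah are the bus_space
 * handles for this channel's MPSC and SDMA register blocks.
 */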
198 #define GT_MPSC_READ(sc, o) \
199 bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
200 #define GT_MPSC_WRITE(sc, o, v) \
201 bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
202 #define GT_SDMA_READ(sc, o) \
203 bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
204 #define GT_SDMA_WRITE(sc, o, v) \
205 bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
206
207
208 /* ARGSUSED */
209 STATIC int
210 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
211 {
212 struct marvell_attach_args *mva = aux;
213
214 if (strcmp(mva->mva_name, match->cf_name) != 0)
215 return 0;
216 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
217 return 0;
218
219 mva->mva_size = GTMPSC_SIZE;
220 return 1;
221 }
222
223 /* ARGSUSED */
224 STATIC void
225 gtmpscattach(device_t parent, device_t self, void *aux)
226 {
227 struct gtmpsc_softc *sc = device_private(self);
228 struct marvell_attach_args *mva = aux;
229 bus_dma_segment_t segs;
230 struct tty *tp;
231 int rsegs, err, unit;
232 void *kva;
233
234 aprint_naive("\n");
235 aprint_normal(": Multi-Protocol Serial Controller\n");
236
237 if (mva->mva_unit != MVA_UNIT_DEFAULT)
238 unit = mva->mva_unit;
239 else
240 unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
241
242 #ifdef MPSC_CONSOLE
	if (cn_tab == &gtmpsc_consdev &&
	    cn_tab->cn_dev == makedev(0, unit)) {
		gtmpsc_cn_softc.sc_dev = self;
		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
247 sc->sc_flags = GTMPSC_CONSOLE;
248 } else
249 #endif
250 {
251 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
252 mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
253 aprint_error_dev(self, "Cannot map MPSC registers\n");
254 return;
255 }
256 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
257 GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
258 aprint_error_dev(self, "Cannot map SDMA registers\n");
259 return;
260 }
261 sc->sc_dev = self;
262 sc->sc_unit = unit;
263 sc->sc_iot = mva->mva_iot;
264 sc->sc_dmat = mva->mva_dmat;
265
266 err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
267 &segs, 1, &rsegs, BUS_DMA_NOWAIT);
268 if (err) {
269 aprint_error_dev(sc->sc_dev,
270 "bus_dmamem_alloc error 0x%x\n", err);
271 goto fail0;
272 }
273 err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
274 BUS_DMA_NOWAIT);
275 if (err) {
276 aprint_error_dev(sc->sc_dev,
277 "bus_dmamem_map error 0x%x\n", err);
278 goto fail1;
279 }
280 memset(kva, 0, PAGE_SIZE); /* paranoid/superfluous */
281 sc->sc_poll_sdmapage = kva;
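		/*
		 * The single DMA-safe page allocated above holds both the
		 * TX and RX descriptor/buffer areas; the tx and rx members
		 * are loaded into separate DMA maps below.
		 */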
282
283 err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
284 sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
285 &sc->sc_txdma_map);
286 if (err != 0) {
287 aprint_error_dev(sc->sc_dev,
288 "bus_dmamap_create error 0x%x\n", err);
289 goto fail2;
290 }
291 err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
292 sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
293 NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
294 if (err != 0) {
295 aprint_error_dev(sc->sc_dev,
296 "bus_dmamap_load tx error 0x%x\n", err);
297 goto fail3;
298 }
299 err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
300 sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
301 &sc->sc_rxdma_map);
302 if (err != 0) {
303 aprint_error_dev(sc->sc_dev,
304 "bus_dmamap_create rx error 0x%x\n", err);
305 goto fail4;
306 }
307 err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
308 sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
309 NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
310 if (err != 0) {
311 aprint_error_dev(sc->sc_dev,
312 "bus_dmamap_load rx error 0x%x\n", err);
313 goto fail5;
314 }
315
316 sc->sc_brg = unit; /* XXXXX */
317 sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
318 }
319 aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
320 GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
321
322 sc->sc_rx_ready = 0;
323 sc->sc_tx_busy = 0;
324 sc->sc_tx_done = 0;
325 sc->sc_tx_stopped = 0;
326 sc->sc_heldchange = 0;
327
328 gtmpsc_txdesc_init(sc);
329 gtmpsc_rxdesc_init(sc);
330
331 sc->sc_tty = tp = ttymalloc();
332 tp->t_oproc = gtmpscstart;
333 tp->t_param = gtmpscparam;
334 tty_attach(tp);
335
336 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
337
338 /*
339 * clear any pending SDMA interrupts for this unit
340 */
341 (void) gt_sdma_icause(device_parent(sc->sc_dev),
342 SDMA_INTR_RXBUF(sc->sc_unit) |
343 SDMA_INTR_RXERR(sc->sc_unit) |
344 SDMA_INTR_TXBUF(sc->sc_unit) |
345 SDMA_INTR_TXEND(sc->sc_unit));
346
347 sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
348 if (sc->sc_si == NULL)
349 panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");
350
351 shutdownhook_establish(gtmpsc_shutdownhook, sc);
352
353 gtmpscinit_stop(sc);
354 gtmpscinit_start(sc);
355
356 if (sc->sc_flags & GTMPSC_CONSOLE) {
357 int maj;
358
359 /* locate the major number */
		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
361
362 tp->t_dev = cn_tab->cn_dev =
363 makedev(maj, device_unit(sc->sc_dev));
364
365 aprint_normal_dev(self, "console\n");
366 }
367
368 #ifdef KGDB
369 /*
370 * Allow kgdb to "take over" this port. If this is
371 * the kgdb device, it has exclusive use.
372 */
373 if (sc->sc_unit == gtmpsckgdbport) {
374 #ifdef MPSC_CONSOLE
375 if (sc->sc_unit == MPSC_CONSOLE) {
376 aprint_error_dev(self,
377 "(kgdb): cannot share with console\n");
378 return;
379 }
380 #endif
381
382 sc->sc_flags |= GTMPSC_KGDB;
383 aprint_normal_dev(self, "kgdb\n");
384
385 gtmpsc_txflush(sc);
386
387 kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
388 kgdb_dev = 123; /* unneeded, only to satisfy some tests */
389 gtmpsc_kgdb_attached = 1;
390 kgdb_connect(1);
391 }
392 #endif /* KGDB */
393
394 return;
395
396
397 fail5:
398 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
399 fail4:
400 bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
401 fail3:
402 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
403 fail2:
404 bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
405 fail1:
406 bus_dmamem_free(sc->sc_dmat, &segs, 1);
407 fail0:
408 return;
409 }
410
411 /* ARGSUSED */
412 int
413 gtmpsc_intr(void *arg)
414 {
415 struct gt_softc *gt = (struct gt_softc *)arg;
416 struct gtmpsc_softc *sc;
417 uint32_t icause;
418 int i;
419
420 icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
421
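	/*
	 * One GT interrupt covers all MPSC channels: walk every attached
	 * unit and, under its spin lock, service whichever RX/TX buffer
	 * causes are set in the SDMA interrupt cause register.
	 */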
422 for (i = 0; i < GTMPSC_NCHAN; i++) {
		sc = device_lookup_private(&gtmpsc_cd, i);
424 if (sc == NULL)
425 continue;
426 mutex_spin_enter(&sc->sc_lock);
427 if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
428 gtmpsc_intr_rx(sc);
429 icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
430 }
431 if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
432 gtmpsc_intr_tx(sc);
433 icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
434 }
435 mutex_spin_exit(&sc->sc_lock);
436 }
437
438 return 1;
439 }
440
441 STATIC void
442 gtmpsc_softintr(void *arg)
443 {
444 struct gtmpsc_softc *sc = arg;
445 struct tty *tp = sc->sc_tty;
446 gtmpsc_pollrx_t *vrxp;
447 int code;
448 u_int cc;
449 u_char *get, *end, lsr;
450 int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
451
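	/*
	 * Soft interrupt: push any received bytes up to the line
	 * discipline and, if a transmit chunk has completed, account for
	 * it in the tty output queue and restart output.
	 */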
452 if (sc->sc_rx_ready) {
453 sc->sc_rx_ready = 0;
454
455 cc = sc->sc_rcvcnt;
456
457 /* If not yet open, drop the entire buffer content here */
458 if (!ISSET(tp->t_state, TS_ISOPEN))
459 cc = 0;
460
461 vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
462 end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
463 get = vrxp->rxbuf + sc->sc_roffset;
464 while (cc > 0) {
465 code = *get;
466 lsr = vrxp->rxdesc.sdma_csr;
467
468 if (ISSET(lsr,
469 SDMA_CSR_RX_PE |
470 SDMA_CSR_RX_FR |
471 SDMA_CSR_RX_OR |
472 SDMA_CSR_RX_BR)) {
473 if (ISSET(lsr, SDMA_CSR_RX_OR))
474 ; /* XXXXX not yet... */
475 if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
476 SET(code, TTY_FE);
477 if (ISSET(lsr, SDMA_CSR_RX_PE))
478 SET(code, TTY_PE);
479 }
480
481 if ((*rint)(code, tp) == -1) {
482 /*
483 * The line discipline's buffer is out of space.
484 */
485 /* XXXXX not yet... */
486 }
487 if (++get >= end) {
488 /* cleanup this descriptor, and return to DMA */
489 CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
490 sc->sc_rcvrx =
				    (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
492 vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
493 end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
494 get = vrxp->rxbuf + sc->sc_roffset;
495 }
496 cc--;
497 }
498 }
499 if (sc->sc_tx_done) {
500 sc->sc_tx_done = 0;
501 CLR(tp->t_state, TS_BUSY);
502 if (ISSET(tp->t_state, TS_FLUSH))
503 CLR(tp->t_state, TS_FLUSH);
504 else
505 ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
506 (*tp->t_linesw->l_start)(tp);
507 }
508 }
509
510 int
511 gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
512 {
513 struct gtmpsc_softc *sc;
514 int unit = GTMPSCUNIT(dev);
515 struct tty *tp;
516 int s;
517 int error;
518
	sc = device_lookup_private(&gtmpsc_cd, unit);
520 if (!sc)
521 return ENXIO;
522 #ifdef KGDB
523 /*
524 * If this is the kgdb port, no other use is permitted.
525 */
526 if (sc->sc_flags & GTMPSC_KGDB)
527 return EBUSY;
528 #endif
529 tp = sc->sc_tty;
530 if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
531 return EBUSY;
532
533 s = spltty();
534
535 if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
536 struct termios t;
537
538 tp->t_dev = dev;
539
540 mutex_spin_enter(&sc->sc_lock);
541
542 /* Turn on interrupts. */
543 sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
544 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
545
546 /* Clear PPS capture state on first open. */
547 mutex_spin_enter(&timecounter_lock);
548 memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
549 sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
550 pps_init(&sc->sc_pps_state);
551 mutex_spin_exit(&timecounter_lock);
552
553 mutex_spin_exit(&sc->sc_lock);
554
555 if (sc->sc_flags & GTMPSC_CONSOLE) {
556 t.c_ospeed = sc->sc_baudrate;
557 t.c_cflag = sc->sc_cflag;
558 } else {
559 t.c_ospeed = TTYDEF_SPEED;
560 t.c_cflag = TTYDEF_CFLAG;
561 }
562 t.c_ispeed = t.c_ospeed;
563
564 /* Make sure gtmpscparam() will do something. */
565 tp->t_ospeed = 0;
566 (void) gtmpscparam(tp, &t);
567 tp->t_iflag = TTYDEF_IFLAG;
568 tp->t_oflag = TTYDEF_OFLAG;
569 tp->t_lflag = TTYDEF_LFLAG;
570 ttychars(tp);
571 ttsetwater(tp);
572
573 mutex_spin_enter(&sc->sc_lock);
574
575 /* Clear the input/output ring */
576 sc->sc_rcvcnt = 0;
577 sc->sc_roffset = 0;
578 sc->sc_rcvrx = 0;
579 sc->sc_rcvdrx = 0;
580 sc->sc_nexttx = 0;
581 sc->sc_lasttx = 0;
582
583 /*
584 * enable SDMA receive
585 */
586 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
587
588 mutex_spin_exit(&sc->sc_lock);
589 }
590 splx(s);
591 error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
592 if (error)
593 goto bad;
594
595 error = (*tp->t_linesw->l_open)(dev, tp);
596 if (error)
597 goto bad;
598
599 return 0;
600
601 bad:
602 if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
603 /*
604 * We failed to open the device, and nobody else had it opened.
605 * Clean up the state as appropriate.
606 */
607 gtmpscshutdown(sc);
608 }
609
610 return error;
611 }
612
613 int
614 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
615 {
616 int unit = GTMPSCUNIT(dev);
	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
618 struct tty *tp = sc->sc_tty;
619
620 if (!ISSET(tp->t_state, TS_ISOPEN))
621 return 0;
622
623 (*tp->t_linesw->l_close)(tp, flag);
624 ttyclose(tp);
625
626 if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
627 /*
628 * Although we got a last close, the device may still be in
629 * use; e.g. if this was the dialout node, and there are still
630 * processes waiting for carrier on the non-dialout node.
631 */
632 gtmpscshutdown(sc);
633 }
634
635 return 0;
636 }
637
638 int
639 gtmpscread(dev_t dev, struct uio *uio, int flag)
640 {
641 struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
643 struct tty *tp = sc->sc_tty;
644
645 return (*tp->t_linesw->l_read)(tp, uio, flag);
646 }
647
648 int
649 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
650 {
651 struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
653 struct tty *tp = sc->sc_tty;
654
655 return (*tp->t_linesw->l_write)(tp, uio, flag);
656 }
657
658 int
659 gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
660 {
661 struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
663 struct tty *tp = sc->sc_tty;
664 int error;
665
666 error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
667 if (error != EPASSTHROUGH)
668 return error;
669
670 error = ttioctl(tp, cmd, data, flag, l);
671 if (error != EPASSTHROUGH)
672 return error;
673
674 error = 0;
675 switch (cmd) {
676 case TIOCSFLAGS:
677 error = kauth_authorize_device_tty(l->l_cred,
678 KAUTH_DEVICE_TTY_PRIVSET, tp);
679 if (error)
680 return error;
681 break;
682 default:
683 /* nothing */
684 break;
685 }
686
687 mutex_spin_enter(&sc->sc_lock);
688
689 switch (cmd) {
690 case PPS_IOC_CREATE:
691 case PPS_IOC_DESTROY:
692 case PPS_IOC_GETPARAMS:
693 case PPS_IOC_SETPARAMS:
694 case PPS_IOC_GETCAP:
695 case PPS_IOC_FETCH:
696 #ifdef PPS_SYNC
697 case PPS_IOC_KCBIND:
698 #endif
699 mutex_spin_enter(&timecounter_lock);
700 error = pps_ioctl(cmd, data, &sc->sc_pps_state);
701 mutex_spin_exit(&timecounter_lock);
702 break;
703
704 case TIOCDCDTIMESTAMP: /* XXX old, overloaded API used by xntpd v3 */
705 mutex_spin_enter(&timecounter_lock);
706 #ifndef PPS_TRAILING_EDGE
707 TIMESPEC_TO_TIMEVAL((struct timeval *)data,
708 &sc->sc_pps_state.ppsinfo.assert_timestamp);
709 #else
710 TIMESPEC_TO_TIMEVAL((struct timeval *)data,
711 &sc->sc_pps_state.ppsinfo.clear_timestamp);
712 #endif
713 mutex_spin_exit(&timecounter_lock);
714 break;
715
716 default:
717 error = EPASSTHROUGH;
718 break;
719 }
720
721 mutex_spin_exit(&sc->sc_lock);
722
723 return error;
724 }
725
726 void
727 gtmpscstop(struct tty *tp, int flag)
728 {
729 }
730
731 struct tty *
732 gtmpsctty(dev_t dev)
733 {
734 struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
736
737 return sc->sc_tty;
738 }
739
740 int
741 gtmpscpoll(dev_t dev, int events, struct lwp *l)
742 {
743 struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
745 struct tty *tp = sc->sc_tty;
746
747 return (*tp->t_linesw->l_poll)(tp, events, l);
748 }
749
750
751 STATIC void
752 gtmpscstart(struct tty *tp)
753 {
754 struct gtmpsc_softc *sc;
755 unsigned char *tba;
756 unsigned int unit;
757 int s, tbc;
758
759 unit = GTMPSCUNIT(tp->t_dev);
	sc = device_lookup_private(&gtmpsc_cd, unit);
761 if (sc == NULL)
762 return;
763
764 s = spltty();
765 if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
766 goto out;
767 if (sc->sc_tx_stopped)
768 goto out;
769 if (!ttypull(tp))
770 goto out;
771
772 /* Grab the first contiguous region of buffer space. */
773 tba = tp->t_outq.c_cf;
774 tbc = ndqb(&tp->t_outq, 0);
775
776 mutex_spin_enter(&sc->sc_lock);
777
778 sc->sc_tba = tba;
779 sc->sc_tbc = tbc;
780
781 sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
782 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
783 SET(tp->t_state, TS_BUSY);
784 sc->sc_tx_busy = 1;
785 gtmpsc_write(sc);
786
787 mutex_spin_exit(&sc->sc_lock);
788 out:
789 splx(s);
790 }
791
792 STATIC int
793 gtmpscparam(struct tty *tp, struct termios *t)
794 {
795 struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
797
798 /* Check requested parameters. */
799 if (compute_cdv(t->c_ospeed) < 0)
800 return EINVAL;
801 if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
802 return EINVAL;
803
804 /*
805 * If there were no changes, don't do anything. This avoids dropping
806 * input and improves performance when all we did was frob things like
807 * VMIN and VTIME.
808 */
809 if (tp->t_ospeed == t->c_ospeed &&
810 tp->t_cflag == t->c_cflag)
811 return 0;
812
813 mutex_spin_enter(&sc->sc_lock);
814
815 /* And copy to tty. */
816 tp->t_ispeed = 0;
817 tp->t_ospeed = t->c_ospeed;
818 tp->t_cflag = t->c_cflag;
819
820 sc->sc_baudrate = t->c_ospeed;
821
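	/*
	 * If the transmitter is busy, defer reprogramming the channel
	 * until the pending chunk completes (gtmpsc_intr_tx applies the
	 * held change); otherwise load the new parameters immediately.
	 */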
822 if (!sc->sc_heldchange) {
823 if (sc->sc_tx_busy) {
824 sc->sc_heldtbc = sc->sc_tbc;
825 sc->sc_tbc = 0;
826 sc->sc_heldchange = 1;
827 } else
828 gtmpsc_loadchannelregs(sc);
829 }
830
831 mutex_spin_exit(&sc->sc_lock);
832
833 /* Fake carrier on */
834 (void) (*tp->t_linesw->l_modem)(tp, 1);
835
836 return 0;
837 }
838
839 void
840 gtmpsc_shutdownhook(void *arg)
841 {
842 gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
843
844 gtmpsc_txflush(sc);
845 }
846
847 /*
 * Convert cflag (CS[5678] and CSTOPB) to MPCR bits.
849 */
850 STATIC uint32_t
851 cflag2mpcr(tcflag_t cflag)
852 {
853 uint32_t mpcr = 0;
854
855 switch (ISSET(cflag, CSIZE)) {
856 case CS5:
857 SET(mpcr, GTMPSC_MPCR_CL_5);
858 break;
859 case CS6:
860 SET(mpcr, GTMPSC_MPCR_CL_6);
861 break;
862 case CS7:
863 SET(mpcr, GTMPSC_MPCR_CL_7);
864 break;
865 case CS8:
866 SET(mpcr, GTMPSC_MPCR_CL_8);
867 break;
868 }
869 if (ISSET(cflag, CSTOPB))
870 SET(mpcr, GTMPSC_MPCR_SBL_2);
871
872 return mpcr;
873 }
874
875 STATIC void
876 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
877 {
878 gtmpsc_pollrx_t *vrxp;
879 uint32_t csr;
880 int kick, ix;
881
882 kick = 0;
883
884 /* already handled in gtmpsc_common_getc() */
885 if (sc->sc_rcvdrx == sc->sc_rcvrx)
886 return;
887
888 ix = sc->sc_rcvdrx;
889 vrxp = &sc->sc_poll_sdmapage->rx[ix];
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
891 ix * sizeof(gtmpsc_pollrx_t),
892 sizeof(sdma_desc_t),
893 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
894 csr = vrxp->rxdesc.sdma_csr;
895 while (!(csr & SDMA_CSR_RX_OWN)) {
896 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
897 ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
898 sizeof(vrxp->rxbuf),
899 BUS_DMASYNC_POSTREAD);
900 vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
901 if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
902 int cn_trapped = 0;
903
904 cn_check_magic(sc->sc_tty->t_dev,
905 CNC_BREAK, gtmpsc_cnm_state);
906 if (cn_trapped)
907 continue;
908 #if defined(KGDB) && !defined(DDB)
909 if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
910 kgdb_connect(1);
911 continue;
912 }
913 #endif
914 }
915
916 sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
917 kick = 1;
918
		ix = (ix + 1) % GTMPSC_NRXDESC;
920 vrxp = &sc->sc_poll_sdmapage->rx[ix];
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
922 ix * sizeof(gtmpsc_pollrx_t),
923 sizeof(sdma_desc_t),
924 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
925 csr = vrxp->rxdesc.sdma_csr;
926 }
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
928 ix * sizeof(gtmpsc_pollrx_t),
929 sizeof(sdma_desc_t),
930 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
931
932 if (kick) {
933 sc->sc_rcvdrx = ix;
934 sc->sc_rx_ready = 1;
935 softint_schedule(sc->sc_si);
936 }
937 }
938
939 STATIC __inline void
940 gtmpsc_intr_tx(struct gtmpsc_softc *sc)
941 {
942 gtmpsc_polltx_t *vtxp;
943 uint32_t csr;
944 int ix;
945
946 /*
947 * If we've delayed a parameter change, do it now,
948 * and restart output.
949 */
950 if (sc->sc_heldchange) {
951 gtmpsc_loadchannelregs(sc);
952 sc->sc_heldchange = 0;
953 sc->sc_tbc = sc->sc_heldtbc;
954 sc->sc_heldtbc = 0;
955 }
956
957 /* Clean-up TX descriptors and buffers */
958 ix = sc->sc_lasttx;
959 while (ix != sc->sc_nexttx) {
960 vtxp = &sc->sc_poll_sdmapage->tx[ix];
961 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
962 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
963 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
964 csr = vtxp->txdesc.sdma_csr;
965 if (csr & SDMA_CSR_TX_OWN) {
966 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
967 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
968 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
969 break;
970 }
971 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
972 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
973 sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
974 ix = (ix + 1) % GTMPSC_NTXDESC;
975 }
976 sc->sc_lasttx = ix;
977
978 /* Output the next chunk of the contiguous buffer */
979 gtmpsc_write(sc);
980 if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
981 sc->sc_tx_busy = 0;
982 sc->sc_tx_done = 1;
983 softint_schedule(sc->sc_si);
984 sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
985 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
986 }
987 }
988
989 /*
990 * gtmpsc_write - write a buffer into the hardware
991 */
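/*
 * Data is copied from the tty output region (sc_tba/sc_tbc) into free TX
 * descriptors in chunks of at most GTMPSC_TXBUFSZ bytes, ownership of each
 * descriptor is handed to the SDMA engine, and the TXD demand bit is set
 * in SDMA_SDCM if the engine is not already transmitting.
 */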
992 STATIC void
993 gtmpsc_write(struct gtmpsc_softc *sc)
994 {
995 gtmpsc_polltx_t *vtxp;
996 uint32_t sdcm, ix;
997 int kick, n;
998
999 kick = 0;
1000 while (sc->sc_tbc > 0 && sc->sc_nexttx != sc->sc_lasttx) {
1001 n = min(sc->sc_tbc, GTMPSC_TXBUFSZ);
1002
1003 ix = sc->sc_nexttx;
1004 sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1005
1006 vtxp = &sc->sc_poll_sdmapage->tx[ix];
1007
1008 memcpy(vtxp->txbuf, sc->sc_tba, n);
1009 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1010 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1011 sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1012
1013 vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1014 vtxp->txdesc.sdma_csr =
1015 SDMA_CSR_TX_L |
1016 SDMA_CSR_TX_F |
1017 SDMA_CSR_TX_EI |
1018 SDMA_CSR_TX_OWN;
1019 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1020 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1021 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1022
1023 sc->sc_tbc -= n;
1024 sc->sc_tba += n;
1025 kick = 1;
1026 }
1027 if (kick) {
1028 /*
1029 * now kick some SDMA
1030 */
1031 sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1032 if ((sdcm & SDMA_SDCM_TXD) == 0)
1033 GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1034 }
1035 }
1036
1037 /*
1038 * gtmpsc_txflush - wait for output to drain
1039 */
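/*
 * This busy-waits (DELAY(1) per poll, bounded to about 4 seconds) for the
 * most recently queued TX descriptor to be handed back by the SDMA engine.
 */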
1040 STATIC void
1041 gtmpsc_txflush(gtmpsc_softc_t *sc)
1042 {
1043 gtmpsc_polltx_t *vtxp;
1044 int ix, limit = 4000000; /* 4 seconds */
1045
1046 ix = sc->sc_nexttx - 1;
1047 if (ix < 0)
1048 ix = GTMPSC_NTXDESC - 1;
1049
1050 vtxp = &sc->sc_poll_sdmapage->tx[ix];
1051 while (limit > 0) {
1052 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1053 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1054 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1055 if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1056 break;
1057 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1058 ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1059 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1060 DELAY(1);
1061 limit -= 1;
1062 }
1063 }
1064
1065 /*
1066 * gtmpsc_rxdesc_init - set up RX descriptor ring
1067 */
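/*
 * Each descriptor's sdma_next holds the DMA address of the next descriptor
 * and the last one links back to the first, forming a ring.  RX descriptors
 * start out owned by the SDMA engine with the interrupt-enable bit set.
 */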
1068 STATIC void
1069 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1070 {
1071 gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1072 sdma_desc_t *dp;
1073 int i;
1074
1075 first_prxp = prxp =
1076 (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1077 vrxp = sc->sc_poll_sdmapage->rx;
	for (i = 0; i < GTMPSC_NRXDESC - 1; i++) {
1079 dp = &vrxp->rxdesc;
1080 dp->sdma_csr =
1081 SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1082 dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1083 dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1084 vrxp++;
1085 prxp++;
1086 dp->sdma_next = (uint32_t)&prxp->rxdesc;
1087
1088 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1089 i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1090 sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1091 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1092 i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1093 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1094 }
1095 dp = &vrxp->rxdesc;
1096 dp->sdma_csr =
1097 SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1098 dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1099 dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1100 dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1101
1102 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1103 i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1104 sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1105 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1106 i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1107 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1108
1109 sc->sc_rcvcnt = 0;
1110 sc->sc_roffset = 0;
1111 sc->sc_rcvrx = 0;
1112 sc->sc_rcvdrx = 0;
1113 }
1114
1115 /*
1116 * gtmpsc_txdesc_init - set up TX descriptor ring
1117 */
1118 STATIC void
1119 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1120 {
1121 gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1122 sdma_desc_t *dp;
1123 int i;
1124
1125 first_ptxp = ptxp =
1126 (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1127 vtxp = sc->sc_poll_sdmapage->tx;
	for (i = 0; i < GTMPSC_NTXDESC - 1; i++) {
1129 dp = &vtxp->txdesc;
1130 dp->sdma_csr = 0;
1131 dp->sdma_cnt = 0;
1132 dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1133 vtxp++;
1134 ptxp++;
1135 dp->sdma_next = (uint32_t)&ptxp->txdesc;
1136 }
1137 dp = &vtxp->txdesc;
1138 dp->sdma_csr = 0;
1139 dp->sdma_cnt = 0;
1140 dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1141 dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1142
1143 sc->sc_nexttx = 0;
1144 sc->sc_lasttx = 0;
1145 }
1146
1147 STATIC void
1148 gtmpscinit_stop(struct gtmpsc_softc *sc)
1149 {
1150 uint32_t csr;
1151 int timo = 10000; /* XXXX */
1152
1153 /* Abort MPSC Rx (aborting Tx messes things up) */
1154 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1155
1156 /* abort SDMA RX and stop TX for MPSC unit */
1157 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1158
1159 /* poll for SDMA RX abort completion */
1160 for (; timo > 0; timo--) {
1161 csr = GT_SDMA_READ(sc, SDMA_SDCM);
1162 if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1163 break;
1164 DELAY(50);
1165 }
1166 }
1167
1168 STATIC void
1169 gtmpscinit_start(struct gtmpsc_softc *sc)
1170 {
1171
1172 /*
1173 * Set pointers of current/first descriptor of TX to SDMA register.
1174 */
1175 GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1176 GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1177
1178 /*
 * Set pointer of current descriptor of RX to SDMA register.
1180 */
1181 GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
1182
1183 /*
1184 * initialize SDMA unit Configuration Register
1185 */
1186 GT_SDMA_WRITE(sc, SDMA_SDC,
1187 SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);
1188
1189 gtmpsc_loadchannelregs(sc);
1190
1191 /*
1192 * set MPSC LO and HI port config registers for GTMPSC unit
1193 */
1194 GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
1195 GTMPSC_MMCR_LO_MODE_UART |
1196 GTMPSC_MMCR_LO_ET |
1197 GTMPSC_MMCR_LO_ER |
1198 GTMPSC_MMCR_LO_NLM);
1199 GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
1200 GTMPSC_MMCR_HI_TCDV_DEFAULT |
1201 GTMPSC_MMCR_HI_RDW |
1202 GTMPSC_MMCR_HI_RCDV_DEFAULT);
1203
1204 /*
1205 * tell MPSC receive the Enter Hunt
1206 */
1207 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
1208 }
1209
1210 STATIC void
1211 gtmpscshutdown(struct gtmpsc_softc *sc)
1212 {
1213 struct tty *tp;
1214
1215 #ifdef KGDB
	if ((sc->sc_flags & GTMPSC_KGDB) != 0)
1217 return;
1218 #endif
1219 tp = sc->sc_tty;
1220 mutex_spin_enter(&sc->sc_lock);
1221 /* Fake carrier off */
1222 (void) (*tp->t_linesw->l_modem)(tp, 0);
1223 sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1224 gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1225 mutex_spin_exit(&sc->sc_lock);
1226 }
1227
1228 STATIC void
1229 gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
1230 {
1231
1232 if (sc->sc_dev != NULL)
1233 gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
1234 GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
1235 GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
1236
1237 /*
1238 * set MPSC Protocol configuration register for GTMPSC unit
1239 */
1240 GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
1241 }
1242
1243
1244 #ifdef MPSC_CONSOLE
1245 /*
1246 * Following are all routines needed for MPSC to act as console
1247 */
1248 STATIC int
1249 gtmpsccngetc(dev_t dev)
1250 {
1251
	return gtmpsc_common_getc(&gtmpsc_cn_softc);
1253 }
1254
1255 STATIC void
1256 gtmpsccnputc(dev_t dev, int c)
1257 {
1258
	gtmpsc_common_putc(&gtmpsc_cn_softc, c);
1260 }
1261
1262 STATIC void
1263 gtmpsccnpollc(dev_t dev, int on)
1264 {
1265 }
1266
1267 STATIC void
1268 gtmpsccnhalt(dev_t dev)
1269 {
	gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
1271 uint32_t csr;
1272
1273 /*
1274 * flush TX buffers
1275 */
1276 gtmpsc_txflush(sc);
1277
1278 /*
1279 * stop MPSC unit RX
1280 */
1281 csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
1282 csr &= ~GTMPSC_CHR2_EH;
1283 csr |= GTMPSC_CHR2_RXABORT;
1284 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
1285
1286 DELAY(GTMPSC_RESET_DELAY);
1287
1288 /*
1289 * abort SDMA RX for MPSC unit
1290 */
1291 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
1292 }
1293
1294 int
1295 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1296 int unit, int brg, int speed, tcflag_t tcflag)
1297 {
	struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1299 int i, res;
1300 const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1301
1302 res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1303 if (res != 0)
1304 return res;
1305
1306 gtmpscinit_stop(sc);
1307 gtmpscinit_start(sc);
1308
1309 /*
1310 * enable SDMA receive
1311 */
1312 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1313
1314 for (i = 0; i < sizeof(cp); i++) {
1315 if (*(cp + i) == 0)
1316 break;
1317 gtmpsc_common_putc(sc, *(cp + i));
1318 }
1319
	cn_tab = &gtmpsc_consdev;
	cn_init_magic(&gtmpsc_cnm_state);
1322
1323 return 0;
1324 }
1325
1326 /*
 * gtmpsc_hackinit - hacks required to support GTMPSC console
1328 */
1329 STATIC int
1330 gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
1331 bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
1332 int baudrate, tcflag_t tcflag)
1333 {
1334 gtmpsc_poll_sdma_t *cn_dmapage =
1335 (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
1336 int error;
1337
1338 DPRINTF(("hackinit\n"));
1339
1340 memset(sc, 0, sizeof(struct gtmpsc_softc));
1341 error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
1342 &sc->sc_mpsch);
1343 if (error != 0)
1344 goto fail0;
1345
1346 error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
1347 &sc->sc_sdmah);
1348 if (error != 0)
1349 goto fail1;
1350 error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
1351 sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
1352 if (error != 0)
1353 goto fail2;
1354 error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
1355 sizeof(gtmpsc_polltx_t), NULL,
1356 BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1357 if (error != 0)
1358 goto fail3;
1359 error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
1360 sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
1361 &sc->sc_rxdma_map);
1362 if (error != 0)
1363 goto fail4;
1364 error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
1365 sizeof(gtmpsc_pollrx_t), NULL,
1366 BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1367 if (error != 0)
1368 goto fail5;
1369
1370 sc->sc_iot = iot;
1371 sc->sc_dmat = dmat;
1372 sc->sc_poll_sdmapage = cn_dmapage;
1373 sc->sc_brg = brg;
1374 sc->sc_baudrate = baudrate;
1375 sc->sc_cflag = tcflag;
1376
1377 gtmpsc_txdesc_init(sc);
1378 gtmpsc_rxdesc_init(sc);
1379
1380 return 0;
1381
1382 fail5:
1383 bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
1384 fail4:
1385 bus_dmamap_unload(dmat, sc->sc_txdma_map);
1386 fail3:
1387 bus_dmamap_destroy(dmat, sc->sc_txdma_map);
1388 fail2:
1389 bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
1390 fail1:
1391 bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
1392 fail0:
1393 return error;
1394 }
1395 #endif /* MPSC_CONSOLE */
1396
1397 #ifdef KGDB
1398 STATIC int
1399 gtmpsc_kgdb_getc(void *arg)
1400 {
1401 struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1402
1403 return gtmpsc_common_getc(sc);
1404 }
1405
1406 STATIC void
1407 gtmpsc_kgdb_putc(void *arg, int c)
1408 {
1409 struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1410
	gtmpsc_common_putc(sc, c);
1412 }
1413 #endif /* KGDB */
1414
1415 #if defined(MPSC_CONSOLE) || defined(KGDB)
1416 /*
1417 * gtmpsc_common_getc - polled console read
1418 *
1419 * We copy data from the DMA buffers into a buffer in the softc
1420 * to reduce descriptor ownership turnaround time
1421 * MPSC can crater if it wraps descriptor rings,
1422 * which is asynchronous and throttled only by line speed.
1423 */
1424 STATIC int
1425 gtmpsc_common_getc(struct gtmpsc_softc *sc)
1426 {
1427 gtmpsc_pollrx_t *vrxp;
1428 uint32_t csr;
1429 int ix, ch, wdog_interval = 0;
1430
1431 if (!cold)
1432 mutex_spin_enter(&sc->sc_lock);
1433
1434 ix = sc->sc_rcvdrx;
1435 vrxp = &sc->sc_poll_sdmapage->rx[ix];
1436 while (sc->sc_rcvcnt == 0) {
1437 /* Wait receive */
1438 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1439 ix * sizeof(gtmpsc_pollrx_t),
1440 sizeof(sdma_desc_t),
1441 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1442 csr = vrxp->rxdesc.sdma_csr;
1443 if (csr & SDMA_CSR_RX_OWN) {
1444 GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
1445 GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
1446 if (wdog_interval++ % 32)
1447 gt_watchdog_service();
1448 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1449 ix * sizeof(gtmpsc_pollrx_t),
1450 sizeof(sdma_desc_t),
1451 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1452 DELAY(50);
1453 continue;
1454 }
1455 if (csr & SDMA_CSR_RX_ES)
1456 aprint_error_dev(sc->sc_dev,
1457 "RX error, rxdesc csr 0x%x\n", csr);
1458
1459 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1460 ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1461 sizeof(vrxp->rxbuf),
1462 BUS_DMASYNC_POSTREAD);
1463
1464 vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
1465 sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
1466 sc->sc_roffset = 0;
1467 sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
1468
1469 if (sc->sc_rcvcnt == 0) {
1470 /* cleanup this descriptor, and return to DMA */
1471 CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
1472 sc->sc_rcvrx = sc->sc_rcvdrx;
1473 }
1474
1475 ix = sc->sc_rcvdrx;
1476 vrxp = &sc->sc_poll_sdmapage->rx[ix];
1477 }
1478 ch = vrxp->rxbuf[sc->sc_roffset++];
1479 sc->sc_rcvcnt--;
1480
1481 if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
1482 /* cleanup this descriptor, and return to DMA */
1483 CLEANUP_AND_RETURN_RXDMA(sc, ix);
1484 sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
1485 }
1486
1487 gt_watchdog_service();
1488
1489 if (!cold)
1490 mutex_spin_exit(&sc->sc_lock);
1491 return ch;
1492 }
1493
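/*
 * gtmpsc_common_putc - polled console putc: stage one character in the
 * next TX descriptor, hand it to the SDMA engine, and busy-wait until the
 * ring has drained.
 */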
1494 STATIC void
1495 gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
1496 {
1497 gtmpsc_polltx_t *vtxp;
1498 int ix;
1499 const int nc = 1;
1500
1501 /* Get a DMA descriptor */
1502 if (!cold)
1503 mutex_spin_enter(&sc->sc_lock);
1504 ix = sc->sc_nexttx;
1505 sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1506 if (sc->sc_nexttx == sc->sc_lasttx) {
1507 gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1508 sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1509 }
1510 if (!cold)
1511 mutex_spin_exit(&sc->sc_lock);
1512
1513 vtxp = &sc->sc_poll_sdmapage->tx[ix];
1514 vtxp->txbuf[0] = c;
1515 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1516 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1517 sizeof(vtxp->txbuf),
1518 BUS_DMASYNC_PREWRITE);
1519
1520 vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
1521 vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
1522 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1523 ix * sizeof(gtmpsc_polltx_t),
1524 sizeof(sdma_desc_t),
1525 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1526
1527 if (!cold)
1528 mutex_spin_enter(&sc->sc_lock);
1529 /*
1530 * now kick some SDMA
1531 */
1532 GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
1533
1534 while (sc->sc_lasttx != sc->sc_nexttx) {
1535 gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1536 sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1537 }
1538 if (!cold)
1539 mutex_spin_exit(&sc->sc_lock);
1540 }
1541
1542 /*
 * gtmpsc_common_putc_wait_complete - wait for a polled TX descriptor to complete
1544 */
1545 STATIC void
1546 gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
1547 {
1548 gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
1549 uint32_t csr;
1550 int wdog_interval = 0;
1551
1552 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1553 ix * sizeof(gtmpsc_polltx_t),
1554 sizeof(sdma_desc_t),
1555 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1556 csr = vtxp->txdesc.sdma_csr;
1557 while (csr & SDMA_CSR_TX_OWN) {
1558 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1559 ix * sizeof(gtmpsc_polltx_t),
1560 sizeof(sdma_desc_t),
1561 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1562 DELAY(40);
1563 if (wdog_interval++ % 32)
1564 gt_watchdog_service();
1565 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1566 ix * sizeof(gtmpsc_polltx_t),
1567 sizeof(sdma_desc_t),
1568 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1569 csr = vtxp->txdesc.sdma_csr;
1570 }
1571 if (csr & SDMA_CSR_TX_ES)
1572 aprint_error_dev(sc->sc_dev,
1573 "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
1574 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1575 ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1576 sizeof(vtxp->txbuf),
1577 BUS_DMASYNC_POSTWRITE);
1578 }
1579 #endif /* defined(MPSC_CONSOLE) || defined(KGDB) */
1580