1 /*	$NetBSD: gtmpsc.c,v 1.50 2024/09/10 17:56:35 andvar Exp $	*/
2 /*
3  * Copyright (c) 2009 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * gtmpsc.c - Multi-Protocol Serial Controller driver; supports UART mode only
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.50 2024/09/10 17:56:35 andvar Exp $");
33 
34 #include "opt_kgdb.h"
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/intr.h>
42 #include <sys/kauth.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/systm.h>
47 #include <sys/timepps.h>
48 #include <sys/tty.h>
49 #ifdef KGDB
50 #include <sys/kgdb.h>
51 #endif
52 
53 #include <dev/cons.h>
54 
55 #include <dev/marvell/gtreg.h>
56 #include <dev/marvell/gtvar.h>
57 #include <dev/marvell/gtbrgreg.h>
58 #include <dev/marvell/gtbrgvar.h>
59 #include <dev/marvell/gtsdmareg.h>
60 #include <dev/marvell/gtsdmavar.h>
61 #include <dev/marvell/gtmpscreg.h>
62 #include <dev/marvell/gtmpscvar.h>
63 #include <dev/marvell/marvellreg.h>
64 #include <dev/marvell/marvellvar.h>
65 
66 #include "gtmpsc.h"
67 #include "ioconf.h"
68 #include "locators.h"
69 
70 /*
71  * Wait 2 character times for RESET_DELAY
72  */
73 #define GTMPSC_RESET_DELAY	(2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
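/*
 * Worked example (assuming a default of 9600 baud, purely illustrative):
 * 2 * 8 * 1000000 / 9600 is roughly 1666 microseconds; the real value is
 * whatever GT_MPSC_DEFAULT_BAUD_RATE yields.
 */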
74 
75 
76 #if defined(DEBUG)
77 unsigned int gtmpsc_debug = 0;
78 # define STATIC
79 # define DPRINTF(x)	do { if (gtmpsc_debug) printf x ; } while (0)
80 #else
81 # define STATIC static
82 # define DPRINTF(x)
83 #endif
84 
85 #define GTMPSCUNIT(x)      TTUNIT(x)
86 #define GTMPSCDIALOUT(x)   TTDIALOUT(x)
87 
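/*
 * Re-arm RX descriptor 'ix': restore the CSR (First/Last fragment, interrupt
 * enable) and hand ownership back to the SDMA, reset the buffer-size field,
 * then sync the buffer for PREREAD and the descriptor itself before the
 * hardware may touch them again.
 */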
88 #define CLEANUP_AND_RETURN_RXDMA(sc, ix)				    \
89 	do {								    \
90 		gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
91 									    \
92 		_vrxp->rxdesc.sdma_csr =				    \
93 		    SDMA_CSR_RX_L	|				    \
94 		    SDMA_CSR_RX_F	|				    \
95 		    SDMA_CSR_RX_OWN	|				    \
96 		    SDMA_CSR_RX_EI;					    \
97 		_vrxp->rxdesc.sdma_cnt =				    \
98 		    GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;		    \
99 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
100 		    (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),   \
101 		    sizeof(_vrxp->rxbuf),				    \
102 		    BUS_DMASYNC_PREREAD);				    \
103 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
104 		    (ix) * sizeof(gtmpsc_pollrx_t),			    \
105 		    sizeof(sdma_desc_t),				    \
106 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);	    \
107 	} while (0)
108 
109 
110 STATIC int  gtmpscmatch(device_t, cfdata_t, void *);
111 STATIC void gtmpscattach(device_t, device_t, void *);
112 
113 STATIC void gtmpsc_softintr(void *);
114 
115 STATIC void gtmpscstart(struct tty *);
116 STATIC int  gtmpscparam(struct tty *, struct termios *);
117 
118 STATIC void gtmpsc_shutdownhook(void *);
119 
120 STATIC uint32_t cflag2mpcr(tcflag_t);
121 static __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
122 static __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
123 STATIC void gtmpsc_write(struct gtmpsc_softc *);
124 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
125 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
126 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
127 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
128 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
129 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
130 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
131 
132 #ifdef MPSC_CONSOLE
133 STATIC int gtmpsccngetc(dev_t);
134 STATIC void gtmpsccnputc(dev_t, int);
135 STATIC void gtmpsccnpollc(dev_t, int);
136 STATIC void gtmpsccnhalt(dev_t);
137 
138 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
139 			   bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
140 #endif
141 
142 #if defined(MPSC_CONSOLE) || defined(KGDB)
143 STATIC int  gtmpsc_common_getc(struct gtmpsc_softc *);
144 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
145 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
146 #endif
147 
148 dev_type_open(gtmpscopen);
149 dev_type_close(gtmpscclose);
150 dev_type_read(gtmpscread);
151 dev_type_write(gtmpscwrite);
152 dev_type_ioctl(gtmpscioctl);
153 dev_type_stop(gtmpscstop);
154 dev_type_tty(gtmpsctty);
155 dev_type_poll(gtmpscpoll);
156 
157 const struct cdevsw gtmpsc_cdevsw = {
158 	.d_open = gtmpscopen,
159 	.d_close = gtmpscclose,
160 	.d_read = gtmpscread,
161 	.d_write = gtmpscwrite,
162 	.d_ioctl = gtmpscioctl,
163 	.d_stop = gtmpscstop,
164 	.d_tty = gtmpsctty,
165 	.d_poll = gtmpscpoll,
166 	.d_mmap = nommap,
167 	.d_kqfilter = ttykqfilter,
168 	.d_discard = nodiscard,
169 	.d_flag = D_TTY
170 };
171 
172 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
173     gtmpscmatch, gtmpscattach, NULL, NULL);
174 
175 
176 STATIC uint32_t sdma_imask;		/* soft copy of SDMA IMASK reg */
177 STATIC struct cnm_state gtmpsc_cnm_state;
178 
179 #ifdef KGDB
180 static int gtmpsc_kgdb_attached;
181 
182 STATIC int      gtmpsc_kgdb_getc(void *);
183 STATIC void     gtmpsc_kgdb_putc(void *, int);
184 #endif /* KGDB */
185 
186 #ifdef MPSC_CONSOLE
187 /*
188  * hacks for console initialization
189  * which happens prior to autoconfig "attach"
190  *
191  * XXX Assumes PAGE_SIZE is a constant!
192  */
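/*
 * Because this runs before autoconfiguration, the console path cannot use
 * the per-device DMA page set up in gtmpscattach(); a statically allocated,
 * page-aligned buffer stands in for it, and gtmpsc_hackinit() maps the
 * registers and loads this page by hand.
 */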
193 gtmpsc_softc_t gtmpsc_cn_softc;
194 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
195 
196 
197 static struct consdev gtmpsc_consdev = {
198 	NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
199 	NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
200 };
201 #endif
202 
203 
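/*
 * Register access helpers: sc_mpsch and sc_sdmah are bus_space handles for
 * the per-channel MPSC and SDMA register windows (subregions created in
 * gtmpscattach(), or mapped directly by gtmpsc_hackinit() for the console).
 */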
204 #define GT_MPSC_READ(sc, o) \
205 	bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
206 #define GT_MPSC_WRITE(sc, o, v) \
207 	bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
208 #define GT_SDMA_READ(sc, o) \
209 	bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
210 #define GT_SDMA_WRITE(sc, o, v) \
211 	bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
212 
213 
214 /* ARGSUSED */
215 STATIC int
216 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
217 {
218 	struct marvell_attach_args *mva = aux;
219 
220 	if (strcmp(mva->mva_name, match->cf_name) != 0)
221 		return 0;
222 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
223 		return 0;
224 
225 	mva->mva_size = GTMPSC_SIZE;
226 	return 1;
227 }
228 
229 /* ARGSUSED */
230 STATIC void
231 gtmpscattach(device_t parent, device_t self, void *aux)
232 {
233 	struct gtmpsc_softc *sc = device_private(self);
234 	struct marvell_attach_args *mva = aux;
235 	bus_dma_segment_t segs;
236 	struct tty *tp;
237 	int rsegs, err, unit;
238 	void *kva;
239 
240 	aprint_naive("\n");
241 	aprint_normal(": Multi-Protocol Serial Controller\n");
242 
243 	if (mva->mva_unit != MVA_UNIT_DEFAULT)
244 		unit = mva->mva_unit;
245 	else
246 		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
247 
248 #ifdef MPSC_CONSOLE
249 	if (cn_tab == &gtmpsc_consdev &&
250 	    cn_tab->cn_dev == makedev(0, unit)) {
251 		gtmpsc_cn_softc.sc_dev = self;
252 		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
253 		sc->sc_flags = GTMPSC_CONSOLE;
254 	} else
255 #endif
256 	{
257 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
258 		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
259 			aprint_error_dev(self, "Cannot map MPSC registers\n");
260 			return;
261 		}
262 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
263 		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
264 			aprint_error_dev(self, "Cannot map SDMA registers\n");
265 			return;
266 		}
267 		sc->sc_dev = self;
268 		sc->sc_unit = unit;
269 		sc->sc_iot = mva->mva_iot;
270 		sc->sc_dmat = mva->mva_dmat;
271 
272 		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
273 		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
274 		if (err) {
275 			aprint_error_dev(sc->sc_dev,
276 			    "bus_dmamem_alloc error 0x%x\n", err);
277 			goto fail0;
278 		}
279 		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
280 		    BUS_DMA_NOWAIT);
281 		if (err) {
282 			aprint_error_dev(sc->sc_dev,
283 			    "bus_dmamem_map error 0x%x\n", err);
284 			goto fail1;
285 		}
286 		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
287 		sc->sc_poll_sdmapage = kva;
288 
289 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
290 		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
291 		   &sc->sc_txdma_map);
292 		if (err != 0) {
293 			aprint_error_dev(sc->sc_dev,
294 			    "bus_dmamap_create error 0x%x\n", err);
295 			goto fail2;
296 		}
297 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
298 		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
299 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
300 		if (err != 0) {
301 			aprint_error_dev(sc->sc_dev,
302 			    "bus_dmamap_load tx error 0x%x\n", err);
303 			goto fail3;
304 		}
305 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
306 		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
307 		   &sc->sc_rxdma_map);
308 		if (err != 0) {
309 			aprint_error_dev(sc->sc_dev,
310 			    "bus_dmamap_create rx error 0x%x\n", err);
311 			goto fail4;
312 		}
313 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
314 		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
315 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
316 		if (err != 0) {
317 			aprint_error_dev(sc->sc_dev,
318 			    "bus_dmamap_load rx error 0x%x\n", err);
319 			goto fail5;
320 		}
321 
322 		sc->sc_brg = unit;		/* XXXXX */
323 		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
324 	}
325 	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
326 	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
327 
328 	sc->sc_rx_ready = 0;
329 	sc->sc_tx_busy = 0;
330 	sc->sc_tx_done = 0;
331 	sc->sc_tx_stopped = 0;
332 	sc->sc_heldchange = 0;
333 
334 	gtmpsc_txdesc_init(sc);
335 	gtmpsc_rxdesc_init(sc);
336 
337 	sc->sc_tty = tp = tty_alloc();
338 	tp->t_oproc = gtmpscstart;
339 	tp->t_param = gtmpscparam;
340 	tty_attach(tp);
341 
342 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
343 
344 	/*
345 	 * clear any pending SDMA interrupts for this unit
346 	 */
347 	(void) gt_sdma_icause(device_parent(sc->sc_dev),
348 	    SDMA_INTR_RXBUF(sc->sc_unit) |
349 	    SDMA_INTR_RXERR(sc->sc_unit) |
350 	    SDMA_INTR_TXBUF(sc->sc_unit) |
351 	    SDMA_INTR_TXEND(sc->sc_unit));
352 
353 	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
354 	if (sc->sc_si == NULL)
355 		panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");
356 
357 	shutdownhook_establish(gtmpsc_shutdownhook, sc);
358 
359 	gtmpscinit_stop(sc);
360 	gtmpscinit_start(sc);
361 
362 	if (sc->sc_flags & GTMPSC_CONSOLE) {
363 		int maj;
364 
365 		/* locate the major number */
366 		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
367 
368 		tp->t_dev = cn_tab->cn_dev =
369 		    makedev(maj, device_unit(sc->sc_dev));
370 
371 		aprint_normal_dev(self, "console\n");
372 	}
373 
374 #ifdef KGDB
375 	/*
376 	 * Allow kgdb to "take over" this port.  If this is
377 	 * the kgdb device, it has exclusive use.
378 	 */
379 	if (sc->sc_unit == gtmpsckgdbport) {
380 #ifdef MPSC_CONSOLE
381 		if (sc->sc_unit == MPSC_CONSOLE) {
382 			aprint_error_dev(self,
383 			    "(kgdb): cannot share with console\n");
384 			return;
385 		}
386 #endif
387 
388 		sc->sc_flags |= GTMPSC_KGDB;
389 		aprint_normal_dev(self, "kgdb\n");
390 
391 		gtmpsc_txflush(sc);
392 
393 		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
394 		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
395 		gtmpsc_kgdb_attached = 1;
396 		kgdb_connect(1);
397 	}
398 #endif /* KGDB */
399 
400 	return;
401 
402 
403 fail5:
404 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
405 fail4:
406 	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
407 fail3:
408 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
409 fail2:
410 	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
411 fail1:
412 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
413 fail0:
414 	return;
415 }
416 
417 /* ARGSUSED */
418 int
419 gtmpsc_intr(void *arg)
420 {
421 	struct gt_softc *gt = (struct gt_softc *)arg;
422 	struct gtmpsc_softc *sc;
423 	uint32_t icause;
424 	int i;
425 
426 	icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
427 
428 	for (i = 0; i < GTMPSC_NCHAN; i++) {
429 		sc = device_lookup_private(&gtmpsc_cd, i);
430 		if (sc == NULL)
431 			continue;
432 		mutex_spin_enter(&sc->sc_lock);
433 		if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
434 			gtmpsc_intr_rx(sc);
435 			icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
436 		}
437 		if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
438 			gtmpsc_intr_tx(sc);
439 			icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
440 		}
441 		mutex_spin_exit(&sc->sc_lock);
442 	}
443 
444 	return 1;
445 }
446 
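/*
 * Soft-interrupt half of the driver: the hardware interrupt path
 * (gtmpsc_intr_rx/gtmpsc_intr_tx) only advances the descriptor rings and
 * sets sc_rx_ready/sc_tx_done under sc_lock; this handler then feeds the
 * received characters to the line discipline and restarts output.
 */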
447 STATIC void
448 gtmpsc_softintr(void *arg)
449 {
450 	struct gtmpsc_softc *sc = arg;
451 	struct tty *tp = sc->sc_tty;
452 	gtmpsc_pollrx_t *vrxp;
453 	int code;
454 	u_int cc;
455 	u_char *get, *end, lsr;
456 	int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
457 
458 	if (sc->sc_rx_ready) {
459 		sc->sc_rx_ready = 0;
460 
461 		cc = sc->sc_rcvcnt;
462 
463 		/* If not yet open, drop the entire buffer content here */
464 		if (!ISSET(tp->t_state, TS_ISOPEN))
465 			cc = 0;
466 
467 		vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
468 		end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
469 		get = vrxp->rxbuf + sc->sc_roffset;
470 		while (cc > 0) {
471 			code = *get;
472 			lsr = vrxp->rxdesc.sdma_csr;
473 
474 			if (ISSET(lsr,
475 			    SDMA_CSR_RX_PE |
476 			    SDMA_CSR_RX_FR |
477 			    SDMA_CSR_RX_OR |
478 			    SDMA_CSR_RX_BR)) {
479 				if (ISSET(lsr, SDMA_CSR_RX_OR))
480 					;	/* XXXXX not yet... */
481 				if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
482 					SET(code, TTY_FE);
483 				if (ISSET(lsr, SDMA_CSR_RX_PE))
484 					SET(code, TTY_PE);
485 			}
486 
487 			if ((*rint)(code, tp) == -1) {
488 				/*
489 				 * The line discipline's buffer is out of space.
490 				 */
491 				/* XXXXX not yet... */
492 			}
493 			if (++get >= end) {
494 				/* cleanup this descriptor, and return to DMA */
495 				CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
496 				sc->sc_rcvrx =
497 				    (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
498 				vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
499 				end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
500 				get = vrxp->rxbuf + sc->sc_roffset;
501 			}
502 			cc--;
503 		}
504 	}
505 	if (sc->sc_tx_done) {
506 		sc->sc_tx_done = 0;
507 		CLR(tp->t_state, TS_BUSY);
508 		if (ISSET(tp->t_state, TS_FLUSH))
509 		    CLR(tp->t_state, TS_FLUSH);
510 		else
511 		    ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
512 		(*tp->t_linesw->l_start)(tp);
513 	}
514 }
515 
516 int
517 gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
518 {
519 	struct gtmpsc_softc *sc;
520 	int unit = GTMPSCUNIT(dev);
521 	struct tty *tp;
522 	int s;
523 	int error;
524 
525 	sc = device_lookup_private(&gtmpsc_cd, unit);
526 	if (!sc)
527 		return ENXIO;
528 #ifdef KGDB
529 	/*
530 	 * If this is the kgdb port, no other use is permitted.
531 	 */
532 	if (sc->sc_flags & GTMPSC_KGDB)
533 		return EBUSY;
534 #endif
535 	tp = sc->sc_tty;
536 	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
537 		return EBUSY;
538 
539 	s = spltty();
540 
541 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
542 		struct termios t;
543 
544 		tp->t_dev = dev;
545 
546 		mutex_spin_enter(&sc->sc_lock);
547 
548 		/* Turn on interrupts. */
549 		sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
550 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
551 
552 		/* Clear PPS capture state on first open. */
553 		mutex_spin_enter(&timecounter_lock);
554 		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
555 		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
556 		pps_init(&sc->sc_pps_state);
557 		mutex_spin_exit(&timecounter_lock);
558 
559 		mutex_spin_exit(&sc->sc_lock);
560 
561 		if (sc->sc_flags & GTMPSC_CONSOLE) {
562 			t.c_ospeed = sc->sc_baudrate;
563 			t.c_cflag = sc->sc_cflag;
564 		} else {
565 			t.c_ospeed = TTYDEF_SPEED;
566 			t.c_cflag = TTYDEF_CFLAG;
567 		}
568 		t.c_ispeed = t.c_ospeed;
569 
570 		/* Make sure gtmpscparam() will do something. */
571 		tp->t_ospeed = 0;
572 		(void) gtmpscparam(tp, &t);
573 		tp->t_iflag = TTYDEF_IFLAG;
574 		tp->t_oflag = TTYDEF_OFLAG;
575 		tp->t_lflag = TTYDEF_LFLAG;
576 		ttychars(tp);
577 		ttsetwater(tp);
578 
579 		mutex_spin_enter(&sc->sc_lock);
580 
581 		/* Clear the input/output ring */
582 		sc->sc_rcvcnt = 0;
583 		sc->sc_roffset = 0;
584 		sc->sc_rcvrx = 0;
585 		sc->sc_rcvdrx = 0;
586 		sc->sc_nexttx = 0;
587 		sc->sc_lasttx = 0;
588 
589 		/*
590 		 * enable SDMA receive
591 		 */
592 		GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
593 
594 		mutex_spin_exit(&sc->sc_lock);
595 	}
596 	splx(s);
597 	error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
598 	if (error)
599 		goto bad;
600 
601 	error = (*tp->t_linesw->l_open)(dev, tp);
602 	if (error)
603 		goto bad;
604 
605 	return 0;
606 
607 bad:
608 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
609 		/*
610 		 * We failed to open the device, and nobody else had it opened.
611 		 * Clean up the state as appropriate.
612 		 */
613 		gtmpscshutdown(sc);
614 	}
615 
616 	return error;
617 }
618 
619 int
620 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
621 {
622 	int unit = GTMPSCUNIT(dev);
623 	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
624 	struct tty *tp = sc->sc_tty;
625 
626 	if (!ISSET(tp->t_state, TS_ISOPEN))
627 		return 0;
628 
629 	(*tp->t_linesw->l_close)(tp, flag);
630 	ttyclose(tp);
631 
632 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
633 		/*
634 		 * Although we got a last close, the device may still be in
635 		 * use; e.g. if this was the dialout node, and there are still
636 		 * processes waiting for carrier on the non-dialout node.
637 		 */
638 		gtmpscshutdown(sc);
639 	}
640 
641 	return 0;
642 }
643 
644 int
645 gtmpscread(dev_t dev, struct uio *uio, int flag)
646 {
647 	struct gtmpsc_softc *sc =
648 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
649 	struct tty *tp = sc->sc_tty;
650 
651 	return (*tp->t_linesw->l_read)(tp, uio, flag);
652 }
653 
654 int
655 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
656 {
657 	struct gtmpsc_softc *sc =
658 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
659 	struct tty *tp = sc->sc_tty;
660 
661 	return (*tp->t_linesw->l_write)(tp, uio, flag);
662 }
663 
664 int
665 gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
666 {
667 	struct gtmpsc_softc *sc =
668 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
669 	struct tty *tp = sc->sc_tty;
670 	int error;
671 
672 	error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
673 	if (error != EPASSTHROUGH)
674 		return error;
675 
676 	error = ttioctl(tp, cmd, data, flag, l);
677 	if (error != EPASSTHROUGH)
678 		return error;
679 
680 	error = 0;
681 	switch (cmd) {
682 	case TIOCSFLAGS:
683 		error = kauth_authorize_device_tty(l->l_cred,
684 		    KAUTH_DEVICE_TTY_PRIVSET, tp);
685 		if (error)
686 			return error;
687 		break;
688 	default:
689 		/* nothing */
690 		break;
691 	}
692 
693 	mutex_spin_enter(&sc->sc_lock);
694 
695 	switch (cmd) {
696 	case PPS_IOC_CREATE:
697 	case PPS_IOC_DESTROY:
698 	case PPS_IOC_GETPARAMS:
699 	case PPS_IOC_SETPARAMS:
700 	case PPS_IOC_GETCAP:
701 	case PPS_IOC_FETCH:
702 #ifdef PPS_SYNC
703 	case PPS_IOC_KCBIND:
704 #endif
705 		mutex_spin_enter(&timecounter_lock);
706 		error = pps_ioctl(cmd, data, &sc->sc_pps_state);
707 		mutex_spin_exit(&timecounter_lock);
708 		break;
709 
710 	case TIOCDCDTIMESTAMP:	/* XXX old, overloaded  API used by xntpd v3 */
711 		mutex_spin_enter(&timecounter_lock);
712 #ifndef PPS_TRAILING_EDGE
713 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
714 		    &sc->sc_pps_state.ppsinfo.assert_timestamp);
715 #else
716 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
717 		    &sc->sc_pps_state.ppsinfo.clear_timestamp);
718 #endif
719 		mutex_spin_exit(&timecounter_lock);
720 		break;
721 
722 	default:
723 		error = EPASSTHROUGH;
724 		break;
725 	}
726 
727 	mutex_spin_exit(&sc->sc_lock);
728 
729 	return error;
730 }
731 
732 void
733 gtmpscstop(struct tty *tp, int flag)
734 {
735 }
736 
737 struct tty *
738 gtmpsctty(dev_t dev)
739 {
740 	struct gtmpsc_softc *sc =
741 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
742 
743 	return sc->sc_tty;
744 }
745 
746 int
747 gtmpscpoll(dev_t dev, int events, struct lwp *l)
748 {
749 	struct gtmpsc_softc *sc =
750 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
751 	struct tty *tp = sc->sc_tty;
752 
753 	return (*tp->t_linesw->l_poll)(tp, events, l);
754 }
755 
756 
757 STATIC void
758 gtmpscstart(struct tty *tp)
759 {
760 	struct gtmpsc_softc *sc;
761 	unsigned char *tba;
762 	unsigned int unit;
763 	int s, tbc;
764 
765 	unit = GTMPSCUNIT(tp->t_dev);
766 	sc = device_lookup_private(&gtmpsc_cd, unit);
767 	if (sc == NULL)
768 		return;
769 
770 	s = spltty();
771 	if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
772 		goto out;
773 	if (sc->sc_tx_stopped)
774 		goto out;
775 	if (!ttypull(tp))
776 		goto out;
777 
778 	/* Grab the first contiguous region of buffer space. */
779 	tba = tp->t_outq.c_cf;
780 	tbc = ndqb(&tp->t_outq, 0);
781 
782 	mutex_spin_enter(&sc->sc_lock);
783 
784 	sc->sc_tba = tba;
785 	sc->sc_tbc = tbc;
786 
787 	sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
788 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
789 	SET(tp->t_state, TS_BUSY);
790 	sc->sc_tx_busy = 1;
791 	gtmpsc_write(sc);
792 
793 	mutex_spin_exit(&sc->sc_lock);
794 out:
795 	splx(s);
796 }
797 
798 STATIC int
799 gtmpscparam(struct tty *tp, struct termios *t)
800 {
801 	struct gtmpsc_softc *sc =
802 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
803 
804 	/* Check requested parameters. */
805 	if (compute_cdv(t->c_ospeed) < 0)
806 		return EINVAL;
807 	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
808 		return EINVAL;
809 
810 	/*
811 	 * If there were no changes, don't do anything.  This avoids dropping
812 	 * input and improves performance when all we did was frob things like
813 	 * VMIN and VTIME.
814 	 */
815 	if (tp->t_ospeed == t->c_ospeed &&
816 	    tp->t_cflag == t->c_cflag)
817 		return 0;
818 
819 	mutex_spin_enter(&sc->sc_lock);
820 
821 	/* And copy to tty. */
822 	tp->t_ispeed = 0;
823 	tp->t_ospeed = t->c_ospeed;
824 	tp->t_cflag = t->c_cflag;
825 
826 	sc->sc_baudrate = t->c_ospeed;
827 
828 	if (!sc->sc_heldchange) {
829 		if (sc->sc_tx_busy) {
830 			sc->sc_heldtbc = sc->sc_tbc;
831 			sc->sc_tbc = 0;
832 			sc->sc_heldchange = 1;
833 		} else
834 			gtmpsc_loadchannelregs(sc);
835 	}
836 
837 	mutex_spin_exit(&sc->sc_lock);
838 
839 	/* Fake carrier on */
840 	(void) (*tp->t_linesw->l_modem)(tp, 1);
841 
842 	return 0;
843 }
844 
845 void
846 gtmpsc_shutdownhook(void *arg)
847 {
848 	gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
849 
850 	gtmpsc_txflush(sc);
851 }
852 
853 /*
854  * Convert cflag (CS[5678] and CSTOPB) to MPCR bits.
855  */
856 STATIC uint32_t
857 cflag2mpcr(tcflag_t cflag)
858 {
859 	uint32_t mpcr = 0;
860 
861 	switch (ISSET(cflag, CSIZE)) {
862 	case CS5:
863 		SET(mpcr, GTMPSC_MPCR_CL_5);
864 		break;
865 	case CS6:
866 		SET(mpcr, GTMPSC_MPCR_CL_6);
867 		break;
868 	case CS7:
869 		SET(mpcr, GTMPSC_MPCR_CL_7);
870 		break;
871 	case CS8:
872 		SET(mpcr, GTMPSC_MPCR_CL_8);
873 		break;
874 	}
875 	if (ISSET(cflag, CSTOPB))
876 		SET(mpcr, GTMPSC_MPCR_SBL_2);
877 
878 	return mpcr;
879 }
880 
881 static __inline void
882 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
883 {
884 	gtmpsc_pollrx_t *vrxp;
885 	uint32_t csr;
886 	int kick, ix;
887 
888 	kick = 0;
889 
890 	/* already handled in gtmpsc_common_getc() */
891 	if (sc->sc_rcvdrx == sc->sc_rcvrx)
892 		return;
893 
894 	ix = sc->sc_rcvdrx;
895 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
896 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
897 	    ix * sizeof(gtmpsc_pollrx_t),
898 	    sizeof(sdma_desc_t),
899 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
900 	csr = vrxp->rxdesc.sdma_csr;
901 	while (!(csr & SDMA_CSR_RX_OWN)) {
902 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
903 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
904 		    sizeof(vrxp->rxbuf),
905 		    BUS_DMASYNC_POSTREAD);
906 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
907 		if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
908 			int cn_trapped = 0;
909 
910 			cn_check_magic(sc->sc_tty->t_dev,
911 			    CNC_BREAK, gtmpsc_cnm_state);
912 			if (cn_trapped)
913 				continue;
914 #if defined(KGDB) && !defined(DDB)
915 			if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
916 				kgdb_connect(1);
917 				continue;
918 			}
919 #endif
920 		}
921 
922 		sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
923 		kick = 1;
924 
925 		ix = (ix + 1) % GTMPSC_NRXDESC;
926 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
927 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
928 		    ix * sizeof(gtmpsc_pollrx_t),
929 		    sizeof(sdma_desc_t),
930 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
931 		csr = vrxp->rxdesc.sdma_csr;
932 	}
933 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
934 	    ix * sizeof(gtmpsc_pollrx_t),
935 	    sizeof(sdma_desc_t),
936 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
937 
938 	if (kick) {
939 		sc->sc_rcvdrx = ix;
940 		sc->sc_rx_ready = 1;
941 		softint_schedule(sc->sc_si);
942 	}
943 }
944 
945 static __inline void
946 gtmpsc_intr_tx(struct gtmpsc_softc *sc)
947 {
948 	gtmpsc_polltx_t *vtxp;
949 	uint32_t csr;
950 	int ix;
951 
952 	/*
953 	 * If we've delayed a parameter change, do it now,
954 	 * and restart output.
955 	 */
956 	if (sc->sc_heldchange) {
957 		gtmpsc_loadchannelregs(sc);
958 		sc->sc_heldchange = 0;
959 		sc->sc_tbc = sc->sc_heldtbc;
960 		sc->sc_heldtbc = 0;
961 	}
962 
963 	/* Clean-up TX descriptors and buffers */
964 	ix = sc->sc_lasttx;
965 	while (ix != sc->sc_nexttx) {
966 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
967 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
968 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
969 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
970 		csr = vtxp->txdesc.sdma_csr;
971 		if (csr & SDMA_CSR_TX_OWN) {
972 			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
973 			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
974 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
975 			break;
976 		}
977 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
978 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
979 		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
980 		ix = (ix + 1) % GTMPSC_NTXDESC;
981 	}
982 	sc->sc_lasttx = ix;
983 
984 	/* Output the next chunk of the contiguous buffer */
985 	gtmpsc_write(sc);
986 	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
987 		sc->sc_tx_busy = 0;
988 		sc->sc_tx_done = 1;
989 		softint_schedule(sc->sc_si);
990 		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
991 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
992 	}
993 }
994 
995 /*
996  * gtmpsc_write - write a buffer into the hardware
997  */
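/*
 * Copies up to GTMPSC_TXBUFSZ bytes per descriptor from the chunk described
 * by sc_tba/sc_tbc into the TX ring, hands each descriptor to the SDMA by
 * setting SDMA_CSR_TX_OWN, and finally raises SDMA_SDCM_TXD (transmit
 * demand) if it is not already set.
 */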
998 STATIC void
999 gtmpsc_write(struct gtmpsc_softc *sc)
1000 {
1001 	gtmpsc_polltx_t *vtxp;
1002 	uint32_t sdcm, ix;
1003 	int kick, n;
1004 
1005 	kick = 0;
1006 	while (sc->sc_tbc > 0 && (sc->sc_nexttx + 1) % GTMPSC_NTXDESC != sc->sc_lasttx) {
1007 		n = uimin(sc->sc_tbc, GTMPSC_TXBUFSZ);
1008 
1009 		ix = sc->sc_nexttx;
1010 		sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1011 
1012 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
1013 
1014 		memcpy(vtxp->txbuf, sc->sc_tba, n);
1015 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1016 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1017 		    sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1018 
1019 		vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1020 		vtxp->txdesc.sdma_csr =
1021 		    SDMA_CSR_TX_L	|
1022 		    SDMA_CSR_TX_F	|
1023 		    SDMA_CSR_TX_EI	|
1024 		    SDMA_CSR_TX_OWN;
1025 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1026 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1027 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1028 
1029 		sc->sc_tbc -= n;
1030 		sc->sc_tba += n;
1031 		kick = 1;
1032 	}
1033 	if (kick) {
1034 		/*
1035 		 * now kick some SDMA
1036 		 */
1037 		sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1038 		if ((sdcm & SDMA_SDCM_TXD) == 0)
1039 			GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1040 	}
1041 }
1042 
1043 /*
1044  * gtmpsc_txflush - wait for output to drain
1045  */
1046 STATIC void
1047 gtmpsc_txflush(gtmpsc_softc_t *sc)
1048 {
1049 	gtmpsc_polltx_t *vtxp;
1050 	int ix, limit = 4000000;	/* 4 seconds */
1051 
1052 	ix = sc->sc_nexttx - 1;
1053 	if (ix < 0)
1054 		ix = GTMPSC_NTXDESC - 1;
1055 
1056 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1057 	while (limit > 0) {
1058 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1059 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1060 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1061 		if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1062 			break;
1063 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1064 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1065 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1066 		DELAY(1);
1067 		limit -= 1;
1068 	}
1069 }
1070 
1071 /*
1072  * gtmpsc_rxdesc_init - set up RX descriptor ring
1073  */
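/*
 * Each gtmpsc_pollrx_t holds an SDMA descriptor immediately followed by its
 * data buffer, all within the channel's single DMA page; descriptors are
 * chained through sdma_next and the last one points back to the first,
 * forming a ring.
 */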
1074 STATIC void
1075 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1076 {
1077 	gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1078 	sdma_desc_t *dp;
1079 	int i;
1080 
1081 	first_prxp = prxp =
1082 	    (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1083 	vrxp = sc->sc_poll_sdmapage->rx;
1084 	for (i = 0; i < GTMPSC_NRXDESC; i++) {
1085 		dp = &vrxp->rxdesc;
1086 		dp->sdma_csr =
1087 		    SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1088 		dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1089 		dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1090 		vrxp++;
1091 		prxp++;
1092 		dp->sdma_next = (uint32_t)&prxp->rxdesc;
1093 
1094 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1095 		    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1096 		    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1097 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1098 		    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1099 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1100 	}
1101 	dp = &vrxp->rxdesc;
1102 	dp->sdma_csr =
1103 	    SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1104 	dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1105 	dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1106 	dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1107 
1108 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1109 	    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1110 	    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1111 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1112 	    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1113 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1114 
1115 	sc->sc_rcvcnt = 0;
1116 	sc->sc_roffset = 0;
1117 	sc->sc_rcvrx = 0;
1118 	sc->sc_rcvdrx = 0;
1119 }
1120 
1121 /*
1122  * gtmpsc_txdesc_init - set up TX descriptor ring
1123  */
1124 STATIC void
1125 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1126 {
1127 	gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1128 	sdma_desc_t *dp;
1129 	int i;
1130 
1131 	first_ptxp = ptxp =
1132 	    (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1133 	vtxp = sc->sc_poll_sdmapage->tx;
1134 	for (i = 0; i < GTMPSC_NTXDESC; i++) {
1135 		dp = &vtxp->txdesc;
1136 		dp->sdma_csr = 0;
1137 		dp->sdma_cnt = 0;
1138 		dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1139 		vtxp++;
1140 		ptxp++;
1141 		dp->sdma_next = (uint32_t)&ptxp->txdesc;
1142 	}
1143 	dp = &vtxp->txdesc;
1144 	dp->sdma_csr = 0;
1145 	dp->sdma_cnt = 0;
1146 	dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1147 	dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1148 
1149 	sc->sc_nexttx = 0;
1150 	sc->sc_lasttx = 0;
1151 }
1152 
1153 STATIC void
1154 gtmpscinit_stop(struct gtmpsc_softc *sc)
1155 {
1156 	uint32_t csr;
1157 	int timo = 10000;	/* XXXX */
1158 
1159 	/* Abort MPSC Rx (aborting Tx messes things up) */
1160 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1161 
1162 	/* abort SDMA RX and stop TX for MPSC unit */
1163 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1164 
1165 	/* poll for SDMA RX abort completion */
1166 	for (; timo > 0; timo--) {
1167 		csr = GT_SDMA_READ(sc, SDMA_SDCM);
1168 		if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1169 			break;
1170 		DELAY(50);
1171 	}
1172 }
1173 
1174 STATIC void
1175 gtmpscinit_start(struct gtmpsc_softc *sc)
1176 {
1177 
1178 	/*
1179 	 * Set pointers of current/first descriptor of TX to SDMA register.
1180 	 */
1181 	GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1182 	GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1183 
1184 	/*
1185 	 * Set pointer of current descriptor of RX to SDMA register.
1186 	 */
1187 	GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
1188 
1189 	/*
1190 	 * initialize SDMA unit Configuration Register
1191 	 */
1192 	GT_SDMA_WRITE(sc, SDMA_SDC,
1193 	    SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);
1194 
1195 	gtmpsc_loadchannelregs(sc);
1196 
1197 	/*
1198 	 * set MPSC LO and HI port config registers for GTMPSC unit
1199  	 */
1200 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
1201 	    GTMPSC_MMCR_LO_MODE_UART	|
1202 	    GTMPSC_MMCR_LO_ET		|
1203 	    GTMPSC_MMCR_LO_ER		|
1204 	    GTMPSC_MMCR_LO_NLM);
1205 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
1206 	    GTMPSC_MMCR_HI_TCDV_DEFAULT	|
1207 	    GTMPSC_MMCR_HI_RDW		|
1208 	    GTMPSC_MMCR_HI_RCDV_DEFAULT);
1209 
1210 	/*
1211 	 * tell the MPSC receiver to enter hunt mode
1212 	 */
1213 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
1214 }
1215 
1216 STATIC void
1217 gtmpscshutdown(struct gtmpsc_softc *sc)
1218 {
1219 	struct tty *tp;
1220 
1221 #ifdef KGDB
1222 	if (sc->sc_flags & GTMPSC_KGDB)
1223 		return;
1224 #endif
1225 	tp = sc->sc_tty;
1226 	mutex_spin_enter(&sc->sc_lock);
1227 	/* Fake carrier off */
1228 	(void) (*tp->t_linesw->l_modem)(tp, 0);
1229 	sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1230 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1231 	mutex_spin_exit(&sc->sc_lock);
1232 }
1233 
1234 STATIC void
1235 gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
1236 {
1237 
1238 	if (sc->sc_dev != NULL)
1239 		gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
1240 	    	    GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
1241 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
1242 
1243 	/*
1244 	 * set MPSC Protocol configuration register for GTMPSC unit
1245 	 */
1246 	GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
1247 }
1248 
1249 
1250 #ifdef MPSC_CONSOLE
1251 /*
1252  * Following are all routines needed for MPSC to act as console
1253  */
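/*
 * gtmpsccnattach() is expected to be called from machine-dependent console
 * setup, before autoconfiguration; it uses gtmpsc_hackinit() to build a
 * minimal softc around the static DMA page and then installs gtmpsc_consdev
 * as cn_tab.
 */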
1254 STATIC int
1255 gtmpsccngetc(dev_t dev)
1256 {
1257 
1258 	return gtmpsc_common_getc(&gtmpsc_cn_softc);
1259 }
1260 
1261 STATIC void
1262 gtmpsccnputc(dev_t dev, int c)
1263 {
1264 
1265 	gtmpsc_common_putc(&gtmpsc_cn_softc, c);
1266 }
1267 
1268 STATIC void
1269 gtmpsccnpollc(dev_t dev, int on)
1270 {
1271 }
1272 
1273 STATIC void
1274 gtmpsccnhalt(dev_t dev)
1275 {
1276 	gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
1277 	uint32_t csr;
1278 
1279 	/*
1280 	 * flush TX buffers
1281 	 */
1282 	gtmpsc_txflush(sc);
1283 
1284 	/*
1285 	 * stop MPSC unit RX
1286 	 */
1287 	csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
1288 	csr &= ~GTMPSC_CHR2_EH;
1289 	csr |= GTMPSC_CHR2_RXABORT;
1290 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
1291 
1292 	DELAY(GTMPSC_RESET_DELAY);
1293 
1294 	/*
1295 	 * abort SDMA RX for MPSC unit
1296 	 */
1297 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
1298 }
1299 
1300 int
1301 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1302 	       int unit, int brg, int speed, tcflag_t tcflag)
1303 {
1304 	struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1305 	int i, res;
1306 	const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1307 
1308 	res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1309 	if (res != 0)
1310 		return res;
1311 
1312 	gtmpscinit_stop(sc);
1313 	gtmpscinit_start(sc);
1314 
1315 	/*
1316 	 * enable SDMA receive
1317 	 */
1318 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1319 
1320 	for (i = 0; i < sizeof(cp); i++) {
1321 		if (*(cp + i) == 0)
1322 			break;
1323 		gtmpsc_common_putc(sc, *(cp + i));
1324 	}
1325 
1326 	cn_tab = &gtmpsc_consdev;
1327 	cn_init_magic(&gtmpsc_cnm_state);
1328 
1329 	return 0;
1330 }
1331 
1332 /*
1333  * gtmpsc_hackinit - hacks required to support GTMPSC console
1334  */
1335 STATIC int
1336 gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
1337 		bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
1338 		int baudrate, tcflag_t tcflag)
1339 {
1340 	gtmpsc_poll_sdma_t *cn_dmapage =
1341 	    (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
1342 	int error;
1343 
1344 	DPRINTF(("hackinit\n"));
1345 
1346 	memset(sc, 0, sizeof(struct gtmpsc_softc));
1347 	error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
1348 	    &sc->sc_mpsch);
1349 	if (error != 0)
1350 		goto fail0;
1351 
1352 	error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
1353 	    &sc->sc_sdmah);
1354 	if (error != 0)
1355 		goto fail1;
1356 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
1357 	   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
1358 	if (error != 0)
1359 		goto fail2;
1360 	error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
1361 	    sizeof(gtmpsc_polltx_t), NULL,
1362 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1363 	if (error != 0)
1364 		goto fail3;
1365 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
1366 	   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
1367 	   &sc->sc_rxdma_map);
1368 	if (error != 0)
1369 		goto fail4;
1370 	error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
1371 	    sizeof(gtmpsc_pollrx_t), NULL,
1372 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1373 	if (error != 0)
1374 		goto fail5;
1375 
1376 	sc->sc_iot = iot;
1377 	sc->sc_dmat = dmat;
1378 	sc->sc_poll_sdmapage = cn_dmapage;
1379 	sc->sc_brg = brg;
1380 	sc->sc_baudrate = baudrate;
1381 	sc->sc_cflag = tcflag;
1382 
1383 	gtmpsc_txdesc_init(sc);
1384 	gtmpsc_rxdesc_init(sc);
1385 
1386 	return 0;
1387 
1388 fail5:
1389 	bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
1390 fail4:
1391 	bus_dmamap_unload(dmat, sc->sc_txdma_map);
1392 fail3:
1393 	bus_dmamap_destroy(dmat, sc->sc_txdma_map);
1394 fail2:
1395 	bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
1396 fail1:
1397 	bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
1398 fail0:
1399 	return error;
1400 }
1401 #endif	/* MPSC_CONSOLE */
1402 
1403 #ifdef KGDB
1404 STATIC int
1405 gtmpsc_kgdb_getc(void *arg)
1406 {
1407 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1408 
1409 	return gtmpsc_common_getc(sc);
1410 }
1411 
1412 STATIC void
1413 gtmpsc_kgdb_putc(void *arg, int c)
1414 {
1415 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1416 
1417 	return gtmpsc_common_putc(sc, c);
1418 }
1419 #endif /* KGDB */
1420 
1421 #if defined(MPSC_CONSOLE) || defined(KGDB)
1422 /*
1423  * gtmpsc_common_getc - polled console read
1424  *
1425  *	We copy data from the DMA buffers into a buffer in the softc
1426  *	to reduce descriptor ownership turnaround time, since the MPSC
1427  *	can crater if it wraps its descriptor rings; reception is
1428  *	asynchronous and throttled only by line speed.
1429  */
1430 STATIC int
1431 gtmpsc_common_getc(struct gtmpsc_softc *sc)
1432 {
1433 	gtmpsc_pollrx_t *vrxp;
1434 	uint32_t csr;
1435 	int ix, ch, wdog_interval = 0;
1436 
1437 	if (!cold)
1438 		mutex_spin_enter(&sc->sc_lock);
1439 
1440 	ix = sc->sc_rcvdrx;
1441 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
1442 	while (sc->sc_rcvcnt == 0) {
1443 		/* Wait for a receive descriptor to complete */
1444 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1445 		    ix * sizeof(gtmpsc_pollrx_t),
1446 		    sizeof(sdma_desc_t),
1447 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1448 		csr = vrxp->rxdesc.sdma_csr;
1449 		if (csr & SDMA_CSR_RX_OWN) {
1450 			GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
1451 			    GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
1452 			if (wdog_interval++ % 32)
1453 				gt_watchdog_service();
1454 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1455 			    ix * sizeof(gtmpsc_pollrx_t),
1456 			    sizeof(sdma_desc_t),
1457 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1458 			DELAY(50);
1459 			continue;
1460 		}
1461 		if (csr & SDMA_CSR_RX_ES)
1462 			aprint_error_dev(sc->sc_dev,
1463 			    "RX error, rxdesc csr 0x%x\n", csr);
1464 
1465 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1466 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1467 		    sizeof(vrxp->rxbuf),
1468 		    BUS_DMASYNC_POSTREAD);
1469 
1470 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
1471 		sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
1472 		sc->sc_roffset = 0;
1473 		sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
1474 
1475 		if (sc->sc_rcvcnt == 0) {
1476 			/* cleanup this descriptor, and return to DMA */
1477 			CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
1478 			sc->sc_rcvrx = sc->sc_rcvdrx;
1479 		}
1480 
1481 		ix = sc->sc_rcvdrx;
1482 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
1483 	}
1484 	ch = vrxp->rxbuf[sc->sc_roffset++];
1485 	sc->sc_rcvcnt--;
1486 
1487 	if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
1488 		/* cleanup this descriptor, and return to DMA */
1489 		CLEANUP_AND_RETURN_RXDMA(sc, ix);
1490 		sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
1491 	}
1492 
1493 	gt_watchdog_service();
1494 
1495 	if (!cold)
1496 		mutex_spin_exit(&sc->sc_lock);
1497 	return ch;
1498 }
1499 
1500 STATIC void
1501 gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
1502 {
1503 	gtmpsc_polltx_t *vtxp;
1504 	int ix;
1505 	const int nc = 1;
1506 
1507 	/* Get a DMA descriptor */
1508 	if (!cold)
1509 		mutex_spin_enter(&sc->sc_lock);
1510 	ix = sc->sc_nexttx;
1511 	sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1512 	if (sc->sc_nexttx == sc->sc_lasttx) {
1513 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1514 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1515 	}
1516 	if (!cold)
1517 		mutex_spin_exit(&sc->sc_lock);
1518 
1519 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1520 	vtxp->txbuf[0] = c;
1521 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1522 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1523 	    sizeof(vtxp->txbuf),
1524 	    BUS_DMASYNC_PREWRITE);
1525 
1526 	vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
1527 	vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
1528 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1529 	    ix * sizeof(gtmpsc_polltx_t),
1530 	    sizeof(sdma_desc_t),
1531 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1532 
1533 	if (!cold)
1534 		mutex_spin_enter(&sc->sc_lock);
1535 	/*
1536 	 * now kick some SDMA
1537 	 */
1538 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
1539 
1540 	while (sc->sc_lasttx != sc->sc_nexttx) {
1541 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1542 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1543 	}
1544 	if (!cold)
1545 		mutex_spin_exit(&sc->sc_lock);
1546 }
1547 
1548 /*
1549  * gtmpsc_common_putc_wait_complete - wait for a polled TX descriptor to complete
1550  */
1551 STATIC void
1552 gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
1553 {
1554 	gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
1555 	uint32_t csr;
1556 	int wdog_interval = 0;
1557 
1558 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1559 	    ix * sizeof(gtmpsc_polltx_t),
1560 	    sizeof(sdma_desc_t),
1561 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1562 	csr = vtxp->txdesc.sdma_csr;
1563 	while (csr & SDMA_CSR_TX_OWN) {
1564 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1565 		    ix * sizeof(gtmpsc_polltx_t),
1566 		    sizeof(sdma_desc_t),
1567 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1568 		DELAY(40);
1569 		if (wdog_interval++ % 32)
1570 			gt_watchdog_service();
1571 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1572 		    ix * sizeof(gtmpsc_polltx_t),
1573 		    sizeof(sdma_desc_t),
1574 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1575 		csr = vtxp->txdesc.sdma_csr;
1576 	}
1577 	if (csr & SDMA_CSR_TX_ES)
1578 		aprint_error_dev(sc->sc_dev,
1579 		    "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
1580 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1581 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1582 	    sizeof(vtxp->txbuf),
1583 	    BUS_DMASYNC_POSTWRITE);
1584 }
1585 #endif	/* defined(MPSC_CONSOLE) || defined(KGDB) */
1586