xref: /netbsd-src/sys/dev/marvell/gtmpsc.c (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1 /*	$NetBSD: gtmpsc.c,v 1.44 2014/03/16 05:20:28 dholland Exp $	*/
2 /*
3  * Copyright (c) 2009 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * mpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.44 2014/03/16 05:20:28 dholland Exp $");
33 
34 #include "opt_kgdb.h"
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/intr.h>
42 #include <sys/kauth.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/systm.h>
47 #include <sys/timepps.h>
48 #include <sys/tty.h>
49 #ifdef KGDB
50 #include <sys/kgdb.h>
51 #endif
52 
53 #include <dev/cons.h>
54 
55 #include <dev/marvell/gtreg.h>
56 #include <dev/marvell/gtvar.h>
57 #include <dev/marvell/gtbrgreg.h>
58 #include <dev/marvell/gtbrgvar.h>
59 #include <dev/marvell/gtsdmareg.h>
60 #include <dev/marvell/gtsdmavar.h>
61 #include <dev/marvell/gtmpscreg.h>
62 #include <dev/marvell/gtmpscvar.h>
63 #include <dev/marvell/marvellreg.h>
64 #include <dev/marvell/marvellvar.h>
65 
66 #include "gtmpsc.h"
67 #include "ioconf.h"
68 #include "locators.h"
69 
70 /*
71  * Wait two character times (in microseconds) for RESET_DELAY
72  */
73 #define GTMPSC_RESET_DELAY	(2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
74 
75 
76 #if defined(DEBUG)
77 unsigned int gtmpsc_debug = 0;
78 # define STATIC
79 # define DPRINTF(x)	do { if (gtmpsc_debug) printf x ; } while (0)
80 #else
81 # define STATIC static
82 # define DPRINTF(x)
83 #endif
84 
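/*
 * The minor number encodes the unit in bits 0-18 and the dialout
 * flag in bit 19; e.g. minor 0x80001 is unit 1 opened as the
 * dialout device.
 */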
85 #define GTMPSCUNIT_MASK    0x7ffff
86 #define GTMPSCDIALOUT_MASK 0x80000
87 
88 #define GTMPSCUNIT(x)      (minor(x) & GTMPSCUNIT_MASK)
89 #define GTMPSCDIALOUT(x)   (minor(x) & GTMPSCDIALOUT_MASK)
90 
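/*
 * Re-arm an RX descriptor once its contents have been consumed:
 * reset the byte count, hand ownership back to the SDMA engine and
 * sync both the data buffer and the descriptor for the next DMA.
 */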
91 #define CLEANUP_AND_RETURN_RXDMA(sc, ix)				    \
92 	do {								    \
93 		gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
94 									    \
95 		_vrxp->rxdesc.sdma_csr =				    \
96 		    SDMA_CSR_RX_L	|				    \
97 		    SDMA_CSR_RX_F	|				    \
98 		    SDMA_CSR_RX_OWN	|				    \
99 		    SDMA_CSR_RX_EI;					    \
100 		_vrxp->rxdesc.sdma_cnt =				    \
101 		    GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;		    \
102 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
103 		    (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),   \
104 		    sizeof(_vrxp->rxbuf),				    \
105 		    BUS_DMASYNC_PREREAD);				    \
106 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
107 		    (ix) * sizeof(gtmpsc_pollrx_t),			    \
108 		    sizeof(sdma_desc_t),				    \
109 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);	    \
110 	} while (0)
111 
112 
113 STATIC int  gtmpscmatch(device_t, cfdata_t, void *);
114 STATIC void gtmpscattach(device_t, device_t, void *);
115 
116 STATIC void gtmpsc_softintr(void *);
117 
118 STATIC void gtmpscstart(struct tty *);
119 STATIC int  gtmpscparam(struct tty *, struct termios *);
120 
121 STATIC void gtmpsc_shutdownhook(void *);
122 
123 STATIC uint32_t cflag2mpcr(tcflag_t);
124 STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
125 STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
126 STATIC void gtmpsc_write(struct gtmpsc_softc *);
127 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
128 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
129 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
130 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
131 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
132 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
133 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
134 
135 #ifdef MPSC_CONSOLE
136 STATIC int gtmpsccngetc(dev_t);
137 STATIC void gtmpsccnputc(dev_t, int);
138 STATIC void gtmpsccnpollc(dev_t, int);
139 STATIC void gtmpsccnhalt(dev_t);
140 
141 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
142 			   bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
143 #endif
144 
145 #if defined(MPSC_CONSOLE) || defined(KGDB)
146 STATIC int  gtmpsc_common_getc(struct gtmpsc_softc *);
147 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
148 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
149 #endif
150 
151 dev_type_open(gtmpscopen);
152 dev_type_close(gtmpscclose);
153 dev_type_read(gtmpscread);
154 dev_type_write(gtmpscwrite);
155 dev_type_ioctl(gtmpscioctl);
156 dev_type_stop(gtmpscstop);
157 dev_type_tty(gtmpsctty);
158 dev_type_poll(gtmpscpoll);
159 
160 const struct cdevsw gtmpsc_cdevsw = {
161 	.d_open = gtmpscopen,
162 	.d_close = gtmpscclose,
163 	.d_read = gtmpscread,
164 	.d_write = gtmpscwrite,
165 	.d_ioctl = gtmpscioctl,
166 	.d_stop = gtmpscstop,
167 	.d_tty = gtmpsctty,
168 	.d_poll = gtmpscpoll,
169 	.d_mmap = nommap,
170 	.d_kqfilter = ttykqfilter,
171 	.d_flag = D_TTY
172 };
173 
174 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
175     gtmpscmatch, gtmpscattach, NULL, NULL);
176 
177 
178 STATIC uint32_t sdma_imask;		/* soft copy of SDMA IMASK reg */
179 STATIC struct cnm_state gtmpsc_cnm_state;
180 
181 #ifdef KGDB
182 static int gtmpsc_kgdb_addr;
183 static int gtmpsc_kgdb_attached;
184 
185 STATIC int      gtmpsc_kgdb_getc(void *);
186 STATIC void     gtmpsc_kgdb_putc(void *, int);
187 #endif /* KGDB */
188 
189 #ifdef MPSC_CONSOLE
190 /*
191  * hacks for console initialization
192  * which happens prior to autoconfig "attach"
193  *
194  * XXX Assumes PAGE_SIZE is a constant!
195  */
196 gtmpsc_softc_t gtmpsc_cn_softc;
197 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
198 
199 
200 static struct consdev gtmpsc_consdev = {
201 	NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
202 	NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
203 };
204 #endif
205 
206 
207 #define GT_MPSC_READ(sc, o) \
208 	bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
209 #define GT_MPSC_WRITE(sc, o, v) \
210 	bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
211 #define GT_SDMA_READ(sc, o) \
212 	bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
213 #define GT_SDMA_WRITE(sc, o, v) \
214 	bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
215 
216 
217 /* ARGSUSED */
218 STATIC int
219 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
220 {
221 	struct marvell_attach_args *mva = aux;
222 
223 	if (strcmp(mva->mva_name, match->cf_name) != 0)
224 		return 0;
225 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
226 		return 0;
227 
228 	mva->mva_size = GTMPSC_SIZE;
229 	return 1;
230 }
231 
232 /* ARGSUSED */
233 STATIC void
234 gtmpscattach(device_t parent, device_t self, void *aux)
235 {
236 	struct gtmpsc_softc *sc = device_private(self);
237 	struct marvell_attach_args *mva = aux;
238 	bus_dma_segment_t segs;
239 	struct tty *tp;
240 	int rsegs, err, unit;
241 	void *kva;
242 
243 	aprint_naive("\n");
244 	aprint_normal(": Multi-Protocol Serial Controller\n");
245 
246 	if (mva->mva_unit != MVA_UNIT_DEFAULT)
247 		unit = mva->mva_unit;
248 	else
249 		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
250 
251 #ifdef MPSC_CONSOLE
252 	if (cn_tab == &gtmpsc_consdev &&
253 	    cn_tab->cn_dev == makedev(0, unit)) {
254 		gtmpsc_cn_softc.sc_dev = self;
255 		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
256 		sc->sc_flags = GTMPSC_CONSOLE;
257 	} else
258 #endif
259 	{
260 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
261 		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
262 			aprint_error_dev(self, "Cannot map MPSC registers\n");
263 			return;
264 		}
265 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
266 		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
267 			aprint_error_dev(self, "Cannot map SDMA registers\n");
268 			return;
269 		}
270 		sc->sc_dev = self;
271 		sc->sc_unit = unit;
272 		sc->sc_iot = mva->mva_iot;
273 		sc->sc_dmat = mva->mva_dmat;
274 
275 		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
276 		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
277 		if (err) {
278 			aprint_error_dev(sc->sc_dev,
279 			    "bus_dmamem_alloc error 0x%x\n", err);
280 			goto fail0;
281 		}
282 		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
283 		    BUS_DMA_NOWAIT);
284 		if (err) {
285 			aprint_error_dev(sc->sc_dev,
286 			    "bus_dmamem_map error 0x%x\n", err);
287 			goto fail1;
288 		}
289 		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
290 		sc->sc_poll_sdmapage = kva;
291 
292 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
293 		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
294 		   &sc->sc_txdma_map);
295 		if (err != 0) {
296 			aprint_error_dev(sc->sc_dev,
297 			    "bus_dmamap_create error 0x%x\n", err);
298 			goto fail2;
299 		}
300 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
301 		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
302 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
303 		if (err != 0) {
304 			aprint_error_dev(sc->sc_dev,
305 			    "bus_dmamap_load tx error 0x%x\n", err);
306 			goto fail3;
307 		}
308 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
309 		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
310 		   &sc->sc_rxdma_map);
311 		if (err != 0) {
312 			aprint_error_dev(sc->sc_dev,
313 			    "bus_dmamap_create rx error 0x%x\n", err);
314 			goto fail4;
315 		}
316 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
317 		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
318 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
319 		if (err != 0) {
320 			aprint_error_dev(sc->sc_dev,
321 			    "bus_dmamap_load rx error 0x%x\n", err);
322 			goto fail5;
323 		}
324 
325 		sc->sc_brg = unit;		/* XXXXX */
326 		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
327 	}
328 	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
329 	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
330 
331 	sc->sc_rx_ready = 0;
332 	sc->sc_tx_busy = 0;
333 	sc->sc_tx_done = 0;
334 	sc->sc_tx_stopped = 0;
335 	sc->sc_heldchange = 0;
336 
337 	gtmpsc_txdesc_init(sc);
338 	gtmpsc_rxdesc_init(sc);
339 
340 	sc->sc_tty = tp = tty_alloc();
341 	tp->t_oproc = gtmpscstart;
342 	tp->t_param = gtmpscparam;
343 	tty_attach(tp);
344 
345 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
346 
347 	/*
348 	 * clear any pending SDMA interrupts for this unit
349 	 */
350 	(void) gt_sdma_icause(device_parent(sc->sc_dev),
351 	    SDMA_INTR_RXBUF(sc->sc_unit) |
352 	    SDMA_INTR_RXERR(sc->sc_unit) |
353 	    SDMA_INTR_TXBUF(sc->sc_unit) |
354 	    SDMA_INTR_TXEND(sc->sc_unit));
355 
356 	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
357 	if (sc->sc_si == NULL)
358 		panic("gtmpscattach: cannot softint_establish SOFTINT_SERIAL");
359 
360 	shutdownhook_establish(gtmpsc_shutdownhook, sc);
361 
362 	gtmpscinit_stop(sc);
363 	gtmpscinit_start(sc);
364 
365 	if (sc->sc_flags & GTMPSC_CONSOLE) {
366 		int maj;
367 
368 		/* locate the major number */
369 		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
370 
371 		tp->t_dev = cn_tab->cn_dev =
372 		    makedev(maj, device_unit(sc->sc_dev));
373 
374 		aprint_normal_dev(self, "console\n");
375 	}
376 
377 #ifdef KGDB
378 	/*
379 	 * Allow kgdb to "take over" this port.  If this is
380 	 * the kgdb device, it has exclusive use.
381 	 */
382 	if (sc->sc_unit == gtmpsckgdbport) {
383 #ifdef MPSC_CONSOLE
384 		if (sc->sc_unit == MPSC_CONSOLE) {
385 			aprint_error_dev(self,
386 			    "(kgdb): cannot share with console\n");
387 			return;
388 		}
389 #endif
390 
391 		sc->sc_flags |= GTMPSC_KGDB;
392 		aprint_normal_dev(self, "kgdb\n");
393 
394 		gtmpsc_txflush(sc);
395 
396 		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
397 		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
398 		gtmpsc_kgdb_attached = 1;
399 		kgdb_connect(1);
400 	}
401 #endif /* KGDB */
402 
403 	return;
404 
405 
406 fail5:
407 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
408 fail4:
409 	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
410 fail3:
411 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
412 fail2:
413 	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
414 fail1:
415 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
416 fail0:
417 	return;
418 }
419 
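/*
 * gtmpsc_intr - SDMA hardware interrupt handler.
 *
 * Collect the SDMA interrupt cause bits, then service RX and TX
 * completions for every attached MPSC channel under its spin lock.
 */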
420 /* ARGSUSED */
421 int
422 gtmpsc_intr(void *arg)
423 {
424 	struct gt_softc *gt = (struct gt_softc *)arg;
425 	struct gtmpsc_softc *sc;
426 	uint32_t icause;
427 	int i;
428 
429 	icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
430 
431 	for (i = 0; i < GTMPSC_NCHAN; i++) {
432 		sc = device_lookup_private(&gtmpsc_cd, i);
433 		if (sc == NULL)
434 			continue;
435 		mutex_spin_enter(&sc->sc_lock);
436 		if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
437 			gtmpsc_intr_rx(sc);
438 			icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
439 		}
440 		if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
441 			gtmpsc_intr_tx(sc);
442 			icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
443 		}
444 		mutex_spin_exit(&sc->sc_lock);
445 	}
446 
447 	return 1;
448 }
449 
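/*
 * gtmpsc_softintr - soft interrupt handler.
 *
 * Feed received characters (tagged with framing/parity errors) to the
 * line discipline, re-arm consumed RX descriptors, and on transmit
 * completion flush the output queue and restart output.
 */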
450 STATIC void
451 gtmpsc_softintr(void *arg)
452 {
453 	struct gtmpsc_softc *sc = arg;
454 	struct tty *tp = sc->sc_tty;
455 	gtmpsc_pollrx_t *vrxp;
456 	int code;
457 	u_int cc;
458 	u_char *get, *end, lsr;
459 	int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
460 
461 	if (sc->sc_rx_ready) {
462 		sc->sc_rx_ready = 0;
463 
464 		cc = sc->sc_rcvcnt;
465 
466 		/* If not yet open, drop the entire buffer content here */
467 		if (!ISSET(tp->t_state, TS_ISOPEN))
468 			cc = 0;
469 
470 		vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
471 		end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
472 		get = vrxp->rxbuf + sc->sc_roffset;
473 		while (cc > 0) {
474 			code = *get;
475 			lsr = vrxp->rxdesc.sdma_csr;
476 
477 			if (ISSET(lsr,
478 			    SDMA_CSR_RX_PE |
479 			    SDMA_CSR_RX_FR |
480 			    SDMA_CSR_RX_OR |
481 			    SDMA_CSR_RX_BR)) {
482 				if (ISSET(lsr, SDMA_CSR_RX_OR))
483 					;	/* XXXXX not yet... */
484 				if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
485 					SET(code, TTY_FE);
486 				if (ISSET(lsr, SDMA_CSR_RX_PE))
487 					SET(code, TTY_PE);
488 			}
489 
490 			if ((*rint)(code, tp) == -1) {
491 				/*
492 				 * The line discipline's buffer is out of space.
493 				 */
494 				/* XXXXX not yet... */
495 			}
496 			if (++get >= end) {
497 				/* cleanup this descriptor, and return to DMA */
498 				CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
499 				sc->sc_rcvrx =
500 				    (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
501 				vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
502 				end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
503 				get = vrxp->rxbuf + sc->sc_roffset;
504 			}
505 			cc--;
506 		}
507 	}
508 	if (sc->sc_tx_done) {
509 		sc->sc_tx_done = 0;
510 		CLR(tp->t_state, TS_BUSY);
511 		if (ISSET(tp->t_state, TS_FLUSH))
512 		    CLR(tp->t_state, TS_FLUSH);
513 		else
514 		    ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
515 		(*tp->t_linesw->l_start)(tp);
516 	}
517 }
518 
519 int
520 gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
521 {
522 	struct gtmpsc_softc *sc;
523 	int unit = GTMPSCUNIT(dev);
524 	struct tty *tp;
525 	int s;
526 	int error;
527 
528 	sc = device_lookup_private(&gtmpsc_cd, unit);
529 	if (!sc)
530 		return ENXIO;
531 #ifdef KGDB
532 	/*
533 	 * If this is the kgdb port, no other use is permitted.
534 	 */
535 	if (sc->sc_flags & GTMPSC_KGDB)
536 		return EBUSY;
537 #endif
538 	tp = sc->sc_tty;
539 	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
540 		return EBUSY;
541 
542 	s = spltty();
543 
544 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
545 		struct termios t;
546 
547 		tp->t_dev = dev;
548 
549 		mutex_spin_enter(&sc->sc_lock);
550 
551 		/* Turn on interrupts. */
552 		sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
553 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
554 
555 		/* Clear PPS capture state on first open. */
556 		mutex_spin_enter(&timecounter_lock);
557 		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
558 		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
559 		pps_init(&sc->sc_pps_state);
560 		mutex_spin_exit(&timecounter_lock);
561 
562 		mutex_spin_exit(&sc->sc_lock);
563 
564 		if (sc->sc_flags & GTMPSC_CONSOLE) {
565 			t.c_ospeed = sc->sc_baudrate;
566 			t.c_cflag = sc->sc_cflag;
567 		} else {
568 			t.c_ospeed = TTYDEF_SPEED;
569 			t.c_cflag = TTYDEF_CFLAG;
570 		}
571 		t.c_ispeed = t.c_ospeed;
572 
573 		/* Make sure gtmpscparam() will do something. */
574 		tp->t_ospeed = 0;
575 		(void) gtmpscparam(tp, &t);
576 		tp->t_iflag = TTYDEF_IFLAG;
577 		tp->t_oflag = TTYDEF_OFLAG;
578 		tp->t_lflag = TTYDEF_LFLAG;
579 		ttychars(tp);
580 		ttsetwater(tp);
581 
582 		mutex_spin_enter(&sc->sc_lock);
583 
584 		/* Clear the input/output ring */
585 		sc->sc_rcvcnt = 0;
586 		sc->sc_roffset = 0;
587 		sc->sc_rcvrx = 0;
588 		sc->sc_rcvdrx = 0;
589 		sc->sc_nexttx = 0;
590 		sc->sc_lasttx = 0;
591 
592 		/*
593 		 * enable SDMA receive
594 		 */
595 		GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
596 
597 		mutex_spin_exit(&sc->sc_lock);
598 	}
599 	splx(s);
600 	error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
601 	if (error)
602 		goto bad;
603 
604 	error = (*tp->t_linesw->l_open)(dev, tp);
605 	if (error)
606 		goto bad;
607 
608 	return 0;
609 
610 bad:
611 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
612 		/*
613 		 * We failed to open the device, and nobody else had it opened.
614 		 * Clean up the state as appropriate.
615 		 */
616 		gtmpscshutdown(sc);
617 	}
618 
619 	return error;
620 }
621 
622 int
623 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
624 {
625 	int unit = GTMPSCUNIT(dev);
626 	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
627 	struct tty *tp = sc->sc_tty;
628 
629 	if (!ISSET(tp->t_state, TS_ISOPEN))
630 		return 0;
631 
632 	(*tp->t_linesw->l_close)(tp, flag);
633 	ttyclose(tp);
634 
635 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
636 		/*
637 		 * Although we got a last close, the device may still be in
638 		 * use; e.g. if this was the dialout node, and there are still
639 		 * processes waiting for carrier on the non-dialout node.
640 		 */
641 		gtmpscshutdown(sc);
642 	}
643 
644 	return 0;
645 }
646 
647 int
648 gtmpscread(dev_t dev, struct uio *uio, int flag)
649 {
650 	struct gtmpsc_softc *sc =
651 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
652 	struct tty *tp = sc->sc_tty;
653 
654 	return (*tp->t_linesw->l_read)(tp, uio, flag);
655 }
656 
657 int
658 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
659 {
660 	struct gtmpsc_softc *sc =
661 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
662 	struct tty *tp = sc->sc_tty;
663 
664 	return (*tp->t_linesw->l_write)(tp, uio, flag);
665 }
666 
667 int
668 gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
669 {
670 	struct gtmpsc_softc *sc =
671 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
672 	struct tty *tp = sc->sc_tty;
673 	int error;
674 
675 	error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
676 	if (error != EPASSTHROUGH)
677 		return error;
678 
679 	error = ttioctl(tp, cmd, data, flag, l);
680 	if (error != EPASSTHROUGH)
681 		return error;
682 
683 	error = 0;
684 	switch (cmd) {
685 	case TIOCSFLAGS:
686 		error = kauth_authorize_device_tty(l->l_cred,
687 		    KAUTH_DEVICE_TTY_PRIVSET, tp);
688 		if (error)
689 			return error;
690 		break;
691 	default:
692 		/* nothing */
693 		break;
694 	}
695 
696 	mutex_spin_enter(&sc->sc_lock);
697 
698 	switch (cmd) {
699 	case PPS_IOC_CREATE:
700 	case PPS_IOC_DESTROY:
701 	case PPS_IOC_GETPARAMS:
702 	case PPS_IOC_SETPARAMS:
703 	case PPS_IOC_GETCAP:
704 	case PPS_IOC_FETCH:
705 #ifdef PPS_SYNC
706 	case PPS_IOC_KCBIND:
707 #endif
708 		mutex_spin_enter(&timecounter_lock);
709 		error = pps_ioctl(cmd, data, &sc->sc_pps_state);
710 		mutex_spin_exit(&timecounter_lock);
711 		break;
712 
713 	case TIOCDCDTIMESTAMP:	/* XXX old, overloaded  API used by xntpd v3 */
714 		mutex_spin_enter(&timecounter_lock);
715 #ifndef PPS_TRAILING_EDGE
716 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
717 		    &sc->sc_pps_state.ppsinfo.assert_timestamp);
718 #else
719 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
720 		    &sc->sc_pps_state.ppsinfo.clear_timestamp);
721 #endif
722 		mutex_spin_exit(&timecounter_lock);
723 		break;
724 
725 	default:
726 		error = EPASSTHROUGH;
727 		break;
728 	}
729 
730 	mutex_spin_exit(&sc->sc_lock);
731 
732 	return error;
733 }
734 
735 void
736 gtmpscstop(struct tty *tp, int flag)
737 {
738 }
739 
740 struct tty *
741 gtmpsctty(dev_t dev)
742 {
743 	struct gtmpsc_softc *sc =
744 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
745 
746 	return sc->sc_tty;
747 }
748 
749 int
750 gtmpscpoll(dev_t dev, int events, struct lwp *l)
751 {
752 	struct gtmpsc_softc *sc =
753 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
754 	struct tty *tp = sc->sc_tty;
755 
756 	return (*tp->t_linesw->l_poll)(tp, events, l);
757 }
758 
759 
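/*
 * gtmpscstart - tty output start routine.
 *
 * Grab the next contiguous chunk of the output queue, enable the
 * TX-buffer interrupt and copy the data into the SDMA TX ring.
 */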
760 STATIC void
761 gtmpscstart(struct tty *tp)
762 {
763 	struct gtmpsc_softc *sc;
764 	unsigned char *tba;
765 	unsigned int unit;
766 	int s, tbc;
767 
768 	unit = GTMPSCUNIT(tp->t_dev);
769 	sc = device_lookup_private(&gtmpsc_cd, unit);
770 	if (sc == NULL)
771 		return;
772 
773 	s = spltty();
774 	if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
775 		goto out;
776 	if (sc->sc_tx_stopped)
777 		goto out;
778 	if (!ttypull(tp))
779 		goto out;
780 
781 	/* Grab the first contiguous region of buffer space. */
782 	tba = tp->t_outq.c_cf;
783 	tbc = ndqb(&tp->t_outq, 0);
784 
785 	mutex_spin_enter(&sc->sc_lock);
786 
787 	sc->sc_tba = tba;
788 	sc->sc_tbc = tbc;
789 
790 	sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
791 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
792 	SET(tp->t_state, TS_BUSY);
793 	sc->sc_tx_busy = 1;
794 	gtmpsc_write(sc);
795 
796 	mutex_spin_exit(&sc->sc_lock);
797 out:
798 	splx(s);
799 }
800 
801 STATIC int
802 gtmpscparam(struct tty *tp, struct termios *t)
803 {
804 	struct gtmpsc_softc *sc =
805 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
806 
807 	/* Check requested parameters. */
808 	if (compute_cdv(t->c_ospeed) < 0)
809 		return EINVAL;
810 	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
811 		return EINVAL;
812 
813 	/*
814 	 * If there were no changes, don't do anything.  This avoids dropping
815 	 * input and improves performance when all we did was frob things like
816 	 * VMIN and VTIME.
817 	 */
818 	if (tp->t_ospeed == t->c_ospeed &&
819 	    tp->t_cflag == t->c_cflag)
820 		return 0;
821 
822 	mutex_spin_enter(&sc->sc_lock);
823 
824 	/* And copy to tty. */
825 	tp->t_ispeed = 0;
826 	tp->t_ospeed = t->c_ospeed;
827 	tp->t_cflag = t->c_cflag;
828 
829 	sc->sc_baudrate = t->c_ospeed;
830 
831 	if (!sc->sc_heldchange) {
832 		if (sc->sc_tx_busy) {
833 			sc->sc_heldtbc = sc->sc_tbc;
834 			sc->sc_tbc = 0;
835 			sc->sc_heldchange = 1;
836 		} else
837 			gtmpsc_loadchannelregs(sc);
838 	}
839 
840 	mutex_spin_exit(&sc->sc_lock);
841 
842 	/* Fake carrier on */
843 	(void) (*tp->t_linesw->l_modem)(tp, 1);
844 
845 	return 0;
846 }
847 
848 STATIC void
849 gtmpsc_shutdownhook(void *arg)
850 {
851 	gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
852 
853 	gtmpsc_txflush(sc);
854 }
855 
856 /*
857  * Convert cflag (CS[5678] and CSTOPB) into MPCR bits.
858  */
859 STATIC uint32_t
860 cflag2mpcr(tcflag_t cflag)
861 {
862 	uint32_t mpcr = 0;
863 
864 	switch (ISSET(cflag, CSIZE)) {
865 	case CS5:
866 		SET(mpcr, GTMPSC_MPCR_CL_5);
867 		break;
868 	case CS6:
869 		SET(mpcr, GTMPSC_MPCR_CL_6);
870 		break;
871 	case CS7:
872 		SET(mpcr, GTMPSC_MPCR_CL_7);
873 		break;
874 	case CS8:
875 		SET(mpcr, GTMPSC_MPCR_CL_8);
876 		break;
877 	}
878 	if (ISSET(cflag, CSTOPB))
879 		SET(mpcr, GTMPSC_MPCR_SBL_2);
880 
881 	return mpcr;
882 }
883 
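/*
 * gtmpsc_intr_rx - scan RX descriptors returned by the SDMA engine,
 * accumulate the received byte count and schedule the soft interrupt
 * to push the data up to the tty layer.  BREAK conditions are routed
 * to the console magic sequence / kgdb hooks.
 */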
884 STATIC __inline void
885 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
886 {
887 	gtmpsc_pollrx_t *vrxp;
888 	uint32_t csr;
889 	int kick, ix;
890 
891 	kick = 0;
892 
893 	/* already handled in gtmpsc_common_getc() */
894 	if (sc->sc_rcvdrx == sc->sc_rcvrx)
895 		return;
896 
897 	ix = sc->sc_rcvdrx;
898 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
899 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
900 	    ix * sizeof(gtmpsc_pollrx_t),
901 	    sizeof(sdma_desc_t),
902 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
903 	csr = vrxp->rxdesc.sdma_csr;
904 	while (!(csr & SDMA_CSR_RX_OWN)) {
905 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
906 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
907 		    sizeof(vrxp->rxbuf),
908 		    BUS_DMASYNC_POSTREAD);
909 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
910 		if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
911 			int cn_trapped = 0;
912 
913 			cn_check_magic(sc->sc_tty->t_dev,
914 			    CNC_BREAK, gtmpsc_cnm_state);
915 			if (cn_trapped)
916 				continue;
917 #if defined(KGDB) && !defined(DDB)
918 			if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
919 				kgdb_connect(1);
920 				continue;
921 			}
922 #endif
923 		}
924 
925 		sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
926 		kick = 1;
927 
928 		ix = (ix + 1) % GTMPSC_NRXDESC;
929 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
930 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
931 		    ix * sizeof(gtmpsc_pollrx_t),
932 		    sizeof(sdma_desc_t),
933 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
934 		csr = vrxp->rxdesc.sdma_csr;
935 	}
936 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
937 	    ix * sizeof(gtmpsc_pollrx_t),
938 	    sizeof(sdma_desc_t),
939 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
940 
941 	if (kick) {
942 		sc->sc_rcvdrx = ix;
943 		sc->sc_rx_ready = 1;
944 		softint_schedule(sc->sc_si);
945 	}
946 }
947 
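/*
 * gtmpsc_intr_tx - reclaim completed TX descriptors, apply any held
 * parameter change, queue the next chunk of output, and when the
 * buffer has drained signal completion to the soft interrupt and mask
 * the TX-buffer interrupt again.
 */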
948 STATIC __inline void
949 gtmpsc_intr_tx(struct gtmpsc_softc *sc)
950 {
951 	gtmpsc_polltx_t *vtxp;
952 	uint32_t csr;
953 	int ix;
954 
955 	/*
956 	 * If we've delayed a parameter change, do it now,
957 	 * and restart output.
958 	 */
959 	if (sc->sc_heldchange) {
960 		gtmpsc_loadchannelregs(sc);
961 		sc->sc_heldchange = 0;
962 		sc->sc_tbc = sc->sc_heldtbc;
963 		sc->sc_heldtbc = 0;
964 	}
965 
966 	/* Clean-up TX descriptors and buffers */
967 	ix = sc->sc_lasttx;
968 	while (ix != sc->sc_nexttx) {
969 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
970 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
971 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
972 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
973 		csr = vtxp->txdesc.sdma_csr;
974 		if (csr & SDMA_CSR_TX_OWN) {
975 			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
976 			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
977 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
978 			break;
979 		}
980 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
981 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
982 		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
983 		ix = (ix + 1) % GTMPSC_NTXDESC;
984 	}
985 	sc->sc_lasttx = ix;
986 
987 	/* Output the next chunk of the contiguous buffer */
988 	gtmpsc_write(sc);
989 	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
990 		sc->sc_tx_busy = 0;
991 		sc->sc_tx_done = 1;
992 		softint_schedule(sc->sc_si);
993 		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
994 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
995 	}
996 }
997 
998 /*
999  * gtmpsc_write - write a buffer into the hardware
1000  */
1001 STATIC void
1002 gtmpsc_write(struct gtmpsc_softc *sc)
1003 {
1004 	gtmpsc_polltx_t *vtxp;
1005 	uint32_t sdcm, ix;
1006 	int kick, n;
1007 
1008 	kick = 0;
1009 	while (sc->sc_tbc > 0 && (sc->sc_nexttx + 1) % GTMPSC_NTXDESC != sc->sc_lasttx) {
1010 		n = min(sc->sc_tbc, GTMPSC_TXBUFSZ);
1011 
1012 		ix = sc->sc_nexttx;
1013 		sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1014 
1015 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
1016 
1017 		memcpy(vtxp->txbuf, sc->sc_tba, n);
1018 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1019 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1020 		    sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1021 
1022 		vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1023 		vtxp->txdesc.sdma_csr =
1024 		    SDMA_CSR_TX_L	|
1025 		    SDMA_CSR_TX_F	|
1026 		    SDMA_CSR_TX_EI	|
1027 		    SDMA_CSR_TX_OWN;
1028 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1029 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1030 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1031 
1032 		sc->sc_tbc -= n;
1033 		sc->sc_tba += n;
1034 		kick = 1;
1035 	}
1036 	if (kick) {
1037 		/*
1038 		 * now kick some SDMA
1039 		 */
1040 		sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1041 		if ((sdcm & SDMA_SDCM_TXD) == 0)
1042 			GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1043 	}
1044 }
1045 
1046 /*
1047  * gtmpsc_txflush - wait for output to drain
1048  */
1049 STATIC void
1050 gtmpsc_txflush(gtmpsc_softc_t *sc)
1051 {
1052 	gtmpsc_polltx_t *vtxp;
1053 	int ix, limit = 4000000;	/* 4 seconds */
1054 
1055 	ix = sc->sc_nexttx - 1;
1056 	if (ix < 0)
1057 		ix = GTMPSC_NTXDESC - 1;
1058 
1059 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1060 	while (limit > 0) {
1061 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1062 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1063 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1064 		if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1065 			break;
1066 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1067 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1068 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1069 		DELAY(1);
1070 		limit -= 1;
1071 	}
1072 }
1073 
1074 /*
1075  * gtmpsc_rxdesc_init - set up RX descriptor ring
1076  */
1077 STATIC void
1078 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1079 {
1080 	gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1081 	sdma_desc_t *dp;
1082 	int i;
1083 
1084 	first_prxp = prxp =
1085 	    (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1086 	vrxp = sc->sc_poll_sdmapage->rx;
1087 	for (i = 0; i < GTMPSC_NRXDESC - 1; i++) {
1088 		dp = &vrxp->rxdesc;
1089 		dp->sdma_csr =
1090 		    SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1091 		dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1092 		dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1093 		vrxp++;
1094 		prxp++;
1095 		dp->sdma_next = (uint32_t)&prxp->rxdesc;
1096 
1097 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1098 		    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1099 		    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1100 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1101 		    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1102 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1103 	}
1104 	dp = &vrxp->rxdesc;
1105 	dp->sdma_csr =
1106 	    SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1107 	dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1108 	dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1109 	dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1110 
1111 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1112 	    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1113 	    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1114 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1115 	    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1116 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1117 
1118 	sc->sc_rcvcnt = 0;
1119 	sc->sc_roffset = 0;
1120 	sc->sc_rcvrx = 0;
1121 	sc->sc_rcvdrx = 0;
1122 }
1123 
1124 /*
1125  * gtmpsc_txdesc_init - set up TX descriptor ring
1126  */
1127 STATIC void
1128 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1129 {
1130 	gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1131 	sdma_desc_t *dp;
1132 	int i;
1133 
1134 	first_ptxp = ptxp =
1135 	    (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1136 	vtxp = sc->sc_poll_sdmapage->tx;
1137 	for (i = 0; i < GTMPSC_NTXDESC - 1; i++) {
1138 		dp = &vtxp->txdesc;
1139 		dp->sdma_csr = 0;
1140 		dp->sdma_cnt = 0;
1141 		dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1142 		vtxp++;
1143 		ptxp++;
1144 		dp->sdma_next = (uint32_t)&ptxp->txdesc;
1145 	}
1146 	dp = &vtxp->txdesc;
1147 	dp->sdma_csr = 0;
1148 	dp->sdma_cnt = 0;
1149 	dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1150 	dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1151 
1152 	sc->sc_nexttx = 0;
1153 	sc->sc_lasttx = 0;
1154 }
1155 
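/*
 * gtmpscinit_stop - quiesce the channel: abort the MPSC receiver and
 * the SDMA RX/TX activity, then poll until the abort completes.
 */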
1156 STATIC void
1157 gtmpscinit_stop(struct gtmpsc_softc *sc)
1158 {
1159 	uint32_t csr;
1160 	int timo = 10000;	/* XXXX */
1161 
1162 	/* Abort MPSC Rx (aborting Tx messes things up) */
1163 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1164 
1165 	/* abort SDMA RX and stop TX for MPSC unit */
1166 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1167 
1168 	/* poll for SDMA RX abort completion */
1169 	for (; timo > 0; timo--) {
1170 		csr = GT_SDMA_READ(sc, SDMA_SDCM);
1171 		if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1172 			break;
1173 		DELAY(50);
1174 	}
1175 }
1176 
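/*
 * gtmpscinit_start - (re)program the channel: load the TX/RX
 * descriptor pointers and SDMA configuration, set the baud rate and
 * UART mode, and finally command the receiver to enter hunt state.
 */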
1177 STATIC void
1178 gtmpscinit_start(struct gtmpsc_softc *sc)
1179 {
1180 
1181 	/*
1182 	 * Set pointers of current/first descriptor of TX to SDMA register.
1183 	 */
1184 	GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1185 	GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1186 
1187 	/*
1188 	 * Set pointer of current descriptor of RX to SDMA register.
1189 	 */
1190 	GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
1191 
1192 	/*
1193 	 * initialize SDMA unit Configuration Register
1194 	 */
1195 	GT_SDMA_WRITE(sc, SDMA_SDC,
1196 	    SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);
1197 
1198 	gtmpsc_loadchannelregs(sc);
1199 
1200 	/*
1201 	 * set MPSC LO and HI port config registers for GTMPSC unit
1202  	 */
1203 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
1204 	    GTMPSC_MMCR_LO_MODE_UART	|
1205 	    GTMPSC_MMCR_LO_ET		|
1206 	    GTMPSC_MMCR_LO_ER		|
1207 	    GTMPSC_MMCR_LO_NLM);
1208 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
1209 	    GTMPSC_MMCR_HI_TCDV_DEFAULT	|
1210 	    GTMPSC_MMCR_HI_RDW		|
1211 	    GTMPSC_MMCR_HI_RCDV_DEFAULT);
1212 
1213 	/*
1214 	 * tell the MPSC receiver to enter hunt state
1215 	 */
1216 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
1217 }
1218 
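/*
 * gtmpscshutdown - last-close teardown: drop (fake) carrier and mask
 * the RX interrupt for this channel.  The kgdb port is never shut down.
 */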
1219 STATIC void
1220 gtmpscshutdown(struct gtmpsc_softc *sc)
1221 {
1222 	struct tty *tp;
1223 
1224 #ifdef KGDB
1225 	if ((sc->sc_flags & GTMPSC_KGDB) != 0)
1226 		return;
1227 #endif
1228 	tp = sc->sc_tty;
1229 	mutex_spin_enter(&sc->sc_lock);
1230 	/* Fake carrier off */
1231 	(void) (*tp->t_linesw->l_modem)(tp, 0);
1232 	sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1233 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1234 	mutex_spin_exit(&sc->sc_lock);
1235 }
1236 
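/*
 * gtmpsc_loadchannelregs - program the baud rate generator, the
 * maximum-idle register and the protocol configuration register from
 * the current line parameters.
 */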
1237 STATIC void
1238 gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
1239 {
1240 
1241 	if (sc->sc_dev != NULL)
1242 		gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
1243 	    	    GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
1244 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
1245 
1246 	/*
1247 	 * set MPSC Protocol configuration register for GTMPSC unit
1248 	 */
1249 	GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
1250 }
1251 
1252 
1253 #ifdef MPSC_CONSOLE
1254 /*
1255  * Following are all routines needed for MPSC to act as console
1256  */
1257 STATIC int
1258 gtmpsccngetc(dev_t dev)
1259 {
1260 
1261 	return gtmpsc_common_getc(&gtmpsc_cn_softc);
1262 }
1263 
1264 STATIC void
1265 gtmpsccnputc(dev_t dev, int c)
1266 {
1267 
1268 	gtmpsc_common_putc(&gtmpsc_cn_softc, c);
1269 }
1270 
1271 STATIC void
1272 gtmpsccnpollc(dev_t dev, int on)
1273 {
1274 }
1275 
1276 STATIC void
1277 gtmpsccnhalt(dev_t dev)
1278 {
1279 	gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
1280 	uint32_t csr;
1281 
1282 	/*
1283 	 * flush TX buffers
1284 	 */
1285 	gtmpsc_txflush(sc);
1286 
1287 	/*
1288 	 * stop MPSC unit RX
1289 	 */
1290 	csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
1291 	csr &= ~GTMPSC_CHR2_EH;
1292 	csr |= GTMPSC_CHR2_RXABORT;
1293 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
1294 
1295 	DELAY(GTMPSC_RESET_DELAY);
1296 
1297 	/*
1298 	 * abort SDMA RX for MPSC unit
1299 	 */
1300 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
1301 }
1302 
1303 int
1304 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1305 	       int unit, int brg, int speed, tcflag_t tcflag)
1306 {
1307 	struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1308 	int i, res;
1309 	const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1310 
1311 	res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1312 	if (res != 0)
1313 		return res;
1314 
1315 	gtmpscinit_stop(sc);
1316 	gtmpscinit_start(sc);
1317 
1318 	/*
1319 	 * enable SDMA receive
1320 	 */
1321 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1322 
1323 	for (i = 0; i < sizeof(cp); i++) {
1324 		if (*(cp + i) == 0)
1325 			break;
1326 		gtmpsc_common_putc(sc, *(cp + i));
1327 	}
1328 
1329 	cn_tab = &gtmpsc_consdev;
1330 	cn_init_magic(&gtmpsc_cnm_state);
1331 
1332 	return 0;
1333 }
1334 
1335 /*
1336  * gtmpsc_hackinit - hacks required to support GTMPSC console
1337  */
1338 STATIC int
1339 gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
1340 		bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
1341 		int baudrate, tcflag_t tcflag)
1342 {
1343 	gtmpsc_poll_sdma_t *cn_dmapage =
1344 	    (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
1345 	int error;
1346 
1347 	DPRINTF(("hackinit\n"));
1348 
1349 	memset(sc, 0, sizeof(struct gtmpsc_softc));
1350 	error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
1351 	    &sc->sc_mpsch);
1352 	if (error != 0)
1353 		goto fail0;
1354 
1355 	error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
1356 	    &sc->sc_sdmah);
1357 	if (error != 0)
1358 		goto fail1;
1359 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
1360 	   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
1361 	if (error != 0)
1362 		goto fail2;
1363 	error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
1364 	    sizeof(gtmpsc_polltx_t), NULL,
1365 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1366 	if (error != 0)
1367 		goto fail3;
1368 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
1369 	   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
1370 	   &sc->sc_rxdma_map);
1371 	if (error != 0)
1372 		goto fail4;
1373 	error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
1374 	    sizeof(gtmpsc_pollrx_t), NULL,
1375 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1376 	if (error != 0)
1377 		goto fail5;
1378 
1379 	sc->sc_iot = iot;
1380 	sc->sc_dmat = dmat;
1381 	sc->sc_poll_sdmapage = cn_dmapage;
1382 	sc->sc_brg = brg;
1383 	sc->sc_baudrate = baudrate;
1384 	sc->sc_cflag = tcflag;
1385 
1386 	gtmpsc_txdesc_init(sc);
1387 	gtmpsc_rxdesc_init(sc);
1388 
1389 	return 0;
1390 
1391 fail5:
1392 	bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
1393 fail4:
1394 	bus_dmamap_unload(dmat, sc->sc_txdma_map);
1395 fail3:
1396 	bus_dmamap_destroy(dmat, sc->sc_txdma_map);
1397 fail2:
1398 	bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
1399 fail1:
1400 	bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
1401 fail0:
1402 	return error;
1403 }
1404 #endif	/* MPSC_CONSOLE */
1405 
1406 #ifdef KGDB
1407 STATIC int
1408 gtmpsc_kgdb_getc(void *arg)
1409 {
1410 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1411 
1412 	return gtmpsc_common_getc(sc);
1413 }
1414 
1415 STATIC void
1416 gtmpsc_kgdb_putc(void *arg, int c)
1417 {
1418 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1419 
1420 	gtmpsc_common_putc(sc, c);
1421 }
1422 #endif /* KGDB */
1423 
1424 #if defined(MPSC_CONSOLE) || defined(KGDB)
1425 /*
1426  * gtmpsc_common_getc - polled console read
1427  *
1428  *	We copy data from the DMA buffers into a buffer in the softc
1429  *	to reduce descriptor ownership turnaround time.
1430  *	The MPSC can crater if it wraps its descriptor rings,
1431  *	which happens asynchronously and is throttled only by line speed.
1432  */
1433 STATIC int
1434 gtmpsc_common_getc(struct gtmpsc_softc *sc)
1435 {
1436 	gtmpsc_pollrx_t *vrxp;
1437 	uint32_t csr;
1438 	int ix, ch, wdog_interval = 0;
1439 
1440 	if (!cold)
1441 		mutex_spin_enter(&sc->sc_lock);
1442 
1443 	ix = sc->sc_rcvdrx;
1444 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
1445 	while (sc->sc_rcvcnt == 0) {
1446 		/* Wait receive */
1447 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1448 		    ix * sizeof(gtmpsc_pollrx_t),
1449 		    sizeof(sdma_desc_t),
1450 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1451 		csr = vrxp->rxdesc.sdma_csr;
1452 		if (csr & SDMA_CSR_RX_OWN) {
1453 			GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
1454 			    GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
1455 			if (wdog_interval++ % 32)
1456 				gt_watchdog_service();
1457 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1458 			    ix * sizeof(gtmpsc_pollrx_t),
1459 			    sizeof(sdma_desc_t),
1460 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1461 			DELAY(50);
1462 			continue;
1463 		}
1464 		if (csr & SDMA_CSR_RX_ES)
1465 			aprint_error_dev(sc->sc_dev,
1466 			    "RX error, rxdesc csr 0x%x\n", csr);
1467 
1468 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1469 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1470 		    sizeof(vrxp->rxbuf),
1471 		    BUS_DMASYNC_POSTREAD);
1472 
1473 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
1474 		sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
1475 		sc->sc_roffset = 0;
1476 		sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
1477 
1478 		if (sc->sc_rcvcnt == 0) {
1479 			/* cleanup this descriptor, and return to DMA */
1480 			CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
1481 			sc->sc_rcvrx = sc->sc_rcvdrx;
1482 		}
1483 
1484 		ix = sc->sc_rcvdrx;
1485 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
1486 	}
1487 	ch = vrxp->rxbuf[sc->sc_roffset++];
1488 	sc->sc_rcvcnt--;
1489 
1490 	if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
1491 		/* cleanup this descriptor, and return to DMA */
1492 		CLEANUP_AND_RETURN_RXDMA(sc, ix);
1493 		sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
1494 	}
1495 
1496 	gt_watchdog_service();
1497 
1498 	if (!cold)
1499 		mutex_spin_exit(&sc->sc_lock);
1500 	return ch;
1501 }
1502 
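/*
 * gtmpsc_common_putc - polled console putc: stage one character in
 * the next free TX descriptor, hand it to the SDMA engine and spin
 * until it has been transmitted.
 */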
1503 STATIC void
1504 gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
1505 {
1506 	gtmpsc_polltx_t *vtxp;
1507 	int ix;
1508 	const int nc = 1;
1509 
1510 	/* Get a DMA descriptor */
1511 	if (!cold)
1512 		mutex_spin_enter(&sc->sc_lock);
1513 	ix = sc->sc_nexttx;
1514 	sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1515 	if (sc->sc_nexttx == sc->sc_lasttx) {
1516 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1517 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1518 	}
1519 	if (!cold)
1520 		mutex_spin_exit(&sc->sc_lock);
1521 
1522 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1523 	vtxp->txbuf[0] = c;
1524 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1525 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1526 	    sizeof(vtxp->txbuf),
1527 	    BUS_DMASYNC_PREWRITE);
1528 
1529 	vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
1530 	vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
1531 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1532 	    ix * sizeof(gtmpsc_polltx_t),
1533 	    sizeof(sdma_desc_t),
1534 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1535 
1536 	if (!cold)
1537 		mutex_spin_enter(&sc->sc_lock);
1538 	/*
1539 	 * now kick some SDMA
1540 	 */
1541 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
1542 
1543 	while (sc->sc_lasttx != sc->sc_nexttx) {
1544 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1545 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1546 	}
1547 	if (!cold)
1548 		mutex_spin_exit(&sc->sc_lock);
1549 }
1550 
1551 /*
1552  * gtmpsc_common_putc_wait_complete - wait until a polled TX descriptor completes
1553  */
1554 STATIC void
1555 gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
1556 {
1557 	gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
1558 	uint32_t csr;
1559 	int wdog_interval = 0;
1560 
1561 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1562 	    ix * sizeof(gtmpsc_polltx_t),
1563 	    sizeof(sdma_desc_t),
1564 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1565 	csr = vtxp->txdesc.sdma_csr;
1566 	while (csr & SDMA_CSR_TX_OWN) {
1567 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1568 		    ix * sizeof(gtmpsc_polltx_t),
1569 		    sizeof(sdma_desc_t),
1570 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1571 		DELAY(40);
1572 		if (wdog_interval++ % 32)
1573 			gt_watchdog_service();
1574 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1575 		    ix * sizeof(gtmpsc_polltx_t),
1576 		    sizeof(sdma_desc_t),
1577 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1578 		csr = vtxp->txdesc.sdma_csr;
1579 	}
1580 	if (csr & SDMA_CSR_TX_ES)
1581 		aprint_error_dev(sc->sc_dev,
1582 		    "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
1583 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1584 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1585 	    sizeof(vtxp->txbuf),
1586 	    BUS_DMASYNC_POSTWRITE);
1587 }
1588 #endif	/* defined(MPSC_CONSOLE) || defined(KGDB) */
1589