1 /*	$NetBSD: gtmpsc.c,v 1.38 2010/04/28 13:51:56 kiyohara Exp $	*/
2 /*
3  * Copyright (c) 2009 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * mpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.38 2010/04/28 13:51:56 kiyohara Exp $");
33 
34 #include "opt_kgdb.h"
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/intr.h>
42 #include <sys/kauth.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/systm.h>
47 #include <sys/timepps.h>
48 #include <sys/tty.h>
49 #ifdef KGDB
50 #include <sys/kgdb.h>
51 #endif
52 
53 #include <uvm/uvm.h>
54 #include <uvm/uvm_extern.h>
55 
56 #include <dev/cons.h>
57 
58 #include <dev/marvell/gtreg.h>
59 #include <dev/marvell/gtvar.h>
60 #include <dev/marvell/gtbrgreg.h>
61 #include <dev/marvell/gtbrgvar.h>
62 #include <dev/marvell/gtsdmareg.h>
63 #include <dev/marvell/gtsdmavar.h>
64 #include <dev/marvell/gtmpscreg.h>
65 #include <dev/marvell/gtmpscvar.h>
66 #include <dev/marvell/marvellreg.h>
67 #include <dev/marvell/marvellvar.h>
68 
69 #include "gtmpsc.h"
70 #include "ioconf.h"
71 #include "locators.h"
72 
73 /*
74  * Wait 2 character times for RESET_DELAY
75  */
76 #define GTMPSC_RESET_DELAY	(2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
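/*
 * Worked example of the formula above (illustrative only; the real
 * divisor is GT_MPSC_DEFAULT_BAUD_RATE): at 9600 baud,
 * 2 chars * 8 bits * 1000000 us / 9600 baud = ~1666 us.
 */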
77 
78 
79 #if defined(DEBUG)
80 unsigned int gtmpsc_debug = 0;
81 # define STATIC
82 # define DPRINTF(x)	do { if (gtmpsc_debug) printf x ; } while (0)
83 #else
84 # define STATIC static
85 # define DPRINTF(x)
86 #endif
87 
88 #define GTMPSCUNIT_MASK    0x7ffff
89 #define GTMPSCDIALOUT_MASK 0x80000
90 
91 #define GTMPSCUNIT(x)      (minor(x) & GTMPSCUNIT_MASK)
92 #define GTMPSCDIALOUT(x)   (minor(x) & GTMPSCDIALOUT_MASK)
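/*
 * Minor number layout implied by the masks above: the low bits select
 * the unit and bit 19 (0x80000) marks the dialout node, so, for example,
 * minor (1 | 0x80000) is the dialout node of unit 1.
 */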
93 
94 #define CLEANUP_AND_RETURN_RXDMA(sc, ix)				    \
95 	do {								    \
96 		gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
97 									    \
98 		_vrxp->rxdesc.sdma_csr =				    \
99 		    SDMA_CSR_RX_L	|				    \
100 		    SDMA_CSR_RX_F	|				    \
101 		    SDMA_CSR_RX_OWN	|				    \
102 		    SDMA_CSR_RX_EI;					    \
103 		_vrxp->rxdesc.sdma_cnt =				    \
104 		    GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;		    \
105 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
106 		    (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),   \
107 		    sizeof(_vrxp->rxbuf),				    \
108 		    BUS_DMASYNC_PREREAD);				    \
109 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
110 		    (ix) * sizeof(gtmpsc_pollrx_t),			    \
111 		    sizeof(sdma_desc_t),				    \
112 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);	    \
113 	} while (0)
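/*
 * CLEANUP_AND_RETURN_RXDMA re-arms one receive descriptor: ownership is
 * handed back to the SDMA engine (SDMA_CSR_RX_OWN), the full buffer size
 * is restored in sdma_cnt, and the buffer and then the descriptor are
 * synced so the hardware sees a clean entry before writing new data.
 */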
114 
115 
116 STATIC int  gtmpscmatch(device_t, cfdata_t, void *);
117 STATIC void gtmpscattach(device_t, device_t, void *);
118 
119 STATIC void gtmpsc_softintr(void *);
120 
121 STATIC void gtmpscstart(struct tty *);
122 STATIC int  gtmpscparam(struct tty *, struct termios *);
123 
124 STATIC void gtmpsc_shutdownhook(void *);
125 
126 STATIC uint32_t cflag2mpcr(tcflag_t);
127 STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
128 STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
129 STATIC void gtmpsc_write(struct gtmpsc_softc *);
130 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
131 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
132 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
133 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
134 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
135 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
136 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
137 
138 #ifdef MPSC_CONSOLE
139 STATIC int gtmpsccngetc(dev_t);
140 STATIC void gtmpsccnputc(dev_t, int);
141 STATIC void gtmpsccnpollc(dev_t, int);
142 STATIC void gtmpsccnhalt(dev_t);
143 
144 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
145 			   bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
146 #endif
147 
148 #if defined(MPSC_CONSOLE) || defined(KGDB)
149 STATIC int  gtmpsc_common_getc(struct gtmpsc_softc *);
150 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
151 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
152 #endif
153 
154 dev_type_open(gtmpscopen);
155 dev_type_close(gtmpscclose);
156 dev_type_read(gtmpscread);
157 dev_type_write(gtmpscwrite);
158 dev_type_ioctl(gtmpscioctl);
159 dev_type_stop(gtmpscstop);
160 dev_type_tty(gtmpsctty);
161 dev_type_poll(gtmpscpoll);
162 
163 const struct cdevsw gtmpsc_cdevsw = {
164 	gtmpscopen, gtmpscclose, gtmpscread, gtmpscwrite, gtmpscioctl,
165 	gtmpscstop, gtmpsctty, gtmpscpoll, nommap, ttykqfilter, D_TTY
166 };
167 
168 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
169     gtmpscmatch, gtmpscattach, NULL, NULL);
170 
171 
172 STATIC uint32_t sdma_imask;		/* soft copy of SDMA IMASK reg */
173 STATIC struct cnm_state gtmpsc_cnm_state;
174 
175 #ifdef KGDB
176 static int gtmpsc_kgdb_addr;
177 static int gtmpsc_kgdb_attached;
178 
179 STATIC int      gtmpsc_kgdb_getc(void *);
180 STATIC void     gtmpsc_kgdb_putc(void *, int);
181 #endif /* KGDB */
182 
183 #ifdef MPSC_CONSOLE
184 /*
185  * hacks for console initialization
186  * which happens prior to autoconfig "attach"
187  *
188  * XXX Assumes PAGE_SIZE is a constant!
189  */
190 gtmpsc_softc_t gtmpsc_cn_softc;
191 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
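/*
 * This statically allocated, page-aligned page stands in for the page
 * that gtmpscattach() obtains with bus_dmamem_alloc(); gtmpsc_hackinit()
 * loads the console DMA maps on top of it so the console can be brought
 * up before autoconfiguration attaches the device.
 */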
192 
193 
194 static struct consdev gtmpsc_consdev = {
195 	NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
196 	NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
197 };
198 #endif
199 
200 
201 #define GT_MPSC_READ(sc, o) \
202 	bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
203 #define GT_MPSC_WRITE(sc, o, v) \
204 	bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
205 #define GT_SDMA_READ(sc, o) \
206 	bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
207 #define GT_SDMA_WRITE(sc, o, v) \
208 	bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
209 
210 
211 /* ARGSUSED */
212 STATIC int
213 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
214 {
215 	struct marvell_attach_args *mva = aux;
216 
217 	if (strcmp(mva->mva_name, match->cf_name) != 0)
218 		return 0;
219 
220 	switch (mva->mva_model) {
221 	case MARVELL_DISCOVERY:
222 	case MARVELL_DISCOVERY_II:
223 	case MARVELL_DISCOVERY_III:
224 #if 0
225 	case MARVELL_DISCOVERY_LT:
226 #endif
227 		break;
228 
229 	default:
230 		return 0;
231 	}
232 	if (mva->mva_offset == GTCF_OFFSET_DEFAULT ||
233 	    mva->mva_irq == GTCF_IRQ_DEFAULT)
234 		return 0;
235 
236 	mva->mva_size = GTMPSC_SIZE;
237 	return 1;
238 }
239 
240 /* ARGSUSED */
241 STATIC void
242 gtmpscattach(device_t parent, device_t self, void *aux)
243 {
244 	struct gtmpsc_softc *sc = device_private(self);
245 	struct marvell_attach_args *mva = aux;
246 	bus_dma_segment_t segs;
247 	struct tty *tp;
248 	int rsegs, err, unit;
249 	void *kva;
250 
251 	aprint_naive("\n");
252 	aprint_normal(": Multi-Protocol Serial Controller\n");
253 
254 	if (mva->mva_unit != GTCF_UNIT_DEFAULT)
255 		unit = mva->mva_unit;
256 	else
257 		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;
258 
259 #ifdef MPSC_CONSOLE
260 	if (cn_tab == &gtmpsc_consdev &&
261 	    cn_tab->cn_dev == makedev(0, unit)) {
262 		gtmpsc_cn_softc.sc_dev = self;
263 		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
264 		sc->sc_flags = GTMPSC_CONSOLE;
265 	} else
266 #endif
267 	{
268 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
269 		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
270 			aprint_error_dev(self, "Cannot map MPSC registers\n");
271 			return;
272 		}
273 		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
274 		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
275 			aprint_error_dev(self, "Cannot map SDMA registers\n");
276 			return;
277 		}
278 		sc->sc_dev = self;
279 		sc->sc_unit = unit;
280 		sc->sc_iot = mva->mva_iot;
281 		sc->sc_dmat = mva->mva_dmat;
282 
283 		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
284 		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
285 		if (err) {
286 			aprint_error_dev(sc->sc_dev,
287 			    "bus_dmamem_alloc error 0x%x\n", err);
288 			goto fail0;
289 		}
290 		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
291 		    BUS_DMA_NOWAIT);
292 		if (err) {
293 			aprint_error_dev(sc->sc_dev,
294 			    "bus_dmamem_map error 0x%x\n", err);
295 			goto fail1;
296 		}
297 		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
298 		sc->sc_poll_sdmapage = kva;
299 
300 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
301 		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
302 		   &sc->sc_txdma_map);
303 		if (err != 0) {
304 			aprint_error_dev(sc->sc_dev,
305 			    "bus_dmamap_create error 0x%x\n", err);
306 			goto fail2;
307 		}
308 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
309 		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
310 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
311 		if (err != 0) {
312 			aprint_error_dev(sc->sc_dev,
313 			    "bus_dmamap_load tx error 0x%x\n", err);
314 			goto fail3;
315 		}
316 		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
317 		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
318 		   &sc->sc_rxdma_map);
319 		if (err != 0) {
320 			aprint_error_dev(sc->sc_dev,
321 			    "bus_dmamap_create rx error 0x%x\n", err);
322 			goto fail4;
323 		}
324 		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
325 		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
326 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
327 		if (err != 0) {
328 			aprint_error_dev(sc->sc_dev,
329 			    "bus_dmamap_load rx error 0x%x\n", err);
330 			goto fail5;
331 		}
332 
333 		sc->sc_brg = unit;		/* XXXXX */
334 		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
335 	}
336 	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
337 	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);
338 
339 	sc->sc_rx_ready = 0;
340 	sc->sc_tx_busy = 0;
341 	sc->sc_tx_done = 0;
342 	sc->sc_tx_stopped = 0;
343 	sc->sc_heldchange = 0;
344 
345 	gtmpsc_txdesc_init(sc);
346 	gtmpsc_rxdesc_init(sc);
347 
348 	sc->sc_tty = tp = ttymalloc();
349 	tp->t_oproc = gtmpscstart;
350 	tp->t_param = gtmpscparam;
351 	tty_attach(tp);
352 
353 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);
354 
355 	/*
356 	 * clear any pending SDMA interrupts for this unit
357 	 */
358 	(void) gt_sdma_icause(device_parent(sc->sc_dev),
359 	    SDMA_INTR_RXBUF(sc->sc_unit) |
360 	    SDMA_INTR_RXERR(sc->sc_unit) |
361 	    SDMA_INTR_TXBUF(sc->sc_unit) |
362 	    SDMA_INTR_TXEND(sc->sc_unit));
363 
364 	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
365 	if (sc->sc_si == NULL)
366 		panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");
367 
368 	shutdownhook_establish(gtmpsc_shutdownhook, sc);
369 
370 	gtmpscinit_stop(sc);
371 	gtmpscinit_start(sc);
372 
373 	if (sc->sc_flags & GTMPSC_CONSOLE) {
374 		int maj;
375 
376 		/* locate the major number */
377 		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);
378 
379 		tp->t_dev = cn_tab->cn_dev =
380 		    makedev(maj, device_unit(sc->sc_dev));
381 
382 		aprint_normal_dev(self, "console\n");
383 	}
384 
385 #ifdef KGDB
386 	/*
387 	 * Allow kgdb to "take over" this port.  If this is
388 	 * the kgdb device, it has exclusive use.
389 	 */
390 	if (sc->sc_unit == gtmpsckgdbport) {
391 #ifdef MPSC_CONSOLE
392 		if (sc->sc_unit == MPSC_CONSOLE) {
393 			aprint_error_dev(self,
394 			    "(kgdb): cannot share with console\n");
395 			return;
396 		}
397 #endif
398 
399 		sc->sc_flags |= GTMPSC_KGDB;
400 		aprint_normal_dev(self, "kgdb\n");
401 
402 		gtmpsc_txflush(sc);
403 
404 		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
405 		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
406 		gtmpsc_kgdb_attached = 1;
407 		kgdb_connect(1);
408 	}
409 #endif /* KGDB */
410 
411 	return;
412 
413 
414 fail5:
415 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
416 fail4:
417 	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
418 fail3:
419 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
420 fail2:
421 	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
422 fail1:
423 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
424 fail0:
425 	return;
426 }
427 
428 /* ARGSUSED */
429 int
430 gtmpsc_intr(void *arg)
431 {
432 	struct gt_softc *gt = (struct gt_softc *)arg;
433 	struct gtmpsc_softc *sc;
434 	uint32_t icause;
435 	int i;
436 
437 	icause = gt_sdma_icause(gt->sc_dev, sdma_imask);
438 
439 	for (i = 0; i < GTMPSC_NCHAN; i++) {
440 		sc = device_lookup_private(&gtmpsc_cd, i);
441 		if (sc == NULL)
442 			continue;
443 		mutex_spin_enter(&sc->sc_lock);
444 		if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
445 			gtmpsc_intr_rx(sc);
446 			icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
447 		}
448 		if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
449 			gtmpsc_intr_tx(sc);
450 			icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
451 		}
452 		mutex_spin_exit(&sc->sc_lock);
453 	}
454 
455 	return 1;
456 }
457 
458 STATIC void
459 gtmpsc_softintr(void *arg)
460 {
461 	struct gtmpsc_softc *sc = arg;
462 	struct tty *tp = sc->sc_tty;
463 	gtmpsc_pollrx_t *vrxp;
464 	int code;
465 	u_int cc;
466 	u_char *get, *end, lsr;
467 	int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
468 
469 	if (sc->sc_rx_ready) {
470 		sc->sc_rx_ready = 0;
471 
472 		cc = sc->sc_rcvcnt;
473 
474 		/* If not yet open, drop the entire buffer content here */
475 		if (!ISSET(tp->t_state, TS_ISOPEN))
476 			cc = 0;
477 
478 		vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
479 		end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
480 		get = vrxp->rxbuf + sc->sc_roffset;
481 		while (cc > 0) {
482 			code = *get;
483 			lsr = vrxp->rxdesc.sdma_csr;
484 
485 			if (ISSET(lsr,
486 			    SDMA_CSR_RX_PE |
487 			    SDMA_CSR_RX_FR |
488 			    SDMA_CSR_RX_OR |
489 			    SDMA_CSR_RX_BR)) {
490 				if (ISSET(lsr, SDMA_CSR_RX_OR))
491 					;	/* XXXXX not yet... */
492 				if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
493 					SET(code, TTY_FE);
494 				if (ISSET(lsr, SDMA_CSR_RX_PE))
495 					SET(code, TTY_PE);
496 			}
497 
498 			if ((*rint)(code, tp) == -1) {
499 				/*
500 				 * The line discipline's buffer is out of space.
501 				 */
502 				/* XXXXX not yet... */
503 			}
504 			if (++get >= end) {
505 				/* cleanup this descriptor, and return to DMA */
506 				CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
507 				sc->sc_rcvrx =
508 				    (sc->sc_rcvrx + 1) % GTMPSC_NRXDESC;
509 				vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
510 				end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
511 				get = vrxp->rxbuf + sc->sc_roffset;
512 			}
513 			cc--;
514 		}
515 	}
516 	if (sc->sc_tx_done) {
517 		sc->sc_tx_done = 0;
518 		CLR(tp->t_state, TS_BUSY);
519 		if (ISSET(tp->t_state, TS_FLUSH))
520 		    CLR(tp->t_state, TS_FLUSH);
521 		else
522 		    ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
523 		(*tp->t_linesw->l_start)(tp);
524 	}
525 }
526 
527 int
528 gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
529 {
530 	struct gtmpsc_softc *sc;
531 	int unit = GTMPSCUNIT(dev);
532 	struct tty *tp;
533 	int s;
534 	int error;
535 
536 	sc = device_lookup_private(&gtmpsc_cd, unit);
537 	if (!sc)
538 		return ENXIO;
539 #ifdef KGDB
540 	/*
541 	 * If this is the kgdb port, no other use is permitted.
542 	 */
543 	if (sc->sc_flags & GTMPSC_KGDB)
544 		return EBUSY;
545 #endif
546 	tp = sc->sc_tty;
547 	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
548 		return EBUSY;
549 
550 	s = spltty();
551 
552 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
553 		struct termios t;
554 
555 		tp->t_dev = dev;
556 
557 		mutex_spin_enter(&sc->sc_lock);
558 
559 		/* Turn on interrupts. */
560 		sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
561 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
562 
563 		/* Clear PPS capture state on first open. */
564 		mutex_spin_enter(&timecounter_lock);
565 		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
566 		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
567 		pps_init(&sc->sc_pps_state);
568 		mutex_spin_exit(&timecounter_lock);
569 
570 		mutex_spin_exit(&sc->sc_lock);
571 
572 		if (sc->sc_flags & GTMPSC_CONSOLE) {
573 			t.c_ospeed = sc->sc_baudrate;
574 			t.c_cflag = sc->sc_cflag;
575 		} else {
576 			t.c_ospeed = TTYDEF_SPEED;
577 			t.c_cflag = TTYDEF_CFLAG;
578 		}
579 		t.c_ispeed = t.c_ospeed;
580 
581 		/* Make sure gtmpscparam() will do something. */
582 		tp->t_ospeed = 0;
583 		(void) gtmpscparam(tp, &t);
584 		tp->t_iflag = TTYDEF_IFLAG;
585 		tp->t_oflag = TTYDEF_OFLAG;
586 		tp->t_lflag = TTYDEF_LFLAG;
587 		ttychars(tp);
588 		ttsetwater(tp);
589 
590 		mutex_spin_enter(&sc->sc_lock);
591 
592 		/* Clear the input/output ring */
593 		sc->sc_rcvcnt = 0;
594 		sc->sc_roffset = 0;
595 		sc->sc_rcvrx = 0;
596 		sc->sc_rcvdrx = 0;
597 		sc->sc_nexttx = 0;
598 		sc->sc_lasttx = 0;
599 
600 		/*
601 		 * enable SDMA receive
602 		 */
603 		GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
604 
605 		mutex_spin_exit(&sc->sc_lock);
606 	}
607 	splx(s);
608 	error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
609 	if (error)
610 		goto bad;
611 
612 	error = (*tp->t_linesw->l_open)(dev, tp);
613 	if (error)
614 		goto bad;
615 
616 	return 0;
617 
618 bad:
619 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
620 		/*
621 		 * We failed to open the device, and nobody else had it opened.
622 		 * Clean up the state as appropriate.
623 		 */
624 		gtmpscshutdown(sc);
625 	}
626 
627 	return error;
628 }
629 
630 int
631 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
632 {
633 	int unit = GTMPSCUNIT(dev);
634 	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
635 	struct tty *tp = sc->sc_tty;
636 
637 	if (!ISSET(tp->t_state, TS_ISOPEN))
638 		return 0;
639 
640 	(*tp->t_linesw->l_close)(tp, flag);
641 	ttyclose(tp);
642 
643 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
644 		/*
645 		 * Although we got a last close, the device may still be in
646 		 * use; e.g. if this was the dialout node, and there are still
647 		 * processes waiting for carrier on the non-dialout node.
648 		 */
649 		gtmpscshutdown(sc);
650 	}
651 
652 	return 0;
653 }
654 
655 int
656 gtmpscread(dev_t dev, struct uio *uio, int flag)
657 {
658 	struct gtmpsc_softc *sc =
659 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
660 	struct tty *tp = sc->sc_tty;
661 
662 	return (*tp->t_linesw->l_read)(tp, uio, flag);
663 }
664 
665 int
666 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
667 {
668 	struct gtmpsc_softc *sc =
669 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
670 	struct tty *tp = sc->sc_tty;
671 
672 	return (*tp->t_linesw->l_write)(tp, uio, flag);
673 }
674 
675 int
676 gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
677 {
678 	struct gtmpsc_softc *sc =
679 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
680 	struct tty *tp = sc->sc_tty;
681 	int error;
682 
683 	error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
684 	if (error != EPASSTHROUGH)
685 		return error;
686 
687 	error = ttioctl(tp, cmd, data, flag, l);
688 	if (error != EPASSTHROUGH)
689 		return error;
690 
691 	error = 0;
692 	switch (cmd) {
693 	case TIOCSFLAGS:
694 		error = kauth_authorize_device_tty(l->l_cred,
695 		    KAUTH_DEVICE_TTY_PRIVSET, tp);
696 		if (error)
697 			return error;
698 		break;
699 	default:
700 		/* nothing */
701 		break;
702 	}
703 
704 	mutex_spin_enter(&sc->sc_lock);
705 
706 	switch (cmd) {
707 	case PPS_IOC_CREATE:
708 	case PPS_IOC_DESTROY:
709 	case PPS_IOC_GETPARAMS:
710 	case PPS_IOC_SETPARAMS:
711 	case PPS_IOC_GETCAP:
712 	case PPS_IOC_FETCH:
713 #ifdef PPS_SYNC
714 	case PPS_IOC_KCBIND:
715 #endif
716 		mutex_spin_enter(&timecounter_lock);
717 		error = pps_ioctl(cmd, data, &sc->sc_pps_state);
718 		mutex_spin_exit(&timecounter_lock);
719 		break;
720 
721 	case TIOCDCDTIMESTAMP:	/* XXX old, overloaded  API used by xntpd v3 */
722 		mutex_spin_enter(&timecounter_lock);
723 #ifndef PPS_TRAILING_EDGE
724 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
725 		    &sc->sc_pps_state.ppsinfo.assert_timestamp);
726 #else
727 		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
728 		    &sc->sc_pps_state.ppsinfo.clear_timestamp);
729 #endif
730 		mutex_spin_exit(&timecounter_lock);
731 		break;
732 
733 	default:
734 		error = EPASSTHROUGH;
735 		break;
736 	}
737 
738 	mutex_spin_exit(&sc->sc_lock);
739 
740 	return error;
741 }
742 
743 void
744 gtmpscstop(struct tty *tp, int flag)
745 {
746 }
747 
748 struct tty *
749 gtmpsctty(dev_t dev)
750 {
751 	struct gtmpsc_softc *sc =
752 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
753 
754 	return sc->sc_tty;
755 }
756 
757 int
758 gtmpscpoll(dev_t dev, int events, struct lwp *l)
759 {
760 	struct gtmpsc_softc *sc =
761 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
762 	struct tty *tp = sc->sc_tty;
763 
764 	return (*tp->t_linesw->l_poll)(tp, events, l);
765 }
766 
767 
768 STATIC void
769 gtmpscstart(struct tty *tp)
770 {
771 	struct gtmpsc_softc *sc;
772 	unsigned char *tba;
773 	unsigned int unit;
774 	int s, tbc;
775 
776 	unit = GTMPSCUNIT(tp->t_dev);
777 	sc = device_lookup_private(&gtmpsc_cd, unit);
778 	if (sc == NULL)
779 		return;
780 
781 	s = spltty();
782 	if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
783 		goto out;
784 	if (sc->sc_tx_stopped)
785 		goto out;
786 	if (!ttypull(tp))
787 		goto out;
788 
789 	/* Grab the first contiguous region of buffer space. */
790 	tba = tp->t_outq.c_cf;
791 	tbc = ndqb(&tp->t_outq, 0);
792 
793 	mutex_spin_enter(&sc->sc_lock);
794 
795 	sc->sc_tba = tba;
796 	sc->sc_tbc = tbc;
797 
798 	sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
799 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
800 	SET(tp->t_state, TS_BUSY);
801 	sc->sc_tx_busy = 1;
802 	gtmpsc_write(sc);
803 
804 	mutex_spin_exit(&sc->sc_lock);
805 out:
806 	splx(s);
807 }
808 
809 STATIC int
810 gtmpscparam(struct tty *tp, struct termios *t)
811 {
812 	struct gtmpsc_softc *sc =
813 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));
814 
815 	/* Check requested parameters. */
816 	if (compute_cdv(t->c_ospeed) < 0)
817 		return EINVAL;
818 	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
819 		return EINVAL;
820 
821 	/*
822 	 * If there were no changes, don't do anything.  This avoids dropping
823 	 * input and improves performance when all we did was frob things like
824 	 * VMIN and VTIME.
825 	 */
826 	if (tp->t_ospeed == t->c_ospeed &&
827 	    tp->t_cflag == t->c_cflag)
828 		return 0;
829 
830 	mutex_spin_enter(&sc->sc_lock);
831 
832 	/* And copy to tty. */
833 	tp->t_ispeed = 0;
834 	tp->t_ospeed = t->c_ospeed;
835 	tp->t_cflag = t->c_cflag;
836 
837 	sc->sc_baudrate = t->c_ospeed;
838 
839 	if (!sc->sc_heldchange) {
840 		if (sc->sc_tx_busy) {
841 			sc->sc_heldtbc = sc->sc_tbc;
842 			sc->sc_tbc = 0;
843 			sc->sc_heldchange = 1;
844 		} else
845 			gtmpsc_loadchannelregs(sc);
846 	}
847 
848 	mutex_spin_exit(&sc->sc_lock);
849 
850 	/* Fake carrier on */
851 	(void) (*tp->t_linesw->l_modem)(tp, 1);
852 
853 	return 0;
854 }
855 
856 void
857 gtmpsc_shutdownhook(void *arg)
858 {
859 	gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
860 
861 	gtmpsc_txflush(sc);
862 }
863 
864 /*
865  * Convert cflag (CS[5678] and CSTOPB) to an MPCR value.
866  */
867 STATIC uint32_t
868 cflag2mpcr(tcflag_t cflag)
869 {
870 	uint32_t mpcr = 0;
871 
872 	switch (ISSET(cflag, CSIZE)) {
873 	case CS5:
874 		SET(mpcr, GTMPSC_MPCR_CL_5);
875 		break;
876 	case CS6:
877 		SET(mpcr, GTMPSC_MPCR_CL_6);
878 		break;
879 	case CS7:
880 		SET(mpcr, GTMPSC_MPCR_CL_7);
881 		break;
882 	case CS8:
883 		SET(mpcr, GTMPSC_MPCR_CL_8);
884 		break;
885 	}
886 	if (ISSET(cflag, CSTOPB))
887 		SET(mpcr, GTMPSC_MPCR_SBL_2);
888 
889 	return mpcr;
890 }
891 
892 STATIC __inline void
893 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
894 {
895 	gtmpsc_pollrx_t *vrxp;
896 	uint32_t csr;
897 	int kick, ix;
898 
899 	kick = 0;
900 
901 	/* already handled in gtmpsc_common_getc() */
902 	if (sc->sc_rcvdrx == sc->sc_rcvrx)
903 		return;
904 
905 	ix = sc->sc_rcvdrx;
906 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
907 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
908 	    ix * sizeof(gtmpsc_pollrx_t),
909 	    sizeof(sdma_desc_t),
910 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
911 	csr = vrxp->rxdesc.sdma_csr;
912 	while (!(csr & SDMA_CSR_RX_OWN)) {
913 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
914 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
915 		    sizeof(vrxp->rxbuf),
916 		    BUS_DMASYNC_POSTREAD);
917 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
918 		if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
919 			int cn_trapped = 0;
920 
921 			cn_check_magic(sc->sc_tty->t_dev,
922 			    CNC_BREAK, gtmpsc_cnm_state);
923 			if (cn_trapped)
924 				continue;
925 #if defined(KGDB) && !defined(DDB)
926 			if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
927 				kgdb_connect(1);
928 				continue;
929 			}
930 #endif
931 		}
932 
933 		sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
934 		kick = 1;
935 
936 		ix = (ix + 1) % GTMPSC_NRXDESC;
937 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
938 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
939 		    ix * sizeof(gtmpsc_pollrx_t),
940 		    sizeof(sdma_desc_t),
941 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
942 		csr = vrxp->rxdesc.sdma_csr;
943 	}
944 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
945 	    ix * sizeof(gtmpsc_pollrx_t),
946 	    sizeof(sdma_desc_t),
947 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
948 
949 	if (kick) {
950 		sc->sc_rcvdrx = ix;
951 		sc->sc_rx_ready = 1;
952 		softint_schedule(sc->sc_si);
953 	}
954 }
955 
956 STATIC __inline void
957 gtmpsc_intr_tx(struct gtmpsc_softc *sc)
958 {
959 	gtmpsc_polltx_t *vtxp;
960 	uint32_t csr;
961 	int ix;
962 
963 	/*
964 	 * If we've delayed a parameter change, do it now,
965 	 * and restart output.
966 	 */
967 	if (sc->sc_heldchange) {
968 		gtmpsc_loadchannelregs(sc);
969 		sc->sc_heldchange = 0;
970 		sc->sc_tbc = sc->sc_heldtbc;
971 		sc->sc_heldtbc = 0;
972 	}
973 
974 	/* Clean-up TX descriptors and buffers */
975 	ix = sc->sc_lasttx;
976 	while (ix != sc->sc_nexttx) {
977 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
978 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
979 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
980 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
981 		csr = vtxp->txdesc.sdma_csr;
982 		if (csr & SDMA_CSR_TX_OWN) {
983 			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
984 			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
985 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
986 			break;
987 		}
988 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
989 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
990 		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
991 		ix = (ix + 1) % GTMPSC_NTXDESC;
992 	}
993 	sc->sc_lasttx = ix;
994 
995 	/* Output the next chunk of the contiguous buffer */
996 	gtmpsc_write(sc);
997 	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
998 		sc->sc_tx_busy = 0;
999 		sc->sc_tx_done = 1;
1000 		softint_schedule(sc->sc_si);
1001 		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
1002 		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1003 	}
1004 }
1005 
1006 /*
1007  * gtmpsc_write - write a buffer into the hardware
1008  */
1009 STATIC void
1010 gtmpsc_write(struct gtmpsc_softc *sc)
1011 {
1012 	gtmpsc_polltx_t *vtxp;
1013 	uint32_t sdcm, ix;
1014 	int kick, n;
1015 
1016 	kick = 0;
1017 	while (sc->sc_tbc > 0 && (sc->sc_nexttx + 1) % GTMPSC_NTXDESC != sc->sc_lasttx) {
1018 		n = min(sc->sc_tbc, GTMPSC_TXBUFSZ);
1019 
1020 		ix = sc->sc_nexttx;
1021 		sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1022 
1023 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
1024 
1025 		memcpy(vtxp->txbuf, sc->sc_tba, n);
1026 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1027 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1028 		    sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1029 
1030 		vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1031 		vtxp->txdesc.sdma_csr =
1032 		    SDMA_CSR_TX_L	|
1033 		    SDMA_CSR_TX_F	|
1034 		    SDMA_CSR_TX_EI	|
1035 		    SDMA_CSR_TX_OWN;
1036 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1037 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1038 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1039 
1040 		sc->sc_tbc -= n;
1041 		sc->sc_tba += n;
1042 		kick = 1;
1043 	}
1044 	if (kick) {
1045 		/*
1046 		 * now kick some SDMA
1047 		 */
1048 		sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1049 		if ((sdcm & SDMA_SDCM_TXD) == 0)
1050 			GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1051 	}
1052 }
1053 
1054 /*
1055  * gtmpsc_txflush - wait for output to drain
1056  */
1057 STATIC void
1058 gtmpsc_txflush(gtmpsc_softc_t *sc)
1059 {
1060 	gtmpsc_polltx_t *vtxp;
1061 	int ix, limit = 4000000;	/* 4 seconds */
1062 
1063 	ix = sc->sc_nexttx - 1;
1064 	if (ix < 0)
1065 		ix = GTMPSC_NTXDESC - 1;
1066 
1067 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1068 	while (limit > 0) {
1069 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1070 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1071 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1072 		if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1073 			break;
1074 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1075 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1076 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1077 		DELAY(1);
1078 		limit -= 1;
1079 	}
1080 }
1081 
1082 /*
1083  * gtmpsc_rxdesc_init - set up RX descriptor ring
1084  */
1085 STATIC void
1086 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1087 {
1088 	gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1089 	sdma_desc_t *dp;
1090 	int i;
1091 
1092 	first_prxp = prxp =
1093 	    (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1094 	vrxp = sc->sc_poll_sdmapage->rx;
1095 	for (i = 0; i < GTMPSC_NRXDESC - 1; i++) {
1096 		dp = &vrxp->rxdesc;
1097 		dp->sdma_csr =
1098 		    SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1099 		dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1100 		dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1101 		vrxp++;
1102 		prxp++;
1103 		dp->sdma_next = (uint32_t)&prxp->rxdesc;
1104 
1105 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1106 		    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1107 		    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1108 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1109 		    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1110 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1111 	}
1112 	dp = &vrxp->rxdesc;
1113 	dp->sdma_csr =
1114 	    SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1115 	dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1116 	dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1117 	dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1118 
1119 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1120 	    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1121 	    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1122 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1123 	    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1124 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1125 
1126 	sc->sc_rcvcnt = 0;
1127 	sc->sc_roffset = 0;
1128 	sc->sc_rcvrx = 0;
1129 	sc->sc_rcvdrx = 0;
1130 }
1131 
1132 /*
1133  * gtmpsc_txdesc_init - set up TX descriptor ring
1134  */
1135 STATIC void
1136 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1137 {
1138 	gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1139 	sdma_desc_t *dp;
1140 	int i;
1141 
1142 	first_ptxp = ptxp =
1143 	    (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1144 	vtxp = sc->sc_poll_sdmapage->tx;
1145 	for (i = 0; i < GTMPSC_NTXDESC - 1; i++) {
1146 		dp = &vtxp->txdesc;
1147 		dp->sdma_csr = 0;
1148 		dp->sdma_cnt = 0;
1149 		dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1150 		vtxp++;
1151 		ptxp++;
1152 		dp->sdma_next = (uint32_t)&ptxp->txdesc;
1153 	}
1154 	dp = &vtxp->txdesc;
1155 	dp->sdma_csr = 0;
1156 	dp->sdma_cnt = 0;
1157 	dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1158 	dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1159 
1160 	sc->sc_nexttx = 0;
1161 	sc->sc_lasttx = 0;
1162 }
1163 
1164 STATIC void
1165 gtmpscinit_stop(struct gtmpsc_softc *sc)
1166 {
1167 	uint32_t csr;
1168 	int timo = 10000;	/* XXXX */
1169 
1170 	/* Abort MPSC Rx (aborting Tx messes things up) */
1171 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1172 
1173 	/* abort SDMA RX and stop TX for MPSC unit */
1174 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1175 
1176 	/* poll for SDMA RX abort completion */
1177 	for (; timo > 0; timo--) {
1178 		csr = GT_SDMA_READ(sc, SDMA_SDCM);
1179 		if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1180 			break;
1181 		DELAY(50);
1182 	}
1183 }
1184 
1185 STATIC void
1186 gtmpscinit_start(struct gtmpsc_softc *sc)
1187 {
1188 
1189 	/*
1190 	 * Set pointers of current/first descriptor of TX to SDMA register.
1191 	 */
1192 	GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1193 	GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);
1194 
1195 	/*
1196 	 * Set pointer of current descriptor of RX to SDMA register.
1197 	 */
1198 	GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);
1199 
1200 	/*
1201 	 * initialize SDMA unit Configuration Register
1202 	 */
1203 	GT_SDMA_WRITE(sc, SDMA_SDC,
1204 	    SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM | SDMA_SDC_RFT);
1205 
1206 	gtmpsc_loadchannelregs(sc);
1207 
1208 	/*
1209 	 * set MPSC LO and HI port config registers for GTMPSC unit
1210  	 */
1211 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
1212 	    GTMPSC_MMCR_LO_MODE_UART	|
1213 	    GTMPSC_MMCR_LO_ET		|
1214 	    GTMPSC_MMCR_LO_ER		|
1215 	    GTMPSC_MMCR_LO_NLM);
1216 	GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
1217 	    GTMPSC_MMCR_HI_TCDV_DEFAULT	|
1218 	    GTMPSC_MMCR_HI_RDW		|
1219 	    GTMPSC_MMCR_HI_RCDV_DEFAULT);
1220 
1221 	/*
1222 	 * tell the MPSC receiver to enter hunt mode
1223 	 */
1224 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
1225 }
1226 
1227 STATIC void
1228 gtmpscshutdown(struct gtmpsc_softc *sc)
1229 {
1230 	struct tty *tp;
1231 
1232 #ifdef KGDB
1233 	if ((sc->sc_flags & GTMPSC_KGDB) != 0)
1234 		return;
1235 #endif
1236 	tp = sc->sc_tty;
1237 	mutex_spin_enter(&sc->sc_lock);
1238 	/* Fake carrier off */
1239 	(void) (*tp->t_linesw->l_modem)(tp, 0);
1240 	sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1241 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1242 	mutex_spin_exit(&sc->sc_lock);
1243 }
1244 
1245 STATIC void
1246 gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
1247 {
1248 
1249 	if (sc->sc_dev != NULL)
1250 		gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
1251 	    	    GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
1252 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));
1253 
1254 	/*
1255 	 * set MPSC Protocol configuration register for GTMPSC unit
1256 	 */
1257 	GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
1258 }
1259 
1260 
1261 #ifdef MPSC_CONSOLE
1262 /*
1263  * Following are all routines needed for MPSC to act as console
1264  */
1265 STATIC int
1266 gtmpsccngetc(dev_t dev)
1267 {
1268 
1269 	return gtmpsc_common_getc(&gtmpsc_cn_softc);
1270 }
1271 
1272 STATIC void
1273 gtmpsccnputc(dev_t dev, int c)
1274 {
1275 
1276 	gtmpsc_common_putc(&gtmpsc_cn_softc, c);
1277 }
1278 
1279 STATIC void
1280 gtmpsccnpollc(dev_t dev, int on)
1281 {
1282 }
1283 
1284 STATIC void
1285 gtmpsccnhalt(dev_t dev)
1286 {
1287 	gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
1288 	uint32_t csr;
1289 
1290 	/*
1291 	 * flush TX buffers
1292 	 */
1293 	gtmpsc_txflush(sc);
1294 
1295 	/*
1296 	 * stop MPSC unit RX
1297 	 */
1298 	csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
1299 	csr &= ~GTMPSC_CHR2_EH;
1300 	csr |= GTMPSC_CHR2_RXABORT;
1301 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);
1302 
1303 	DELAY(GTMPSC_RESET_DELAY);
1304 
1305 	/*
1306 	 * abort SDMA RX for MPSC unit
1307 	 */
1308 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
1309 }
1310 
1311 int
1312 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1313 	       int unit, int brg, int speed, tcflag_t tcflag)
1314 {
1315 	struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1316 	int i, res;
1317 	const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1318 
1319 	res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1320 	if (res != 0)
1321 		return res;
1322 
1323 	gtmpscinit_stop(sc);
1324 	gtmpscinit_start(sc);
1325 
1326 	/*
1327 	 * enable SDMA receive
1328 	 */
1329 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1330 
1331 	for (i = 0; i < sizeof(cp); i++) {
1332 		if (*(cp + i) == 0)
1333 			break;
1334 		gtmpsc_common_putc(sc, *(cp + i));
1335 	}
1336 
1337 	cn_tab = &gtmpsc_consdev;
1338 	cn_init_magic(&gtmpsc_cnm_state);
1339 
1340 	return 0;
1341 }
1342 
1343 /*
1344  * gtmpsc_hackinit - hacks required to support GTMPSC console
1345  */
1346 STATIC int
1347 gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
1348 		bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
1349 		int baudrate, tcflag_t tcflag)
1350 {
1351 	gtmpsc_poll_sdma_t *cn_dmapage =
1352 	    (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
1353 	int error;
1354 
1355 	DPRINTF(("hackinit\n"));
1356 
1357 	memset(sc, 0, sizeof(struct gtmpsc_softc));
1358 	error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
1359 	    &sc->sc_mpsch);
1360 	if (error != 0)
1361 		goto fail0;
1362 
1363 	error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
1364 	    &sc->sc_sdmah);
1365 	if (error != 0)
1366 		goto fail1;
1367 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
1368 	   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
1369 	if (error != 0)
1370 		goto fail2;
1371 	error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
1372 	    sizeof(gtmpsc_polltx_t), NULL,
1373 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1374 	if (error != 0)
1375 		goto fail3;
1376 	error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
1377 	   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
1378 	   &sc->sc_rxdma_map);
1379 	if (error != 0)
1380 		goto fail4;
1381 	error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
1382 	    sizeof(gtmpsc_pollrx_t), NULL,
1383 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
1384 	if (error != 0)
1385 		goto fail5;
1386 
1387 	sc->sc_iot = iot;
1388 	sc->sc_dmat = dmat;
1389 	sc->sc_poll_sdmapage = cn_dmapage;
1390 	sc->sc_brg = brg;
1391 	sc->sc_baudrate = baudrate;
1392 	sc->sc_cflag = tcflag;
1393 
1394 	gtmpsc_txdesc_init(sc);
1395 	gtmpsc_rxdesc_init(sc);
1396 
1397 	return 0;
1398 
1399 fail5:
1400 	bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
1401 fail4:
1402 	bus_dmamap_unload(dmat, sc->sc_txdma_map);
1403 fail3:
1404 	bus_dmamap_destroy(dmat, sc->sc_txdma_map);
1405 fail2:
1406 	bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
1407 fail1:
1408 	bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
1409 fail0:
1410 	return error;
1411 }
1412 #endif	/* MPSC_CONSOLE */
1413 
1414 #ifdef KGDB
1415 STATIC int
1416 gtmpsc_kgdb_getc(void *arg)
1417 {
1418 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1419 
1420 	return gtmpsc_common_getc(sc);
1421 }
1422 
1423 STATIC void
1424 gtmpsc_kgdb_putc(void *arg, int c)
1425 {
1426 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1427 
1428 	gtmpsc_common_putc(sc, c);
1429 }
1430 #endif /* KGDB */
1431 
1432 #if defined(MPSC_CONSOLE) || defined(KGDB)
1433 /*
1434  * gtmpsc_common_getc - polled console read
1435  *
1436  *	We copy data from the DMA buffers into a buffer in the softc
1437  *	to reduce descriptor ownership turnaround time.  The MPSC
1438  *	can crater if it wraps descriptor rings; ring wrap is
1439  *	asynchronous to us and throttled only by line speed.
1440  */
1441 STATIC int
1442 gtmpsc_common_getc(struct gtmpsc_softc *sc)
1443 {
1444 	gtmpsc_pollrx_t *vrxp;
1445 	uint32_t csr;
1446 	int ix, ch, wdog_interval = 0;
1447 
1448 	if (!cold)
1449 		mutex_spin_enter(&sc->sc_lock);
1450 
1451 	ix = sc->sc_rcvdrx;
1452 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
1453 	while (sc->sc_rcvcnt == 0) {
1454 		/* Wait receive */
1455 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1456 		    ix * sizeof(gtmpsc_pollrx_t),
1457 		    sizeof(sdma_desc_t),
1458 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1459 		csr = vrxp->rxdesc.sdma_csr;
1460 		if (csr & SDMA_CSR_RX_OWN) {
1461 			GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
1462 			    GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
1463 			if (wdog_interval++ % 32)
1464 				gt_watchdog_service();
1465 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1466 			    ix * sizeof(gtmpsc_pollrx_t),
1467 			    sizeof(sdma_desc_t),
1468 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1469 			DELAY(50);
1470 			continue;
1471 		}
1472 		if (csr & SDMA_CSR_RX_ES)
1473 			aprint_error_dev(sc->sc_dev,
1474 			    "RX error, rxdesc csr 0x%x\n", csr);
1475 
1476 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1477 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1478 		    sizeof(vrxp->rxbuf),
1479 		    BUS_DMASYNC_POSTREAD);
1480 
1481 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
1482 		sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
1483 		sc->sc_roffset = 0;
1484 		sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;
1485 
1486 		if (sc->sc_rcvcnt == 0) {
1487 			/* cleanup this descriptor, and return to DMA */
1488 			CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
1489 			sc->sc_rcvrx = sc->sc_rcvdrx;
1490 		}
1491 
1492 		ix = sc->sc_rcvdrx;
1493 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
1494 	}
1495 	ch = vrxp->rxbuf[sc->sc_roffset++];
1496 	sc->sc_rcvcnt--;
1497 
1498 	if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
1499 		/* cleanup this descriptor, and return to DMA */
1500 		CLEANUP_AND_RETURN_RXDMA(sc, ix);
1501 		sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
1502 	}
1503 
1504 	gt_watchdog_service();
1505 
1506 	if (!cold)
1507 		mutex_spin_exit(&sc->sc_lock);
1508 	return ch;
1509 }
1510 
1511 STATIC void
1512 gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
1513 {
1514 	gtmpsc_polltx_t *vtxp;
1515 	int ix;
1516 	const int nc = 1;
1517 
1518 	/* Get a DMA descriptor */
1519 	if (!cold)
1520 		mutex_spin_enter(&sc->sc_lock);
1521 	ix = sc->sc_nexttx;
1522 	sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1523 	if (sc->sc_nexttx == sc->sc_lasttx) {
1524 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1525 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1526 	}
1527 	if (!cold)
1528 		mutex_spin_exit(&sc->sc_lock);
1529 
1530 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1531 	vtxp->txbuf[0] = c;
1532 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1533 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1534 	    sizeof(vtxp->txbuf),
1535 	    BUS_DMASYNC_PREWRITE);
1536 
1537 	vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
1538 	vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
1539 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1540 	    ix * sizeof(gtmpsc_polltx_t),
1541 	    sizeof(sdma_desc_t),
1542 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1543 
1544 	if (!cold)
1545 		mutex_spin_enter(&sc->sc_lock);
1546 	/*
1547 	 * now kick some SDMA
1548 	 */
1549 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);
1550 
1551 	while (sc->sc_lasttx != sc->sc_nexttx) {
1552 		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
1553 		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
1554 	}
1555 	if (!cold)
1556 		mutex_spin_exit(&sc->sc_lock);
1557 }
1558 
1559 /*
1560  * gtmpsc_common_putc_wait_complete - wait for a polled TX descriptor to complete
1561  */
1562 STATIC void
1563 gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
1564 {
1565 	gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
1566 	uint32_t csr;
1567 	int wdog_interval = 0;
1568 
1569 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1570 	    ix * sizeof(gtmpsc_polltx_t),
1571 	    sizeof(sdma_desc_t),
1572 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1573 	csr = vtxp->txdesc.sdma_csr;
1574 	while (csr & SDMA_CSR_TX_OWN) {
1575 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1576 		    ix * sizeof(gtmpsc_polltx_t),
1577 		    sizeof(sdma_desc_t),
1578 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1579 		DELAY(40);
1580 		if (wdog_interval++ % 32)
1581 			gt_watchdog_service();
1582 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1583 		    ix * sizeof(gtmpsc_polltx_t),
1584 		    sizeof(sdma_desc_t),
1585 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1586 		csr = vtxp->txdesc.sdma_csr;
1587 	}
1588 	if (csr & SDMA_CSR_TX_ES)
1589 		aprint_error_dev(sc->sc_dev,
1590 		    "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
1591 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1592 	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1593 	    sizeof(vtxp->txbuf),
1594 	    BUS_DMASYNC_POSTWRITE);
1595 }
1596 #endif	/* defined(MPSC_CONSOLE) || defined(KGDB) */
1597