1 /*	$NetBSD: hd64570.c,v 1.62 2024/09/14 21:22:37 andvar Exp $	*/
2 
3 /*
4  * Copyright (c) 1999 Christian E. Hopps
5  * Copyright (c) 1998 Vixie Enterprises
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Vixie Enterprises nor the names
18  *    of its contributors may be used to endorse or promote products derived
19  *    from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
22  * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
23  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25  * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * This software has been written for Vixie Enterprises by Michael Graff
36  * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
37  * ``http://www.vix.com''.
38  */
39 
40 /*
41  * TODO:
42  *
43  *	o  teach the receive logic about errors, and about long frames that
44  *	   span more than one input buffer.  (Right now, receive/transmit is
45  *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
46  *	   This is currently 1504, which is large enough to hold the HDLC
47  *	   header and the packet itself.  Packets which are too long are
48  *	   silently dropped on both transmit and receive.)
49  *	o  write code to handle the msci interrupts, needed only for CD
50  *	   and CTS changes.
51  *	o  consider switching back to a "queue tx with DMA active" model which
52  *	   should help sustain outgoing traffic
53  *	o  through clever use of bus_dma*() functions, it should be possible
54  *	   to map the mbuf's data area directly into a descriptor transmit
55  *	   buffer, removing the need to allocate extra memory.  If, however,
56  *	   we run out of descriptors for this, we will then need to allocate
57  *	   one large mbuf, copy the fragmented chain into it, and put it onto
58  *	   a single descriptor.  (A rough sketch follows this comment.)
59  *	o  use bus_dmamap_sync() with the right offset and lengths, rather
60  *	   than cheating and always sync'ing the whole region.
61  *
62  *	o  perhaps allow rx and tx to be in more than one page
63  *	   if not using DMA.  Currently the assumption is that
64  *	   rx uses a page and tx uses a page.
65  */
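/*
 * A rough, untested sketch of the bus_dma*() idea from the TODO above;
 * map creation/teardown and error handling are elided, and the
 * descriptor update is only indicated:
 *
 *	bus_dmamap_t map;	(assumed built with bus_dmamap_create())
 *
 *	if (bus_dmamap_load_mbuf(sc->scu_dmat, map, mb_head,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		... point the tx descriptor at map->dm_segs[0].ds_addr
 *		    instead of copying into sp_txbuf ...
 *		bus_dmamap_sync(sc->scu_dmat, map, 0,
 *		    mb_head->m_pkthdr.len, BUS_DMASYNC_PREWRITE);
 *	}
 */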
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.62 2024/09/14 21:22:37 andvar Exp $");
69 
70 #include "opt_inet.h"
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/device.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/sockio.h>
78 #include <sys/kernel.h>
79 
80 #include <net/if.h>
81 #include <net/if_types.h>
82 
83 #if defined(INET) || defined(INET6)
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/in_var.h>
87 #include <netinet/ip.h>
88 #ifdef INET6
89 #include <netinet6/in6_var.h>
90 #endif
91 #endif
92 
93 #include <net/bpf.h>
94 
95 #include <sys/cpu.h>
96 #include <sys/bus.h>
97 #include <sys/intr.h>
98 
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcireg.h>
101 #include <dev/pci/pcidevs.h>
102 
103 #include <dev/ic/hd64570reg.h>
104 #include <dev/ic/hd64570var.h>
105 
106 #define SCA_DEBUG_RX		0x0001
107 #define SCA_DEBUG_TX		0x0002
108 #define SCA_DEBUG_CISCO		0x0004
109 #define SCA_DEBUG_DMA		0x0008
110 #define SCA_DEBUG_RXPKT		0x0010
111 #define SCA_DEBUG_TXPKT		0x0020
112 #define SCA_DEBUG_INTR		0x0040
113 #define SCA_DEBUG_CLOCK		0x0080
114 
115 #if 0
116 #define SCA_DEBUG_LEVEL	( 0xFFFF )
117 #else
118 #define SCA_DEBUG_LEVEL 0
119 #endif
120 
121 u_int32_t sca_debug = SCA_DEBUG_LEVEL;
122 
123 #if SCA_DEBUG_LEVEL > 0
124 #define SCA_DPRINTF(l, x) do { \
125 	if ((l) & sca_debug) \
126 		printf x;\
127 	} while (0)
128 #else
129 #define SCA_DPRINTF(l, x)
130 #endif
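/*
 * Illustrative use of SCA_DPRINTF (the doubled parentheses are
 * required, since the second argument is handed to printf verbatim):
 *
 *	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: queued %d bytes\n", len));
 */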
131 
132 #if 0
133 #define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
134 #endif
135 
136 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
137 static inline u_int8_t msci_read_1(sca_port_t *, u_int);
138 
139 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
140 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
141 static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
142 static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
143 
144 static	void sca_msci_init(struct sca_softc *, sca_port_t *);
145 static	void sca_dmac_init(struct sca_softc *, sca_port_t *);
146 static void sca_dmac_rxinit(sca_port_t *);
147 
148 static	int sca_dmac_intr(sca_port_t *, u_int8_t);
149 static	int sca_msci_intr(sca_port_t *, u_int8_t);
150 
151 static	void sca_get_packets(sca_port_t *);
152 static	int sca_frame_avail(sca_port_t *);
153 static	void sca_frame_process(sca_port_t *);
154 static	void sca_frame_read_done(sca_port_t *);
155 
156 static	void sca_port_starttx(sca_port_t *);
157 
158 static	void sca_port_up(sca_port_t *);
159 static	void sca_port_down(sca_port_t *);
160 
161 static	int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
162 			    const struct rtentry *);
163 static	int sca_ioctl(struct ifnet *, u_long, void *);
164 static	void sca_start(struct ifnet *);
165 static	void sca_watchdog(struct ifnet *);
166 
167 static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);
168 
169 #if SCA_DEBUG_LEVEL > 0
170 static	void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
171 #endif
172 
173 
174 #define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
175 #define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
176 #define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
177 #define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)
178 
179 #define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
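/*
 * sca_page_addr() reduces an address to its offset within the card's
 * current memory window.  For example, with a hypothetical 16KB window
 * (scu_pagemask == 0x3fff), address 0x4802 becomes window offset 0x0802
 * once the right page has been selected via scu_set_page().
 */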
180 
181 static inline void
182 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
183 {
184 	sca_write_1(scp->sca, scp->msci_off + reg, val);
185 }
186 
187 static inline u_int8_t
188 msci_read_1(sca_port_t *scp, u_int reg)
189 {
190 	return sca_read_1(scp->sca, scp->msci_off + reg);
191 }
192 
193 static inline void
194 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
195 {
196 	sca_write_1(scp->sca, scp->dmac_off + reg, val);
197 }
198 
199 static inline void
200 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
201 {
202 	sca_write_2(scp->sca, scp->dmac_off + reg, val);
203 }
204 
205 static inline u_int8_t
206 dmac_read_1(sca_port_t *scp, u_int reg)
207 {
208 	return sca_read_1(scp->sca, scp->dmac_off + reg);
209 }
210 
211 static inline u_int16_t
212 dmac_read_2(sca_port_t *scp, u_int reg)
213 {
214 	return sca_read_2(scp->sca, scp->dmac_off + reg);
215 }
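/*
 * The msci_*() and dmac_*() wrappers above just add the port's register
 * block offset; e.g. on port 1, msci_read_1(scp, SCA_ST10) reads the
 * SCA register at SCA_MSCI_OFF_1 + SCA_ST10.
 */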
216 
217 #if SCA_DEBUG_LEVEL > 0
218 /*
219  * read the chain pointer
220  */
221 static inline u_int16_t
222 sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
223 {
224 	if (sc->sc_usedma)
225 		return ((dp)->sd_chainp);
226 	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
227 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
228 }
229 #endif
230 
231 /*
232  * write the chain pointer
233  */
234 static inline void
235 sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
236 {
237 	if (sc->sc_usedma)
238 		(dp)->sd_chainp = cp;
239 	else
240 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
241 		    sca_page_addr(sc, dp)
242 		    + offsetof(struct sca_desc, sd_chainp), cp);
243 }
244 
245 #if SCA_DEBUG_LEVEL > 0
246 /*
247  * read the buffer pointer
248  */
249 static inline u_int32_t
250 sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
251 {
252 	u_int32_t address;
253 
254 	if (sc->sc_usedma)
255 		address = dp->sd_bufp | dp->sd_hbufp << 16;
256 	else {
257 		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
258 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
259 		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
260 		    sca_page_addr(sc, dp)
261 		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
262 	}
263 	return (address);
264 }
265 #endif
266 
267 /*
268  * write the buffer pointer
269  */
270 static inline void
271 sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
272 {
273 	if (sc->sc_usedma) {
274 		dp->sd_bufp = bufp & 0xFFFF;
275 		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
276 	} else {
277 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
278 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
279 		    bufp & 0xFFFF);
280 		bus_space_write_1(sc->scu_memt, sc->scu_memh,
281 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
282 		    (bufp & 0x00FF0000) >> 16);
283 	}
284 }
285 
286 /*
287  * read the buffer length
288  */
289 static inline u_int16_t
290 sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
291 {
292 	if (sc->sc_usedma)
293 		return ((dp)->sd_buflen);
294 	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
295 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
296 }
297 
298 /*
299  * write the buffer length
300  */
301 static inline void
302 sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
303 {
304 	if (sc->sc_usedma)
305 		(dp)->sd_buflen = len;
306 	else
307 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
308 		    sca_page_addr(sc, dp)
309 		    + offsetof(struct sca_desc, sd_buflen), len);
310 }
311 
312 /*
313  * read the descriptor status
314  */
315 static inline u_int8_t
316 sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
317 {
318 	if (sc->sc_usedma)
319 		return ((dp)->sd_stat);
320 	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
321 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
322 }
323 
324 /*
325  * write the descriptor status
326  */
327 static inline void
328 sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
329 {
330 	if (sc->sc_usedma)
331 		(dp)->sd_stat = stat;
332 	else
333 		bus_space_write_1(sc->scu_memt, sc->scu_memh,
334 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
335 		    stat);
336 }
337 
338 void
339 sca_init(struct sca_softc *sc)
340 {
341 	/*
342 	 * Do a little sanity check on the number of ports.
343 	 */
344 	if (sc->sc_numports < 1 || sc->sc_numports > 2)
345 		panic("sca can't handle fewer than 1 or more than 2 ports");
346 
347 	/*
348 	 * disable DMA and MSCI interrupts
349 	 */
350 	sca_write_1(sc, SCA_DMER, 0);
351 	sca_write_1(sc, SCA_IER0, 0);
352 	sca_write_1(sc, SCA_IER1, 0);
353 	sca_write_1(sc, SCA_IER2, 0);
354 
355 	/*
356 	 * configure interrupt system
357 	 */
358 	sca_write_1(sc, SCA_ITCR,
359 	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
360 #if 0
361 	/* these are for the interrupt ack cycle which we don't use */
362 	sca_write_1(sc, SCA_IVR, 0x40);
363 	sca_write_1(sc, SCA_IMVR, 0x40);
364 #endif
365 
366 	/*
367 	 * set wait control register to zero wait states
368 	 */
369 	sca_write_1(sc, SCA_PABR0, 0);
370 	sca_write_1(sc, SCA_PABR1, 0);
371 	sca_write_1(sc, SCA_WCRL, 0);
372 	sca_write_1(sc, SCA_WCRM, 0);
373 	sca_write_1(sc, SCA_WCRH, 0);
374 
375 	/*
376 	 * disable DMA and reset status
377 	 */
378 	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);
379 
380 	/*
381 	 * disable transmit DMA for all channels
382 	 */
383 	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
384 	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
385 	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
386 	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
387 	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
388 	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
389 	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
390 	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
391 
392 	/*
393 	 * enable DMA based on channel enable flags for each channel
394 	 */
395 	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);
396 
397 	/*
398 	 * Should check to see if the chip is responding, but for now
399 	 * assume it is.
400 	 */
401 }
402 
403 /*
404  * initialize the port and attach it to the networking layer
405  */
406 void
407 sca_port_attach(struct sca_softc *sc, u_int port)
408 {
409 	struct timeval now;
410 	sca_port_t *scp = &sc->sc_ports[port];
411 	struct ifnet *ifp;
412 	static u_int ntwo_unit = 0;
413 
414 	scp->sca = sc;  /* point back to the parent */
415 
416 	scp->sp_port = port;
417 
418 	if (port == 0) {
419 		scp->msci_off = SCA_MSCI_OFF_0;
420 		scp->dmac_off = SCA_DMAC_OFF_0;
421 		if (sc->sc_parent != NULL)
422 			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
423 		else
424 			ntwo_unit = 0;	/* XXX */
425 	} else {
426 		scp->msci_off = SCA_MSCI_OFF_1;
427 		scp->dmac_off = SCA_DMAC_OFF_1;
428 		if (sc->sc_parent != NULL)
429 			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
430 		else
431 			ntwo_unit = 1;	/* XXX */
432 	}
433 
434 	sca_msci_init(sc, scp);
435 	sca_dmac_init(sc, scp);
436 
437 	/*
438 	 * attach to the network layer
439 	 */
440 	ifp = &scp->sp_if;
441 	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
442 	ifp->if_softc = scp;
443 	ifp->if_mtu = SCA_MTU;
444 	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
445 	ifp->if_type = IFT_PTPSERIAL;
446 	ifp->if_hdrlen = HDLC_HDRLEN;
447 	ifp->if_ioctl = sca_ioctl;
448 	ifp->if_output = sca_output;
449 	ifp->if_watchdog = sca_watchdog;
450 	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
451 	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
452 #ifdef SCA_USE_FASTQ
453 	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
454 #endif
455 	IFQ_SET_READY(&ifp->if_snd);
456 	if_attach(ifp);
457 	if_deferred_start_init(ifp, NULL);
458 	if_alloc_sadl(ifp);
459 	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
460 	bpf_mtap_softint_init(ifp);
461 
462 	if (sc->sc_parent == NULL)
463 		printf("%s: port %d\n", ifp->if_xname, port);
464 	else
465 		printf("%s at %s port %d\n",
466 		       ifp->if_xname, device_xname(sc->sc_parent), port);
467 
468 	/*
469 	 * reset the last seen times on the cisco keepalive protocol
470 	 */
471 	getmicrotime(&now);
472 	scp->cka_lasttx = now.tv_usec;
473 	scp->cka_lastrx = 0;
474 }
475 
476 #if 0
477 /*
478  * returns log2(div), sets 'tmc' for the required freq 'hz'
479  */
480 static u_int8_t
481 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
482 {
483 	u_int32_t tmc, div;
484 	u_int32_t clock;
485 
486 	/* clock hz = (chipclock / tmc) / 2^(div); */
487 	/*
488 	 * TD == tmc * 2^(n)
489 	 *
490 	 * note:
491 	 * 1 <= TD <= 256		TD is inc of 1
492 	 * 2 <= TD <= 512		TD is inc of 2
493 	 * 4 <= TD <= 1024		TD is inc of 4
494 	 * ...
495 	 * 512 <= TD <= 256*512		TD is inc of 512
496 	 *
497  * so note there are overlaps.  We lose precision
498  * as div increases, so we wish to minimize div.
499 	 *
500 	 * basically we want to do
501 	 *
502 	 * tmc = chip / hz, but have tmc <= 256
503 	 */
504 
505 	/* assume system clock is 9.8304MHz or 9830400Hz */
506 	clock = 9830400 >> 1;
507 
508 	/* round down */
509 	div = 0;
510 	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
511 		clock >>= 1;
512 		div++;
513 	}
514 	if (clock / tmc > hz)
515 		tmc++;
516 	if (!tmc)
517 		tmc = 1;
518 
519 	if (div > SCA_RXS_DIV_512) {
520 		/* set to maximums */
521 		div = SCA_RXS_DIV_512;
522 		tmc = 0;
523 	}
524 
525 	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
526 	return (div & 0xFF);
527 }
528 #endif
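/*
 * Worked example of the disabled computation above: for hz == 9600,
 * tmc starts at 4915200 / 9600 == 512 > 256, so the clock is halved
 * once (div == 1), leaving tmc == 2457600 / 9600 == 256, stored as 0
 * ("0 == 256"); the generated clock is then exactly 9600 Hz.
 */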
529 
530 /*
531  * initialize the port's MSCI
532  */
533 static void
534 sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
535 {
536 	/* reset the channel */
537 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
538 
539 	msci_write_1(scp, SCA_MD00,
540 		     (  SCA_MD0_CRC_1
541 		      | SCA_MD0_CRC_CCITT
542 		      | SCA_MD0_CRC_ENABLE
543 		      | SCA_MD0_MODE_HDLC));
544 #if 0
545 	/* immediately send receive reset so the above takes */
546 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
547 #endif
548 
549 	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
550 	msci_write_1(scp, SCA_MD20,
551 		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));
552 
553 	/* be safe and do it again */
554 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
555 
556 	/* setup underrun and idle control, and initial RTS state */
557 	msci_write_1(scp, SCA_CTL0,
558 	     (SCA_CTL_IDLC_PATTERN
559 	     | SCA_CTL_UDRNC_AFTER_FCS
560 	     | SCA_CTL_RTS_LOW));
561 
562 	/* reset the transmitter */
563 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
564 
565 	/*
566 	 * set the clock sources
567 	 */
568 	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
569 	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
570 	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);
571 
572 	/* set external clock generate as requested */
573 	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);
574 
575 	/*
576 	 * XXX don't pay attention to CTS or CD changes right now.  I can't
577 	 * simulate one, and the transmitter will try to transmit even if
578 	 * CD isn't there anyway, so nothing bad SHOULD happen.
579 	 */
580 #if 0
581 	msci_write_1(scp, SCA_IE00, 0);
582 	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
583 #else
584 	/* this would deliver transmitter underrun to ST1/ISR1 */
585 	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
586 	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
587 #endif
588 	msci_write_1(scp, SCA_IE20, 0);
589 
590 	msci_write_1(scp, SCA_FIE0, 0);
591 
592 	msci_write_1(scp, SCA_SA00, 0);
593 	msci_write_1(scp, SCA_SA10, 0);
594 
595 	msci_write_1(scp, SCA_IDL0, 0x7e);
596 
597 	msci_write_1(scp, SCA_RRC0, 0x0e);
598 	/* msci_write_1(scp, SCA_TRC00, 0x10); */
599 	/*
600 	 * The correct values here are important for avoiding underruns.
601 	 * For any FIFO fill level less than or equal to TRC0, txrdy is
602 	 * activated, which will start the dmac transfer to the fifo.
603 	 * For a fill level >= TRC1 + 1, txrdy is cleared, stopping DMA.
604 	 *
605 	 * Thus, if we are using a very fast clock that empties the fifo
606 	 * quickly, delays in the dmac starting to fill the fifo can
607 	 * lead to underruns, so we want a fairly full fifo to still
608 	 * cause the dmac to start.  For cards with on-board ram this
609 	 * has no effect on system performance.  For cards that DMA
610 	 * to/from system memory it will cause more, shorter,
611 	 * bus accesses rather than fewer longer ones.
612 	 */
613 	msci_write_1(scp, SCA_TRC00, 0x00);
614 	msci_write_1(scp, SCA_TRC10, 0x1f);
615 }
616 
617 /*
618  * Take the memory for the port and construct two circular linked lists of
619  * descriptors (one tx, one rx) and set the pointers in these descriptors
620  * to point to the buffer space for this port.
621  */
622 static void
623 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
624 {
625 	sca_desc_t *desc;
626 	u_int32_t desc_p;
627 	u_int32_t buf_p;
628 	int i;
629 
630 	if (sc->sc_usedma)
631 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
632 		    BUS_DMASYNC_PREWRITE);
633 	else {
634 		/*
635 		 * XXX assumes that all tx desc and bufs in same page
636 		 */
637 		sc->scu_page_on(sc);
638 		sc->scu_set_page(sc, scp->sp_txdesc_p);
639 	}
640 
641 	desc = scp->sp_txdesc;
642 	desc_p = scp->sp_txdesc_p;
643 	buf_p = scp->sp_txbuf_p;
644 	scp->sp_txcur = 0;
645 	scp->sp_txinuse = 0;
646 
647 #ifdef DEBUG
648 	/* make sure that we won't wrap */
649 	if ((desc_p & 0xffff0000) !=
650 	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
651 		panic("sca: tx descriptors cross architectural boundary");
652 	if ((buf_p & 0xff000000) !=
653 	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
654 		panic("sca: tx buffers cross architectural boundary");
655 #endif
656 
657 	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
658 		/*
659 		 * desc_p points to the physical address of the NEXT desc
660 		 */
661 		desc_p += sizeof(sca_desc_t);
662 
663 		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
664 		sca_desc_write_bufp(sc, desc, buf_p);
665 		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
666 		sca_desc_write_stat(sc, desc, 0);
667 
668 		desc++;  /* point to the next descriptor */
669 		buf_p += SCA_BSIZE;
670 	}
671 
672 	/*
673 	 * "heal" the circular list by making the last entry point to the
674 	 * first.
675 	 */
676 	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);
677 
678 	/*
679 	 * Now, initialize the transmit DMA logic
680 	 *
681 	 * CPB == chain pointer base address
682 	 */
683 	dmac_write_1(scp, SCA_DSR1, 0);
684 	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
685 	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
686 	/* XXX1
687 	dmac_write_1(scp, SCA_DIR1,
688 		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
689 	 */
690 	dmac_write_1(scp, SCA_DIR1,
691 		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
692 	dmac_write_1(scp, SCA_CPB1,
693 		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));
694 
695 	/*
696 	 * now, do the same thing for receive descriptors
697 	 *
698 	 * XXX assumes that all rx desc and bufs in same page
699 	 */
700 	if (!sc->sc_usedma)
701 		sc->scu_set_page(sc, scp->sp_rxdesc_p);
702 
703 	desc = scp->sp_rxdesc;
704 	desc_p = scp->sp_rxdesc_p;
705 	buf_p = scp->sp_rxbuf_p;
706 
707 #ifdef DEBUG
708 	/* make sure that we won't wrap */
709 	if ((desc_p & 0xffff0000) !=
710 	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
711 		panic("sca: rx descriptors cross architectural boundary");
712 	if ((buf_p & 0xff000000) !=
713 	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
714 		panic("sca: rx buffers cross architectural boundary");
715 #endif
716 
717 	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
718 		/*
719 		 * desc_p points to the physical address of the NEXT desc
720 		 */
721 		desc_p += sizeof(sca_desc_t);
722 
723 		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
724 		sca_desc_write_bufp(sc, desc, buf_p);
725 		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
726 		sca_desc_write_buflen(sc, desc, 0);
727 		sca_desc_write_stat(sc, desc, 0);
728 
729 		desc++;  /* point to the next descriptor */
730 		buf_p += SCA_BSIZE;
731 	}
732 
733 	/*
734 	 * "heal" the circular list by making the last entry point to the
735 	 * first.
736 	 */
737 	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);
738 
739 	sca_dmac_rxinit(scp);
740 
741 	if (sc->sc_usedma)
742 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
743 		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
744 	else
745 		sc->scu_page_off(sc);
746 }
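/*
 * Illustrative shape of a ring built above (shown with 4 descriptors;
 * the real counts are sp_ntxdesc and sp_nrxdesc):
 *
 *	desc[0] -> desc[1] -> desc[2] -> desc[3] --+
 *	   ^                                       |
 *	   +---------------------------------------+
 *
 * Each sd_chainp holds the low 16 bits of the physical address of the
 * next descriptor, and each sd_bufp/sd_hbufp pair points at that
 * descriptor's private SCA_BSIZE buffer.
 */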
747 
748 /*
749  * reset and reinitialize the receive DMA logic
750  */
751 static void
752 sca_dmac_rxinit(sca_port_t *scp)
753 {
754 	/*
755 	 * ... and the receive DMA logic ...
756 	 */
757 	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
758 	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);
759 
760 	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
761 	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);
762 
763 	/* reset descriptors to initial state */
764 	scp->sp_rxstart = 0;
765 	scp->sp_rxend = scp->sp_nrxdesc - 1;
766 
767 	/*
768 	 * CPB == chain pointer base
769 	 * CDA == current descriptor address
770 	 * EDA == error descriptor address (overwrite position)
771 	 *	because cda can't be eda when starting, we always
772 	 *	have a single-buffer gap between cda and eda
773 	 */
774 	dmac_write_1(scp, SCA_CPB0,
775 	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
776 	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
777 	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
778 	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
779 
780 	/*
781 	 * enable receiver DMA
782 	 */
783 	dmac_write_1(scp, SCA_DIR0,
784 		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
785 	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
786 }
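/*
 * After the reset above, illustrated with 4 rx descriptors:
 *
 *	CDA -> desc[0]   desc[1]   desc[2]   desc[3] <- EDA
 *
 * i.e. sp_rxstart == 0 and sp_rxend == nrxdesc - 1, so the chip may
 * fill everything up to, but not including, desc[3];
 * sca_frame_read_done() advances EDA as frames are consumed.
 */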
787 
788 /*
789  * Queue the packet for our start routine to transmit
790  */
791 static int
792 sca_output(
793     struct ifnet *ifp,
794     struct mbuf *m,
795     const struct sockaddr *dst,
796     const struct rtentry *rt0)
797 {
798 	struct hdlc_header *hdlc;
799 	struct ifqueue *ifq = NULL;
800 	int s, error, len;
801 	short mflags;
802 
803 	error = 0;
804 
805 	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
806 		error = ENETDOWN;
807 		goto bad;
808 	}
809 
810 	/*
811 	 * If the queueing discipline needs packet classification,
812 	 * do it before prepending link headers.
813 	 */
814 	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
815 
816 	/*
817 	 * determine address family, and priority for this packet
818 	 */
819 	switch (dst->sa_family) {
820 #ifdef INET
821 	case AF_INET:
822 #ifdef SCA_USE_FASTQ
823 		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
824 		    == IPTOS_LOWDELAY)
825 			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
826 #endif
827 		/*
828 		 * Add cisco serial line header. If there is no
829 		 * space in the first mbuf, allocate another.
830 		 */
831 		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
832 		if (m == NULL)
833 			return (ENOBUFS);
834 		hdlc = mtod(m, struct hdlc_header *);
835 		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
836 		break;
837 #endif
838 #ifdef INET6
839 	case AF_INET6:
840 		/*
841 		 * Add cisco serial line header. If there is no
842 		 * space in the first mbuf, allocate another.
843 		 */
844 		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
845 		if (m == NULL)
846 			return (ENOBUFS);
847 		hdlc = mtod(m, struct hdlc_header *);
848 		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
849 		break;
850 #endif
851 	default:
852 		printf("%s: address family %d unsupported\n",
853 		       ifp->if_xname, dst->sa_family);
854 		error = EAFNOSUPPORT;
855 		goto bad;
856 	}
857 
858 	/* finish */
859 	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
860 		hdlc->h_addr = CISCO_MULTICAST;
861 	else
862 		hdlc->h_addr = CISCO_UNICAST;
863 	hdlc->h_resv = 0;
864 
865 	/*
866 	 * queue the packet.  If interactive, use the fast queue.
867 	 */
868 	mflags = m->m_flags;
869 	len = m->m_pkthdr.len;
870 	s = splnet();
871 	if (ifq != NULL) {
872 		if (IF_QFULL(ifq)) {
873 			IF_DROP(ifq);
874 			m_freem(m);
875 			error = ENOBUFS;
876 		} else
877 			IF_ENQUEUE(ifq, m);
878 	} else
879 		IFQ_ENQUEUE(&ifp->if_snd, m, error);
880 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
881 	if (error != 0) {
882 		if_statinc_ref(ifp, nsr, if_oerrors);
883 		if_statinc_ref(ifp, nsr, if_collisions);
884 		IF_STAT_PUTREF(ifp);
885 		splx(s);
886 		return (error);
887 	}
888 	if_statadd_ref(ifp, nsr, if_obytes, len);
889 	if (mflags & M_MCAST)
890 		if_statinc_ref(ifp, nsr, if_omcasts);
891 	IF_STAT_PUTREF(ifp);
892 
893 	sca_start(ifp);
894 	splx(s);
895 
896 	return (error);
897 
898  bad:
899 	m_freem(m);
900 	return (error);
901 }
902 
903 static int
904 sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
905 {
906 	struct ifreq *ifr;
907 	struct ifaddr *ifa;
908 	int error;
909 	int s;
910 
911 	s = splnet();
912 
913 	ifr = (struct ifreq *)data;
914 	ifa = (struct ifaddr *)data;
915 	error = 0;
916 
917 	switch (cmd) {
918 	case SIOCINITIFADDR:
919 		switch(ifa->ifa_addr->sa_family) {
920 #ifdef INET
921 		case AF_INET:
922 #endif
923 #ifdef INET6
924 		case AF_INET6:
925 #endif
926 #if defined(INET) || defined(INET6)
927 			ifp->if_flags |= IFF_UP;
928 			sca_port_up(ifp->if_softc);
929 			break;
930 #endif
931 		default:
932 			error = EAFNOSUPPORT;
933 			break;
934 		}
935 		break;
936 
937 	case SIOCSIFDSTADDR:
938 #ifdef INET
939 		if (ifa->ifa_addr->sa_family == AF_INET)
940 			break;
941 #endif
942 #ifdef INET6
943 		if (ifa->ifa_addr->sa_family == AF_INET6)
944 			break;
945 #endif
946 		error = EAFNOSUPPORT;
947 		break;
948 
949 	case SIOCADDMULTI:
950 	case SIOCDELMULTI:
951 		/* XXX need multicast group management code */
952 		if (ifr == NULL) {
953 			error = EAFNOSUPPORT;		/* XXX */
954 			break;
955 		}
956 		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
957 #ifdef INET
958 		case AF_INET:
959 			break;
960 #endif
961 #ifdef INET6
962 		case AF_INET6:
963 			break;
964 #endif
965 		default:
966 			error = EAFNOSUPPORT;
967 			break;
968 		}
969 		break;
970 
971 	case SIOCSIFFLAGS:
972 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
973 			break;
974 		if (ifr->ifr_flags & IFF_UP) {
975 			ifp->if_flags |= IFF_UP;
976 			sca_port_up(ifp->if_softc);
977 		} else {
978 			ifp->if_flags &= ~IFF_UP;
979 			sca_port_down(ifp->if_softc);
980 		}
981 
982 		break;
983 
984 	default:
985 		error = ifioctl_common(ifp, cmd, data);
986 	}
987 
988 	splx(s);
989 	return error;
990 }
991 
992 /*
993  * start packet transmission on the interface
994  *
995  * MUST BE CALLED AT splnet()
996  */
997 static void
998 sca_start(struct ifnet *ifp)
999 {
1000 	sca_port_t *scp = ifp->if_softc;
1001 	struct sca_softc *sc = scp->sca;
1002 	struct mbuf *m, *mb_head;
1003 	sca_desc_t *desc;
1004 	u_int8_t *buf, stat;
1005 	u_int32_t buf_p;
1006 	int nexttx;
1007 	int trigger_xmit;
1008 	u_int len;
1009 
1010 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));
1011 
1012 	/*
1013 	 * can't queue when we are full or transmitter is busy
1014 	 */
1015 #ifdef oldcode
1016 	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
1017 	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1018 		return;
1019 #else
1020 	if (scp->sp_txinuse
1021 	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1022 		return;
1023 #endif
1024 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
1025 
1026 	/*
1027 	 * XXX assume that all tx desc and bufs in same page
1028 	 */
1029 	if (sc->sc_usedma)
1030 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1031 		    0, sc->scu_allocsize,
1032 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1033 	else {
1034 		sc->scu_page_on(sc);
1035 		sc->scu_set_page(sc, scp->sp_txdesc_p);
1036 	}
1037 
1038 	trigger_xmit = 0;
1039 
1040  txloop:
1041 	IF_DEQUEUE(&scp->linkq, mb_head);
1042 	if (mb_head == NULL)
1043 #ifdef SCA_USE_FASTQ
1044 		IF_DEQUEUE(&scp->fastq, mb_head);
1045 	if (mb_head == NULL)
1046 #endif
1047 		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1048 	if (mb_head == NULL)
1049 		goto start_xmit;
1050 
1051 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
1052 #ifdef oldcode
1053 	if (scp->sp_txinuse != 0) {
1054 		/* Kill EOT interrupts on the previous descriptor. */
1055 		desc = &scp->sp_txdesc[scp->sp_txcur];
1056 		stat = sca_desc_read_stat(sc, desc);
1057 		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);
1058 
1059 		/* Figure out what the next free descriptor is. */
1060 		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1061 	} else
1062 		nexttx = 0;
1063 #endif	/* oldcode */
1064 
1065 	if (scp->sp_txinuse)
1066 		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1067 	else
1068 		nexttx = 0;
1069 
1070 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));
1071 
1072 	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
1073 	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;
1074 
1075 	/* XXX hoping we can delay the desc write till after we don't drop. */
1076 	desc = &scp->sp_txdesc[nexttx];
1077 
1078 	/* XXX isn't this set already?? */
1079 	sca_desc_write_bufp(sc, desc, buf_p);
1080 	len = 0;
1081 
1082 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));
1083 
1084 #if 0	/* uncomment this for a core in cc1 */
1085 X
1086 #endif
1087 	/*
1088 	 * Run through the chain, copying data into the descriptor as we
1089 	 * go.  If it won't fit in one transmission block, drop the packet.
1090 	 * No, this isn't nice, but most of the time it _will_ fit.
1091 	 */
1092 	for (m = mb_head ; m != NULL ; m = m->m_next) {
1093 		if (m->m_len != 0) {
1094 			len += m->m_len;
1095 			if (len > SCA_BSIZE) {
1096 				m_freem(mb_head);
1097 				goto txloop;
1098 			}
1099 			SCA_DPRINTF(SCA_DEBUG_TX,
1100 			    ("TX: about to mbuf len %d\n", m->m_len));
1101 
1102 			if (sc->sc_usedma)
1103 				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
1104 			else
1105 				bus_space_write_region_1(sc->scu_memt,
1106 				    sc->scu_memh, sca_page_addr(sc, buf_p),
1107 				    mtod(m, u_int8_t *), m->m_len);
1108 			buf += m->m_len;
1109 			buf_p += m->m_len;
1110 		}
1111 	}
1112 
1113 	/* set the length and mark end of frame; EOT is set at start_xmit */
1114 	sca_desc_write_buflen(sc, desc, len);
1115 	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);
1116 
1117 	if_statinc(ifp, if_opackets);
1118 
1119 	/*
1120 	 * Pass packet to bpf if there is a listener.
1121 	 */
1122 	bpf_mtap(ifp, mb_head, BPF_D_OUT);
1123 
1124 	m_freem(mb_head);
1125 
1126 	scp->sp_txcur = nexttx;
1127 	scp->sp_txinuse++;
1128 	trigger_xmit = 1;
1129 
1130 	SCA_DPRINTF(SCA_DEBUG_TX,
1131 	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));
1132 
1133 	/*
1134 	 * XXX didn't this use to limit us to 1?!  multi may be untested;
1135 	 * sp_ntxdesc used to be hard-coded to 2, with the claim of a
1136 	 * too-hard-to-find bug.
1137 	 */
1138 #ifdef oldcode
1139 	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
1140 #endif
1141 	if (scp->sp_txinuse < scp->sp_ntxdesc)
1142 		goto txloop;
1143 
1144  start_xmit:
1145 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));
1146 
1147 	if (trigger_xmit != 0) {
1148 		/* set EOT on final descriptor */
1149 		desc = &scp->sp_txdesc[scp->sp_txcur];
1150 		stat = sca_desc_read_stat(sc, desc);
1151 		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
1152 	}
1153 
1154 	if (sc->sc_usedma)
1155 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
1156 		    sc->scu_allocsize,
1157 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1158 
1159 	if (trigger_xmit != 0)
1160 		sca_port_starttx(scp);
1161 
1162 	if (!sc->sc_usedma)
1163 		sc->scu_page_off(sc);
1164 }
1165 
1166 static void
1167 sca_watchdog(struct ifnet *ifp)
1168 {
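	/* nothing to do; DMA and MSCI errors are handled in the interrupt paths */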
1169 }
1170 
1171 int
1172 sca_hardintr(struct sca_softc *sc)
1173 {
1174 	u_int8_t isr0, isr1, isr2;
1175 	int	ret;
1176 
1177 	ret = 0;  /* non-zero means we processed at least one interrupt */
1178 
1179 	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));
1180 
1181 	while (1) {
1182 		/*
1183 		 * read SCA interrupts
1184 		 */
1185 		isr0 = sca_read_1(sc, SCA_ISR0);
1186 		isr1 = sca_read_1(sc, SCA_ISR1);
1187 		isr2 = sca_read_1(sc, SCA_ISR2);
1188 
1189 		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
1190 			break;
1191 
1192 		SCA_DPRINTF(SCA_DEBUG_INTR,
1193 			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
1194 			     isr0, isr1, isr2));
1195 
1196 		/*
1197 		 * check DMAC interrupt
1198 		 */
1199 		if (isr1 & 0x0f)
1200 			ret += sca_dmac_intr(&sc->sc_ports[0],
1201 					     isr1 & 0x0f);
1202 
1203 		if (isr1 & 0xf0)
1204 			ret += sca_dmac_intr(&sc->sc_ports[1],
1205 			     (isr1 & 0xf0) >> 4);
1206 
1207 		/*
1208 		 * msci interrupts
1209 		 */
1210 		if (isr0 & 0x0f)
1211 			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);
1212 
1213 		if (isr0 & 0xf0)
1214 			ret += sca_msci_intr(&sc->sc_ports[1],
1215 			    (isr0 & 0xf0) >> 4);
1216 
1217 #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
1218 		if (isr2)
1219 			ret += sca_timer_intr(sc, isr2);
1220 #endif
1221 	}
1222 
1223 	return (ret);
1224 }
1225 
1226 static int
1227 sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
1228 {
1229 	u_int8_t	 dsr;
1230 	int		 ret;
1231 
1232 	ret = 0;
1233 
1234 	/*
1235 	 * Check transmit channel
1236 	 */
1237 	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
1238 		SCA_DPRINTF(SCA_DEBUG_INTR,
1239 		    ("TX INTERRUPT port %d\n", scp->sp_port));
1240 
1241 		dsr = 1;
1242 		while (dsr != 0) {
1243 			ret++;
1244 			/*
1245 			 * reset interrupt
1246 			 */
1247 			dsr = dmac_read_1(scp, SCA_DSR1);
1248 			dmac_write_1(scp, SCA_DSR1,
1249 				     dsr | SCA_DSR_DEWD);
1250 
1251 			/*
1252 			 * filter out the bits we don't care about
1253 			 */
1254 			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
1255 			if (dsr == 0)
1256 				break;
1257 
1258 			/*
1259 			 * check for counter overflow
1260 			 */
1261 			if (dsr & SCA_DSR_COF) {
1262 				printf("%s: TXDMA counter overflow\n",
1263 				       scp->sp_if.if_xname);
1264 
1265 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1266 				scp->sp_txcur = 0;
1267 				scp->sp_txinuse = 0;
1268 			}
1269 
1270 			/*
1271 			 * check for buffer overflow
1272 			 */
1273 			if (dsr & SCA_DSR_BOF) {
1274 				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1275 				       scp->sp_if.if_xname,
1276 				       dmac_read_2(scp, SCA_CDAL1),
1277 				       dmac_read_2(scp, SCA_EDAL1),
1278 				       dmac_read_1(scp, SCA_CPB1));
1279 
1280 				/*
1281 				 * Yikes.  Arrange for a full
1282 				 * transmitter restart.
1283 				 */
1284 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1285 				scp->sp_txcur = 0;
1286 				scp->sp_txinuse = 0;
1287 			}
1288 
1289 			/*
1290 			 * check for end of transfer, which is not
1291 			 * an error. It means that all data queued
1292 			 * was transmitted, and we mark ourselves as
1293 			 * not in use and stop the watchdog timer.
1294 			 */
1295 			if (dsr & SCA_DSR_EOT) {
1296 				SCA_DPRINTF(SCA_DEBUG_TX,
1297 			    ("Transmit completed. cda %x eda %x dsr %x\n",
1298 				    dmac_read_2(scp, SCA_CDAL1),
1299 				    dmac_read_2(scp, SCA_EDAL1),
1300 				    dsr));
1301 
1302 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1303 				scp->sp_txcur = 0;
1304 				scp->sp_txinuse = 0;
1305 
1306 				/*
1307 				 * check for more packets
1308 				 */
1309 				if_schedule_deferred_start(&scp->sp_if);
1310 			}
1311 		}
1312 	}
1313 	/*
1314 	 * receive channel check
1315 	 */
1316 	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
1317 		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
1318 		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));
1319 
1320 		dsr = 1;
1321 		while (dsr != 0) {
1322 			ret++;
1323 
1324 			dsr = dmac_read_1(scp, SCA_DSR0);
1325 			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);
1326 
1327 			/*
1328 			 * filter out the bits we don't care about
1329 			 */
1330 			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
1331 				| SCA_DSR_BOF | SCA_DSR_EOT);
1332 			if (dsr == 0)
1333 				break;
1334 
1335 			/*
1336 			 * End of frame
1337 			 */
1338 			if (dsr & SCA_DSR_EOM) {
1339 				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));
1340 
1341 				sca_get_packets(scp);
1342 			}
1343 
1344 			/*
1345 			 * check for counter overflow
1346 			 */
1347 			if (dsr & SCA_DSR_COF) {
1348 				printf("%s: RXDMA counter overflow\n",
1349 				       scp->sp_if.if_xname);
1350 
1351 				sca_dmac_rxinit(scp);
1352 			}
1353 
1354 			/*
1355 			 * check for end of transfer, which means we
1356 			 * ran out of descriptors to receive into.
1357 			 * This means the line is much faster than
1358 			 * we can handle.
1359 			 */
1360 			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
1361 				printf("%s: RXDMA buffer overflow\n",
1362 				       scp->sp_if.if_xname);
1363 
1364 				sca_dmac_rxinit(scp);
1365 			}
1366 		}
1367 	}
1368 
1369 	return ret;
1370 }
1371 
1372 static int
1373 sca_msci_intr(sca_port_t *scp, u_int8_t isr)
1374 {
1375 	u_int8_t st1, trc0;
1376 
1377 	/* get and clear the specific interrupt -- should act on it :) */
1378 	if ((st1 = msci_read_1(scp, SCA_ST10))) {
1379 		/* clear the interrupt */
1380 		msci_write_1(scp, SCA_ST10, st1);
1381 
1382 		if (st1 & SCA_ST1_UDRN) {
1383 			/* underrun -- try to increase ready control */
1384 			trc0 = msci_read_1(scp, SCA_TRC00);
1385 			if (trc0 == 0x1f)
1386 				printf("TX: underrun - fifo depth maxed\n");
1387 			else {
1388 				if ((trc0 += 2) > 0x1f)
1389 					trc0 = 0x1f;
1390 				SCA_DPRINTF(SCA_DEBUG_TX,
1391 				   ("TX: udrn - incr fifo to %d\n", trc0));
1392 				msci_write_1(scp, SCA_TRC00, trc0);
1393 			}
1394 		}
1395 	}
1396 	return (0);
1397 }
1398 
1399 static void
1400 sca_get_packets(sca_port_t *scp)
1401 {
1402 	struct sca_softc *sc;
1403 
1404 	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));
1405 
1406 	sc = scp->sca;
1407 	if (sc->sc_usedma)
1408 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1409 		    0, sc->scu_allocsize,
1410 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1411 	else {
1412 		/*
1413 		 * XXX this code is unable to deal with rx stuff
1414 		 * in more than 1 page
1415 		 */
1416 		sc->scu_page_on(sc);
1417 		sc->scu_set_page(sc, scp->sp_rxdesc_p);
1418 	}
1419 
1420 	/* process as many frames as are available */
1421 	while (sca_frame_avail(scp)) {
1422 		sca_frame_process(scp);
1423 		sca_frame_read_done(scp);
1424 	}
1425 
1426 	if (sc->sc_usedma)
1427 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1428 		    0, sc->scu_allocsize,
1429 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1430 	else
1431 		sc->scu_page_off(sc);
1432 }
1433 
1434 /*
1435  * Starting with the first descriptor we wanted to read into, up to but
1436  * not including the current SCA read descriptor, look for a packet.
1437  *
1438  * must be called at splnet()
1439  */
1440 static int
1441 sca_frame_avail(sca_port_t *scp)
1442 {
1443 	u_int16_t cda;
1444 	u_int32_t desc_p;	/* physical address (lower 16 bits) */
1445 	sca_desc_t *desc;
1446 	u_int8_t rxstat;
1447 	int cdaidx, toolong;
1448 
1449 	/*
1450 	 * Read the current descriptor from the SCA.
1451 	 */
1452 	cda = dmac_read_2(scp, SCA_CDAL0);
1453 
1454 	/*
1455 	 * calculate the index of the current descriptor
1456 	 */
1457 	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
1458 	desc_p = cda - desc_p;
1459 	cdaidx = desc_p / sizeof(sca_desc_t);
1460 
1461 	SCA_DPRINTF(SCA_DEBUG_RX,
1462 	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
1463 	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));
1464 
1465 	/* the chip's current descriptor must lie within our ring */
1466 	if (cdaidx >= scp->sp_nrxdesc)
1467 		panic("current descriptor index out of range");
1468 
1469 	/* see if we have a valid frame available */
1470 	toolong = 0;
1471 	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
1472 		/*
1473 		 * We might have a valid descriptor.  Set up a pointer
1474 		 * to the kva address for it so we can more easily examine
1475 		 * the contents.
1476 		 */
1477 		desc = &scp->sp_rxdesc[scp->sp_rxstart];
1478 		rxstat = sca_desc_read_stat(scp->sca, desc);
1479 
1480 		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
1481 		    scp->sp_port, scp->sp_rxstart, rxstat));
1482 
1483 		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
1484 		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));
1485 
1486 		/*
1487 		 * check for errors
1488 		 */
1489 		if (rxstat & SCA_DESC_ERRORS) {
1490 			/*
1491 			 * consider an error condition the end
1492 			 * of a frame
1493 			 */
1494 			if_statinc(&scp->sp_if, if_ierrors);
1495 			toolong = 0;
1496 			continue;
1497 		}
1498 
1499 		/*
1500 		 * if we aren't skipping overlong frames
1501 		 * we are done, otherwise reset and look for
1502 		 * another good frame
1503 		 */
1504 		if (rxstat & SCA_DESC_EOM) {
1505 			if (!toolong)
1506 				return (1);
1507 			toolong = 0;
1508 		} else if (!toolong) {
1509 			/*
1510 			 * we currently don't deal with frames
1511 			 * larger than a single buffer (fixed MTU)
1512 			 */
1513 			if_statinc(&scp->sp_if, if_ierrors);
1514 			toolong = 1;
1515 		}
1516 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
1517 		    scp->sp_rxstart));
1518 	}
1519 
1520 	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
1521 	return 0;
1522 }
1523 
1524 /*
1525  * Pass the packet up to the kernel if it is a packet we want to pay
1526  * attention to.
1527  *
1528  * MUST BE CALLED AT splnet()
1529  */
1530 static void
1531 sca_frame_process(sca_port_t *scp)
1532 {
1533 	pktqueue_t *pktq = NULL;
1534 	struct hdlc_header *hdlc;
1535 	struct cisco_pkt *cisco;
1536 	sca_desc_t *desc;
1537 	struct mbuf *m;
1538 	u_int8_t *bufp;
1539 	u_int16_t len;
1540 	u_int32_t t;
1541 
1542 	t = time_uptime * 1000;
1543 	desc = &scp->sp_rxdesc[scp->sp_rxstart];
1544 	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
1545 	len = sca_desc_read_buflen(scp->sca, desc);
1546 
1547 	SCA_DPRINTF(SCA_DEBUG_RX,
1548 	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
1549 	    (bus_addr_t)bufp, len));
1550 
1551 #if SCA_DEBUG_LEVEL > 0
1552 	if (sca_debug & SCA_DEBUG_RXPKT)
1553 		sca_frame_print(scp, desc, bufp);
1554 #endif
1555 	/*
1556 	 * skip packets that are too short
1557 	 */
1558 	if (len < sizeof(struct hdlc_header)) {
1559 		if_statinc(&scp->sp_if, if_ierrors);
1560 		return;
1561 	}
1562 
1563 	m = sca_mbuf_alloc(scp->sca, bufp, len);
1564 	if (m == NULL) {
1565 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
1566 		return;
1567 	}
1568 
1569 	/*
1570 	 * read and then strip off the HDLC information
1571 	 */
1572 	m = m_pullup(m, sizeof(struct hdlc_header));
1573 	if (m == NULL) {
1574 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1575 		return;
1576 	}
1577 
1578 	bpf_mtap_softint(&scp->sp_if, m);
1579 
1580 	if_statinc(&scp->sp_if, if_ipackets);
1581 
1582 	hdlc = mtod(m, struct hdlc_header *);
1583 	switch (ntohs(hdlc->h_proto)) {
1584 #ifdef INET
1585 	case HDLC_PROTOCOL_IP:
1586 		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1587 		m_set_rcvif(m, &scp->sp_if);
1588 		m->m_pkthdr.len -= sizeof(struct hdlc_header);
1589 		m->m_data += sizeof(struct hdlc_header);
1590 		m->m_len -= sizeof(struct hdlc_header);
1591 		pktq = ip_pktq;
1592 		break;
1593 #endif	/* INET */
1594 #ifdef INET6
1595 	case HDLC_PROTOCOL_IPV6:
1596 		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
1597 		m_set_rcvif(m, &scp->sp_if);
1598 		m->m_pkthdr.len -= sizeof(struct hdlc_header);
1599 		m->m_data += sizeof(struct hdlc_header);
1600 		m->m_len -= sizeof(struct hdlc_header);
1601 		pktq = ip6_pktq;
1602 		break;
1603 #endif	/* INET6 */
1604 	case CISCO_KEEPALIVE:
1605 		SCA_DPRINTF(SCA_DEBUG_CISCO,
1606 			    ("Received CISCO keepalive packet\n"));
1607 
1608 		if (len < CISCO_PKT_LEN) {
1609 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1610 				    ("short CISCO packet %d, wanted %d\n",
1611 				     len, CISCO_PKT_LEN));
1612 			if_statinc(&scp->sp_if, if_ierrors);
1613 			goto dropit;
1614 		}
1615 
1616 		m = m_pullup(m, sizeof(struct cisco_pkt));
1617 		if (m == NULL) {
1618 			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1619 			return;
1620 		}
1621 
1622 		cisco = (struct cisco_pkt *)
1623 		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
1624 		m_set_rcvif(m, &scp->sp_if);
1625 
1626 		switch (ntohl(cisco->type)) {
1627 		case CISCO_ADDR_REQ:
1628 			printf("Got CISCO addr_req, ignoring\n");
1629 			if_statinc(&scp->sp_if, if_ierrors);
1630 			goto dropit;
1631 
1632 		case CISCO_ADDR_REPLY:
1633 			printf("Got CISCO addr_reply, ignoring\n");
1634 			if_statinc(&scp->sp_if, if_ierrors);
1635 			goto dropit;
1636 
1637 		case CISCO_KEEPALIVE_REQ:
1638 
1639 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1640 				    ("Received KA, mseq %d,"
1641 				     " yseq %d, rel 0x%04x, t0"
1642 				     " %04x, t1 %04x\n",
1643 				     ntohl(cisco->par1), ntohl(cisco->par2),
1644 				     ntohs(cisco->rel), ntohs(cisco->time0),
1645 				     ntohs(cisco->time1)));
1646 
1647 			scp->cka_lastrx = ntohl(cisco->par1);
1648 			scp->cka_lasttx++;
1649 
1650 			/*
1651 			 * schedule the transmit right here.
1652 			 */
1653 			cisco->par2 = cisco->par1;
1654 			cisco->par1 = htonl(scp->cka_lasttx);
1655 			cisco->time0 = htons((u_int16_t)(t >> 16));
1656 			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1657 
1658 			if (IF_QFULL(&scp->linkq)) {
1659 				IF_DROP(&scp->linkq);
1660 				goto dropit;
1661 			}
1662 			IF_ENQUEUE(&scp->linkq, m);
1663 
1664 			sca_start(&scp->sp_if);
1665 
1666 			/* sca_start() may have reset the page; restore it */
1667 			if (!scp->sca->sc_usedma) {
1668 				scp->sca->scu_set_page(scp->sca,
1669 				    scp->sp_rxdesc_p);
1670 				scp->sca->scu_page_on(scp->sca);
1671 			}
1672 			return;
1673 		default:
1674 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1675 				    ("Unknown CISCO keepalive protocol 0x%04x\n",
1676 				     ntohl(cisco->type)));
1677 
1678 			if_statinc(&scp->sp_if, if_noproto);
1679 			goto dropit;
1680 		}
1681 		return;
1682 	default:
1683 		SCA_DPRINTF(SCA_DEBUG_RX,
1684 			    ("Unknown/unexpected ethertype 0x%04x\n",
1685 			     ntohs(hdlc->h_proto)));
1686 		if_statinc(&scp->sp_if, if_noproto);
1687 		goto dropit;
1688 	}
1689 
1690 	/* Queue the packet */
1691 	KASSERT(pktq != NULL);
1692 	if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
1693 		if_statinc(&scp->sp_if, if_iqdrops);
1694 		goto dropit;
1695 	}
1696 	return;
1697 dropit:
1698 	m_freem(m);
1699 	return;
1700 }
1701 
1702 #if SCA_DEBUG_LEVEL > 0
1703 /*
1704  * do a hex dump of the packet received into descriptor "desc" with
1705  * data buffer "p"
1706  */
1707 static void
1708 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1709 {
1710 	int i;
1711 	int nothing_yet = 1;
1712 	struct sca_softc *sc;
1713 	u_int len;
1714 
1715 	sc = scp->sca;
1716 	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
1717 	       desc,
1718 	       sca_desc_read_chainp(sc, desc),
1719 	       sca_desc_read_bufp(sc, desc),
1720 	       sca_desc_read_stat(sc, desc),
1721 	       (len = sca_desc_read_buflen(sc, desc)));
1722 
1723 	for (i = 0 ; i < len && i < 256; i++) {
1724 		if (nothing_yet == 1 &&
1725 		    (sc->sc_usedma ? *p
1726 			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
1727 		    sca_page_addr(sc, p))) == 0) {
1728 			p++;
1729 			continue;
1730 		}
1731 		nothing_yet = 0;
1732 		if (i % 16 == 0)
1733 			printf("\n");
1734 		printf("%02x ",
1735 		    (sc->sc_usedma ? *p
1736 		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1737 		    sca_page_addr(sc, p))));
1738 		p++;
1739 	}
1740 
1741 	if (i % 16 != 1)
1742 		printf("\n");
1743 }
1744 #endif
1745 
1746 /*
1747  * adjust things because we have just read the current starting
1748  * frame
1749  *
1750  * must be called at splnet()
1751  */
1752 static void
1753 sca_frame_read_done(sca_port_t *scp)
1754 {
1755 	u_int16_t edesc_p;
1756 
1757 	/* update where our indices are */
1758 	scp->sp_rxend = scp->sp_rxstart;
1759 	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;
1760 
1761 	/* update the error [end] descriptor */
1762 	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
1763 	    (sizeof(sca_desc_t) * scp->sp_rxend);
1764 	dmac_write_2(scp, SCA_EDAL0, edesc_p);
1765 }
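/*
 * For example, with sp_nrxdesc == 8 and sp_rxstart == 5 on entry, the
 * code above leaves sp_rxend == 5 and sp_rxstart == 6, and points EDAL0
 * at descriptor 5, so the chip may overwrite everything up to, but not
 * including, the slot just consumed.
 */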
1766 
1767 /*
1768  * set a port to the "up" state
1769  */
1770 static void
1771 sca_port_up(sca_port_t *scp)
1772 {
1773 	struct sca_softc *sc = scp->sca;
1774 	struct timeval now;
1775 #if 0
1776 	u_int8_t ier0, ier1;
1777 #endif
1778 
1779 	/*
1780 	 * reset things
1781 	 */
1782 #if 0
1783 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
1784 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
1785 #endif
1786 	/*
1787 	 * clear in-use flag
1788 	 */
1789 	scp->sp_if.if_flags &= ~IFF_OACTIVE;
1790 	scp->sp_if.if_flags |= IFF_RUNNING;
1791 
1792 	/*
1793 	 * raise DTR
1794 	 */
1795 	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);
1796 
1797 	/*
1798 	 * raise RTS
1799 	 */
1800 	msci_write_1(scp, SCA_CTL0,
1801 	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1802 	     | SCA_CTL_RTS_HIGH);
1803 
1804 #if 0
1805 	/*
1806 	 * enable interrupts (no timer IER2)
1807 	 */
1808 	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1809 	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1810 	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1811 	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1812 	if (scp->sp_port == 1) {
1813 		ier0 <<= 4;
1814 		ier1 <<= 4;
1815 	}
1816 	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
1817 	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
1818 #else
1819 	if (scp->sp_port == 0) {
1820 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
1821 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
1822 	} else {
1823 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
1824 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
1825 	}
1826 #endif
1827 
1828 	/*
1829 	 * enable transmit and receive
1830 	 */
1831 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
1832 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);
1833 
1834 	/*
1835 	 * reset internal state
1836 	 */
1837 	scp->sp_txinuse = 0;
1838 	scp->sp_txcur = 0;
1839 	getmicrotime(&now);
1840 	scp->cka_lasttx = now.tv_usec;
1841 	scp->cka_lastrx = 0;
1842 }
1843 
1844 /*
1845  * set a port to the "down" state
1846  */
1847 static void
1848 sca_port_down(sca_port_t *scp)
1849 {
1850 	struct sca_softc *sc = scp->sca;
1851 #if 0
1852 	u_int8_t ier0, ier1;
1853 #endif
1854 
1855 	/*
1856 	 * lower DTR
1857 	 */
1858 	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);
1859 
1860 	/*
1861 	 * lower RTS
1862 	 */
1863 	msci_write_1(scp, SCA_CTL0,
1864 	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1865 	     | SCA_CTL_RTS_LOW);
1866 
1867 	/*
1868 	 * disable interrupts
1869 	 */
1870 #if 0
1871 	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1872 	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1873 	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1874 	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1875 	if (scp->sp_port == 1) {
1876 		ier0 <<= 4;
1877 		ier1 <<= 4;
1878 	}
1879 	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
1880 	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
1881 #else
1882 	if (scp->sp_port == 0) {
1883 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
1884 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
1885 	} else {
1886 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
1887 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
1888 	}
1889 #endif
1890 
1891 	/*
1892 	 * disable transmit and receive
1893 	 */
1894 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
1895 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);
1896 
1897 	/*
1898 	 * no, we're not in use anymore
1899 	 */
1900 	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
1901 }
1902 
1903 /*
1904  * disable all DMA and interrupts for all ports at once.
1905  */
1906 void
1907 sca_shutdown(struct sca_softc *sca)
1908 {
1909 	/*
1910 	 * disable DMA and interrupts
1911 	 */
1912 	sca_write_1(sca, SCA_DMER, 0);
1913 	sca_write_1(sca, SCA_IER0, 0);
1914 	sca_write_1(sca, SCA_IER1, 0);
1915 }
1916 
1917 /*
1918  * If there are packets to transmit, start the transmit DMA logic.
1919  */
1920 static void
1921 sca_port_starttx(sca_port_t *scp)
1922 {
1923 	u_int32_t	startdesc_p, enddesc_p;
1924 	int enddesc;
1925 
1926 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));
1927 
1928 	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
1929 	    || scp->sp_txinuse == 0)
1930 		return;
1931 
1932 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));
1933 
1934 	scp->sp_if.if_flags |= IFF_OACTIVE;
1935 
1936 	/*
1937 	 * We have something to do, since we have at least one packet
1938 	 * waiting, and we are not already marked as active.
1939 	 */
1940 	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1941 	startdesc_p = scp->sp_txdesc_p;
1942 	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;
1943 
1944 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
1945 	    startdesc_p, enddesc_p));
1946 
1947 	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
1948 	dmac_write_2(scp, SCA_CDAL1,
1949 		     (u_int16_t)(startdesc_p & 0x0000ffff));
1950 
1951 	/*
1952 	 * enable the DMA
1953 	 */
1954 	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
1955 }
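/*
 * Illustration: with sp_ntxdesc == 4 and sp_txcur == 2, enddesc is 3,
 * so CDAL1 points at descriptor 0 and EDAL1 at descriptor 3; the DMAC
 * walks descriptors 0..2 and stops when its current address reaches
 * EDAL1.
 */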
1956 
1957 /*
1958  * allocate an mbuf at least long enough to hold "len" bytes.
1959  * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
1960  * otherwise let the caller handle copying the data in.
1961  */
1962 static struct mbuf *
1963 sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
1964 {
1965 	struct mbuf *m;
1966 
1967 	/*
1968 	 * allocate an mbuf and copy the important bits of data
1969 	 * into it.  If the packet won't fit in the header,
1970 	 * allocate a cluster for it and store it there.
1971 	 */
1972 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1973 	if (m == NULL)
1974 		return NULL;
1975 	if (len > MHLEN) {
1976 		if (len > MCLBYTES) {
1977 			m_freem(m);
1978 			return NULL;
1979 		}
1980 		MCLGET(m, M_DONTWAIT);
1981 		if ((m->m_flags & M_EXT) == 0) {
1982 			m_freem(m);
1983 			return NULL;
1984 		}
1985 	}
1986 	if (p != NULL) {
1987 		/* XXX do we need to sync here? */
1988 		if (sc->sc_usedma)
1989 			memcpy(mtod(m, void *), p, len);
1990 		else
1991 			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
1992 			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
1993 	}
1994 	m->m_len = len;
1995 	m->m_pkthdr.len = len;
1996 
1997 	return (m);
1998 }
1999 
2000 /*
2001  * get the base clock
2002  */
2003 void
2004 sca_get_base_clock(struct sca_softc *sc)
2005 {
2006 	struct timeval btv, ctv, dtv;
2007 	u_int64_t bcnt;
2008 	u_int32_t cnt;
2009 	u_int16_t subcnt;
2010 
2011 	/* disable the timer, set prescale to 0 */
2012 	sca_write_1(sc, SCA_TCSR0, 0);
2013 	sca_write_1(sc, SCA_TEPR0, 0);
2014 
2015 	/* reset the counter */
2016 	(void)sca_read_1(sc, SCA_TCSR0);
2017 	subcnt = sca_read_2(sc, SCA_TCNTL0);
2018 
2019 	/* count to max */
2020 	sca_write_2(sc, SCA_TCONRL0, 0xffff);
2021 
2022 	cnt = 0;
2023 	microtime(&btv);
2024 	/* start the timer -- no interrupt enable */
2025 	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
2026 	for (;;) {
2027 		microtime(&ctv);
2028 
2029 		/* end around 3/4 of a second */
2030 		timersub(&ctv, &btv, &dtv);
2031 		if (dtv.tv_usec >= 750000)
2032 			break;
2033 
2034 		/* spin */
2035 		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
2036 			;
2037 		/* reset the timer */
2038 		(void)sca_read_2(sc, SCA_TCNTL0);
2039 		cnt++;
2040 	}
2041 
2042 	/* stop the timer */
2043 	sca_write_1(sc, SCA_TCSR0, 0);
2044 
2045 	subcnt = sca_read_2(sc, SCA_TCNTL0);
2046 	/* add the slop in and get the total timer ticks */
2047 	cnt = (cnt << 16) | subcnt;
2048 
2049 	/* cnt is 1/8 the actual time */
2050 	bcnt = cnt * 8;
2051 	/* make it proportional to 3/4 of a second */
2052 	bcnt *= (u_int64_t)750000;
2053 	bcnt /= (u_int64_t)dtv.tv_usec;
2054 	cnt = bcnt;
2055 
2056 	/* make it Hz */
2057 	cnt *= 4;
2058 	cnt /= 3;
2059 
2060 	SCA_DPRINTF(SCA_DEBUG_CLOCK,
2061 	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));
2062 
2063 	/*
2064 	 * round to the nearest 200 -- this allows for +-3 ticks error
2065 	 */
2066 	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
2067 }
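/*
 * Worked example: with a 9.8304MHz base clock, the timer (ticking at
 * 1/8 of the base clock) accumulates 9830400 * 0.75 / 8 == 921600
 * counts in the 3/4-second window; scaling by 8 and then by 4/3 gives
 * 921600 * 8 * 4 / 3 == 9830400 Hz, which the nearest-200 rounding
 * leaves unchanged.
 */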
2068 
2069 /*
2070  * print the information about the clock on the ports
2071  */
2072 void
2073 sca_print_clock_info(struct sca_softc *sc)
2074 {
2075 	struct sca_port *scp;
2076 	u_int32_t mhz, div;
2077 	int i;
2078 
2079 	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
2080 	    sc->sc_baseclock);
2081 
2082 	/* print the information about the port clock selection */
2083 	for (i = 0; i < sc->sc_numports; i++) {
2084 		scp = &sc->sc_ports[i];
2085 		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
2086 		div = scp->sp_rxs & SCA_RXS_DIV_MASK;
2087 
2088 		printf("%s: rx clock: ", scp->sp_if.if_xname);
2089 		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
2090 		case SCA_RXS_CLK_LINE:
2091 			printf("line");
2092 			break;
2093 		case SCA_RXS_CLK_LINE_SN:
2094 			printf("line with noise suppression");
2095 			break;
2096 		case SCA_RXS_CLK_INTERNAL:
2097 			printf("internal %d Hz", (mhz >> div));
2098 			break;
2099 		case SCA_RXS_CLK_ADPLL_OUT:
2100 			printf("adpll using internal %d Hz", (mhz >> div));
2101 			break;
2102 		case SCA_RXS_CLK_ADPLL_IN:
2103 			printf("adpll using line clock");
2104 			break;
2105 		}
2106 		printf("  tx clock: ");
2107 		div = scp->sp_txs & SCA_TXS_DIV_MASK;
2108 		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
2109 		case SCA_TXS_CLK_LINE:
2110 			printf("line\n");
2111 			break;
2112 		case SCA_TXS_CLK_INTERNAL:
2113 			printf("internal %d Hz\n", (mhz >> div));
2114 			break;
2115 		case SCA_TXS_CLK_RXCLK:
2116 			printf("rxclock\n");
2117 			break;
2118 		}
2119 		if (scp->sp_eclock)
2120 			printf("%s: outputting line clock\n",
2121 			    scp->sp_if.if_xname);
2122 	}
2123 }
2124 
2125