/*	$NetBSD: hd64570.c,v 1.55 2020/01/29 14:47:08 thorpej Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on both transmit and receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using DMA.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.55 2020/01/29 14:47:08 thorpej Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX		0x0001
#define SCA_DEBUG_TX		0x0002
#define SCA_DEBUG_CISCO		0x0004
#define SCA_DEBUG_DMA		0x0008
#define SCA_DEBUG_RXPKT		0x0010
#define SCA_DEBUG_TXPKT		0x0020
#define SCA_DEBUG_INTR		0x0040
#define SCA_DEBUG_CLOCK		0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL 0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static	void sca_msci_init(struct sca_softc *, sca_port_t *);
static	void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static	int sca_dmac_intr(sca_port_t *, u_int8_t);
static	int sca_msci_intr(sca_port_t *, u_int8_t);

static	void sca_get_packets(sca_port_t *);
static	int sca_frame_avail(sca_port_t *);
static	void sca_frame_process(sca_port_t *);
static	void sca_frame_read_done(sca_port_t *);

static	void sca_port_starttx(sca_port_t *);

static	void sca_port_up(sca_port_t *);
static	void sca_port_down(sca_port_t *);

static	int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
			    const struct rtentry *);
static	int sca_ioctl(struct ifnet *, u_long, void *);
static	void sca_start(struct ifnet *);
static	void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static	void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
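
/*
 * Illustration (not from the original source): assuming a hypothetical
 * 16 KiB shared-memory window, i.e. scu_pagemask == 0x3fff, a buffer
 * address of 0x12345 masks down to the in-window offset 0x2345; the
 * scu_set_page() callback is what selects which page of card memory
 * the window currently exposes.
 */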

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}
#endif

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}
#endif

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}
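
/*
 * Note on the accessors above (summarizing what the code does): when
 * sc_usedma is set, the descriptor ring lives in DMA-able host memory
 * and is touched directly; otherwise it lives in the card's shared
 * memory window and every field access goes through bus_space, with
 * sca_page_addr() supplying the in-window offset.
 */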

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or fewer than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if(sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if(sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
	bpf_mtap_softint_init(ifp);

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 * 1 <= TD <= 256		TD is inc of 1
	 * 2 <= TD <= 512		TD is inc of 2
	 * 4 <= TD <= 1024		TD is inc of 4
	 * ...
	 * 512 <= TD <= 256*512		TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif
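
/*
 * Worked example for the (disabled) routine above, assuming the same
 * 9.8304 MHz crystal: for hz == 9600 the starting clock is
 * 9830400 >> 1 == 4915200 Hz.  4915200 / 9600 == 512 > 256, so the
 * clock is halved once (div == 1), giving tmc == 2457600 / 9600 == 256,
 * which is encoded as TMC == 0.  The resulting rate is
 * (4915200 / 256) / 2^1 == 9600 Hz exactly.
 */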

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
	     (SCA_CTL_IDLC_PATTERN
	     | SCA_CTL_UDRNC_AFTER_FCS
	     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * For any FIFO level less than or equal to TRC0, txrdy is
	 * activated, which starts the dmac transfer to the FIFO.
	 * Once the FIFO holds TRC1 + 1 or more bytes, txrdy is cleared,
	 * which stops DMA.
	 *
	 * Thus, if we are using a very fast clock that empties the FIFO
	 * quickly, delays in the dmac starting to fill the FIFO can
	 * lead to underruns, so we want a fairly full FIFO to still
	 * cause the dmac to start.  For cards with on-board RAM this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 *	because cda can't be eda when starting we always
	 *	have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}
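
/*
 * Illustration of the rx ring just after sca_dmac_rxinit() (hypothetical
 * sp_nrxdesc == 4):
 *
 *	CDA -> desc0 -> desc1 -> desc2 -> desc3 <- EDA
 *	  ^                                 |
 *	  +------------- chain -------------+
 *
 * The DMAC fills buffers starting at CDA and may not advance into EDA,
 * so the single-descriptor gap noted above keeps it from overwriting
 * frames the driver has not consumed yet.
 */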

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(
    struct ifnet *ifp,
    struct mbuf *m,
    const struct sockaddr *dst,
    const struct rtentry *rt0)
{
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if (error != 0) {
		if_statinc_ref(nsr, if_oerrors);
		if_statinc_ref(nsr, if_collisions);
		IF_STAT_PUTREF(ifp);
		splx(s);
		return (error);
	}
	if_statadd_ref(nsr, if_obytes, len);
	if (mflags & M_MCAST)
		if_statinc_ref(nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);

	sca_start(ifp);
	splx(s);

	return (error);

 bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == NULL) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	if_statinc(ifp, if_opackets);

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	bpf_mtap(ifp, mb_head, BPF_D_OUT);

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int	ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
			     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t	 dsr;
	int		 ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error. It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
			    ("Transmit completed. cda %x eda %x dsr %x\n",
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				if_schedule_deferred_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				   ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			if_statinc(&scp->sp_if, if_ierrors);
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			if_statinc(&scp->sp_if, if_ierrors);
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;
	int isr = 0;

	t = time_uptime * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		if_statinc(&scp->sp_if, if_ierrors);
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

	bpf_mtap_softint(&scp->sp_if, m);

	if_statinc(&scp->sp_if, if_ipackets);

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m_set_rcvif(m, &scp->sp_if);
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		pktq = ip_pktq;
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m_set_rcvif(m, &scp->sp_if);
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		pktq = ip6_pktq;
		break;
#endif	/* INET6 */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			if_statinc(&scp->sp_if, if_ierrors);
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m_set_rcvif(m, &scp->sp_if);

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			if_statinc(&scp->sp_if, if_ierrors);
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			if_statinc(&scp->sp_if, if_ierrors);
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));

			if_statinc(&scp->sp_if, if_noproto);
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->h_proto)));
		if_statinc(&scp->sp_if, if_noproto);
		goto dropit;
	}

	/* Queue the packet */
	if (__predict_true(pktq)) {
		if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
			if_statinc(&scp->sp_if, if_iqdrops);
			goto dropit;
		}
		return;
	}
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
		schednetisr(isr);
	} else {
		IF_DROP(ifq);
		if_statinc(&scp->sp_if, if_iqdrops);
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}
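
/*
 * Example of the bookkeeping above (illustrative numbers): with
 * sp_nrxdesc == 4, consuming descriptor 1 sets sp_rxend = 1 and
 * sp_rxstart = 2, and EDAL0 is rewritten to point at descriptor 1,
 * so the slot just freed becomes the new guard descriptor that the
 * DMAC may not advance into.
 */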

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t	startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}
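
/*
 * Illustrative walk-through (hypothetical state): with sp_ntxdesc == 4
 * and sp_txcur == 2, enddesc above is 3, so CDAL1 is loaded with the
 * address of descriptor 0 and EDAL1 with that of descriptor 3.  The
 * DMAC then walks the chain from CDA and stops when it reaches EDA,
 * i.e. after transmitting descriptors 0 through 2.
 */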

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}
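
/*
 * Worked example of the calibration above (assuming an exact 9.8304 MHz
 * base clock): the timer ticks at baseclock / 8, so a 750 ms window
 * accumulates about 9830400 / 8 * 0.75 = 921600 ticks.  Multiplying by
 * 8 and then by 4/3 (to scale 750 ms up to one second) recovers
 * 9830400 Hz, which the final rounding to the nearest 200 leaves
 * unchanged.
 */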

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf("  tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}
2141