/*	$NetBSD: if_sq.c,v 1.9 2001/11/20 16:10:49 rafal Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static
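
/*
 * XXX: "static" is defined away above, presumably as a debugging aid so
 * that the driver's internal functions and data remain visible to the
 * kernel debugger.
 */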

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Fix up printfs in driver (most should only fire #ifdef SQ_DEBUG
 *	    or something similar).
 *	(5) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(6) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(7) Multicast support -- multicast filter, address management, ...
 *	(8) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out whether RB0 is read-only, as stated in one spot
 *	    in the HPC spec, or read-write (i.e., is "write a one to clear
 *	    it" the correct thing to do)?
 */

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char*, u_int8_t*);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc* sc);

#define SQ_TRACE(act, buf, stat, free) do {				\
	sq_trace[sq_trace_idx].action = (act);				\
	sq_trace[sq_trace_idx].bufno = (buf);				\
	sq_trace[sq_trace_idx].status = (stat);				\
	sq_trace[sq_trace_idx].freebuf = (free);			\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {			\
		memset(&sq_trace, 0, sizeof(sq_trace));			\
		sq_trace_idx = 0;					\
	}								\
} while (0)
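
/*
 * The above is a simple global ring buffer of driver actions, for
 * debugging: SQ_TRACE() records an action code, buffer index, status word
 * and free-buffer count at sq_trace_idx, clearing the buffer and rewinding
 * the index once SQ_TRACEBUF_SIZE entries have been logged.
 * sq_trace_dump() (below) replays the ring, e.g. from the watchdog
 * handler when the transmitter appears to be wedged.
 */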

struct cfattach sq_ca = {
	sizeof(struct sq_softc), sq_match, sq_attach
};

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_driver->cd_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			       "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
				reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
						    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd |= RXCMD_REC_BROAD;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to update either
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
				       sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

void
sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

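	/*
	 * Bit 0x2 of the HPC ethernet reset register is evidently the
	 * interrupt-pending flag: it is tested here, and written back
	 * below to acknowledge (clear) the interrupt.  This reading is
	 * inferred from the driver's usage, not from chip documentation.
	 */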
	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
	    SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	    /* If this is a CPU-owned buffer, we're at the end of the list */
	    if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
		u_int32_t reg;

		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);
		printf("%s: rxintr: done at %d (ctl %08x)\n",
				sc->sc_dev.dv_xname, i, reg);
#endif
		break;
	    }

	    count++;

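	    /*
	     * Compute the frame length: the HPC writes the number of
	     * unused buffer bytes back into the descriptor, so the frame
	     * occupies the buffer size less that remainder, less 3 bytes,
	     * which apparently cover the two bytes of front padding plus
	     * the trailing status byte fetched below.
	     */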
	    m = sc->sc_rxmbuf[i];
	    framelen = m->m_ext.ext_size -
			HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

	    /* Now sync the actual packet data */
	    bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

	    pktstat = *((u_int8_t*)m->m_data + framelen + 2);

	    if ((pktstat & RXSTAT_GOOD) == 0) {
		ifp->if_ierrors++;

		if (pktstat & RXSTAT_OFLOW)
		    printf("%s: receive FIFO overflow\n", sc->sc_dev.dv_xname);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    if (sq_add_rxbuf(sc, i) != 0) {
		ifp->if_ierrors++;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    m->m_data += 2;
	    m->m_pkthdr.rcvif = ifp;
	    m->m_pkthdr.len = m->m_len = framelen;

	    ifp->if_ipackets++;

#if 0
	    printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
						     i, framelen);
#endif

#if NBPFILTER > 0
	    if (ifp->if_bpf)
		    bpf_mtap(ifp->if_bpf, m);
#endif
	    (*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
	    new_end = SQ_PREVRX(i);
	    sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
				     BUS_DMASYNC_PREWRITE);

	    orig_end = SQ_PREVRX(sc->sc_nextrx);
	    sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
				      BUS_DMASYNC_PREWRITE);

	    sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
					       HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
	    /* Pass the start of the receive ring to the HPC */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			      HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

	    /* And turn on the HPC ethernet receive channel */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
							ENETR_CTL_ACTIVE);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

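	/*
	 * The word read from HPC_ENETX_CTL is evidently a combination of
	 * the HPC DMA-active flag and the Seeq transmit status bits, since
	 * both ENETX_CTL_ACTIVE and the TXSTAT_* values are tested against
	 * it below; this is inferred from usage, not from documentation.
	 */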
	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
		    ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
		    printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
		    printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		    ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try to start the DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
		    if ((status & ENETX_CTL_ACTIVE) == 0) {
			SQ_TRACE(SQ_RESTART_DMA, i, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

			/* Set a watchdog timer in case the chip flakes out. */
			ifp->if_timer = 5;
		    } else {
			SQ_TRACE(SQ_TXINTR_BUSY, i, status, sc->sc_nfreetx);
		    }
		    break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
	    ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
	    ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC DMA channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

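	/*
	 * Writing 3 presumably asserts both reset-related bits in the
	 * HPC ethernet reset register and writing 0 releases them; the
	 * 20us delay gives the chip time to settle in between.
	 */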
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor.  */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

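/*
 * Dump 'len' bytes starting at physical address 'addr' (viewed through
 * KSEG1) in hex, sixteen bytes per line.  Debugging aid only.
 */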
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	int i;
	u_char* physaddr = (u_char*) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
		    printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

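/*
 * Parse an Ethernet address of the form "aa:bb:cc:dd:ee:ff" (as returned
 * by the ARCBIOS "eaddr" environment variable) into its six-byte binary
 * form.  Note that malformed input is not detected.
 */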
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}