xref: /netbsd-src/sys/dev/ic/mtd803.c (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /* $NetBSD: mtd803.c,v 1.20 2008/04/28 20:23:50 martin Exp $ */
2 
3 /*-
4  *
5  * Copyright (c) 2002 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Peter Bex <Peter.Bex@student.kun.nl>.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * TODO:
35  * - Most importantly, get some bus_dmamap_syncs in the correct places.
36  *    I don't have access to a computer with PCI other than i386, and i386
37  *    is just such a machine where dmamap_syncs don't do anything.
38  * - Powerhook for when resuming after standby.
39  * - Watchdog stuff doesn't work yet, the system crashes.
40  * - There seems to be a CardBus version of the card. (see datasheet)
41  *    Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc)
42  * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
43  *    raised every time a packet is sent. Strange, since everything works anyway
44  */
45 
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.20 2008/04/28 20:23:50 martin Exp $");
48 
49 #include "bpfilter.h"
50 
51 #include <sys/param.h>
52 #include <sys/mbuf.h>
53 #include <sys/systm.h>
54 #include <sys/device.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/syslog.h>
58 
59 #include <net/if.h>
60 #include <net/if_ether.h>
61 #include <net/if_media.h>
62 
63 #ifdef INET
64 #include <netinet/in.h>
65 #include <netinet/if_inarp.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #endif
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #include <net/bpfdesc.h>
74 #endif
75 
76 #include <sys/bus.h>
77 
78 #include <dev/ic/mtd803reg.h>
79 #include <dev/ic/mtd803var.h>
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82 
83 /*
84  * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
85  * Written by Peter Bex (peter.bex@student.kun.nl)
86  *
87  * Datasheet at:   http://www.myson.com.tw   or   http://www.century-semi.com
88  */
89 
/*
 * Register access helpers: every device register is reached through
 * bus_space using the tag/handle stored in the softc.
 */
#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

/* Read-modify-write helpers; note each expands to a read plus a write */
#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

/* Big-endian CRC32, as used for the multicast hash filter */
#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))
111 
/*
 * MII bus glue.  NOTE(review): these are declared with "struct device *"
 * while the definitions below use "device_t" — the same type via typedef
 * in this kernel version, but worth unifying.
 */
int mtd_mii_readreg(struct device *, int, int);
void mtd_mii_writereg(struct device *, int, int, int);
void mtd_mii_statchg(struct device *);

/* ifnet entry points */
void mtd_start(struct ifnet *);
void mtd_stop(struct ifnet *, int);
int mtd_ioctl(struct ifnet *, u_long, void *);
void mtd_setmulti(struct mtd_softc *);
void mtd_watchdog(struct ifnet *);

/* Initialisation, reset and packet buffer copy-in/copy-out */
int mtd_init(struct ifnet *);
void mtd_reset(struct mtd_softc *);
void mtd_shutdown(void *);
int mtd_init_desc(struct mtd_softc *);
int mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

/* Per-cause interrupt service helpers, called from mtd_irq_h() */
int mtd_rxirq(struct mtd_softc *);
int mtd_txirq(struct mtd_softc *);
int mtd_bufirq(struct mtd_softc *);
132 
133 
/*
 * mtd_config
 * One-time attach-time configuration: read the station address out of
 * the chip, fill in the ifnet and MII structures, allocate the DMA
 * descriptor rings and buffers, and attach the interface.
 * Returns 0 on success, 1 on failure.
 */
int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address from the PAR0..PAR5 registers */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, device_xname(&sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* Probe for PHYs at any address */
	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		aprint_error_dev(&sc->dev, "Unable to configure MII\n");
		return 1;
	} else {
		/* Default media: autonegotiation */
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	/* Allocate and set up DMA descriptor rings and packet buffers */
	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(&sc->dev), RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
191 
192 
/*
 * mtd_init
 * Must be called at splnet()
 *
 * Reset the chip and program it from scratch: bus/burst configuration,
 * RX/TX modes, interrupt masks and descriptor ring base addresses,
 * then enable the receiver and transmitter.  Always returns 0.
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	/* Store-and-forward transmit, full duplex (per the flag names) */
	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	/* Program the multicast hash filter */
	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/*
	 * Set descriptor base addresses: the RX ring sits at the start of
	 * the descriptor DMA area, the TX ring follows it.
	 * NOTE(review): the htole32() here looks redundant (or wrong on
	 * big-endian hosts) since bus_space_write_4 handles byte order —
	 * confirm against the bus_space(9) implementation for this port.
	 */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
246 
247 
248 int
249 mtd_init_desc(struct mtd_softc *sc)
250 {
251 	int rseg, err, i;
252 	bus_dma_segment_t seg;
253 	bus_size_t size;
254 
255 	/* Allocate memory for descriptors */
256 	size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);
257 
258 	/* Allocate DMA-safe memory */
259 	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
260 			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
261 		aprint_error_dev(&sc->dev, "unable to allocate DMA buffer, error = %d\n", err);
262 		return 1;
263 	}
264 
265 	/* Map memory to kernel addressable space */
266 	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
267 		(void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
268 		aprint_error_dev(&sc->dev, "unable to map DMA buffer, error = %d\n", err);
269 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
270 		return 1;
271 	}
272 
273 	/* Create a DMA map */
274 	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
275 		size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
276 		aprint_error_dev(&sc->dev, "unable to create DMA map, error = %d\n", err);
277 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
278 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
279 		return 1;
280 	}
281 
282 	/* Load the DMA map */
283 	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
284 		size, NULL, BUS_DMA_NOWAIT)) != 0) {
285 		aprint_error_dev(&sc->dev, "unable to load DMA map, error = %d\n",
286 			err);
287 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
288 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
289 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
290 		return 1;
291 	}
292 
293 	/* Allocate memory for the buffers */
294 	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;
295 
296 	/* Allocate DMA-safe memory */
297 	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
298 			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
299 		aprint_error_dev(&sc->dev, "unable to allocate DMA buffer, error = %d\n",
300 			err);
301 
302 		/* Undo DMA map for descriptors */
303 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
304 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
305 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
306 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
307 		return 1;
308 	}
309 
310 	/* Map memory to kernel addressable space */
311 	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
312 		&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
313 		aprint_error_dev(&sc->dev, "unable to map DMA buffer, error = %d\n",
314 			err);
315 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
316 
317 		/* Undo DMA map for descriptors */
318 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
319 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
320 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
321 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
322 		return 1;
323 	}
324 
325 	/* Create a DMA map */
326 	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
327 		size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
328 		aprint_error_dev(&sc->dev, "unable to create DMA map, error = %d\n",
329 			err);
330 		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
331 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
332 
333 		/* Undo DMA map for descriptors */
334 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
335 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
336 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
337 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
338 		return 1;
339 	}
340 
341 	/* Load the DMA map */
342 	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
343 		size, NULL, BUS_DMA_NOWAIT)) != 0) {
344 		aprint_error_dev(&sc->dev, "unable to load DMA map, error = %d\n",
345 			err);
346 		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
347 		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
348 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
349 
350 		/* Undo DMA map for descriptors */
351 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
352 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
353 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
354 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
355 		return 1;
356 	}
357 
358 	/* Descriptors are stored as a circular linked list */
359 	/* Fill in rx descriptors */
360 	for (i = 0; i < MTD_NUM_RXD; ++i) {
361 		sc->desc[i].stat = MTD_RXD_OWNER;
362 		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
363 			/* Link back to first rx descriptor */
364 			sc->desc[i].next =
365 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
366 		} else {
367 			/* Link forward to next rx descriptor */
368 			sc->desc[i].next =
369 			htole32(sc->desc_dma_map->dm_segs[0].ds_addr
370 					+ (i + 1) * sizeof(struct mtd_desc));
371 		}
372 		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
373 		/* Set buffer's address */
374 		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
375 					+ i * MTD_RXBUF_SIZE);
376 	}
377 
378 	/* Fill in tx descriptors */
379 	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
380 		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
381 		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
382 			/* Link back to first tx descriptor */
383 			sc->desc[i].next =
384 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
385 					+MTD_NUM_RXD * sizeof(struct mtd_desc));
386 		} else {
387 			/* Link forward to next tx descriptor */
388 			sc->desc[i].next =
389 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
390 					+ (i + 1) * sizeof(struct mtd_desc));
391 		}
392 		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
393 		/* Set buffer's address */
394 		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
395 					+ MTD_NUM_RXD * MTD_RXBUF_SIZE
396 					+ (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
397 	}
398 
399 	return 0;
400 }
401 
402 
/*
 * mtd_mii_statchg
 * MII status-change callback.  Intentionally a no-op; it is unclear
 * whether the hardware needs any reprogramming on link changes.
 */
void
mtd_mii_statchg(device_t self)
{
	/* Should we do something here? :) */
}
408 
409 
410 int
411 mtd_mii_readreg(device_t self, int phy, int reg)
412 {
413 	struct mtd_softc *sc = device_private(self);
414 
415 	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
416 }
417 
418 
419 void
420 mtd_mii_writereg(device_t self, int phy, int reg, int val)
421 {
422 	struct mtd_softc *sc = device_private(self);
423 
424 	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
425 }
426 
427 
428 int
429 mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
430 {
431 	int len, tlen;
432 	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
433 			+ index * MTD_TXBUF_SIZE;
434 	struct mbuf *n;
435 
436 	for (tlen = 0; m != NULL; m = n) {
437 		len = m->m_len;
438 		if (len == 0) {
439 			MFREE(m, n);
440 			continue;
441 		} else if (tlen > MTD_TXBUF_SIZE) {
442 			/* XXX FIXME: No idea what to do here. */
443 			aprint_error_dev(&sc->dev, "packet too large! Size = %i\n",
444 				tlen);
445 			MFREE(m, n);
446 			continue;
447 		}
448 		memcpy(buf, mtod(m, void *), len);
449 		buf += len;
450 		tlen += len;
451 		MFREE(m, n);
452 	}
453 	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
454 		| MTD_TXD_CONF_IRQC
455 		| ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
456 		| (tlen & MTD_TXD_CONF_BUFS);
457 
458 	return tlen;
459 }
460 
461 
/*
 * mtd_start
 * Drain the output queue into the TX descriptor ring.  Each packet is
 * copied into its descriptor's fixed transmit buffer (mtd_put); every
 * descriptor except the first is handed to the chip as we go, and
 * ownership of the first one is transferred last so the chip never
 * sees a half-built chain.
 *
 * NOTE(review): if the queue is empty on entry, first_tx's conf/stat
 * are still modified and a polling demand is issued — confirm this is
 * harmless on this hardware.
 */
void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	struct mbuf *n;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		/* Hand a copy to any attached packet sniffers */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer (frees the chain) */
		len = mtd_put(sc, sc->cur_tx, m);

		/* Hand every descriptor except the first to the chip now */
		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		/* Ring wrapped: the last descriptor used is the ring's last */
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}
512 
513 
/*
 * mtd_stop
 * Stop the interface: disable the receiver/transmitter and mask all
 * interrupts.  When "disable" is set, also clear the descriptor base
 * address registers so the chip cannot DMA any further.
 */
void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Must do more at disable??... */
	if (disable) {
		/* Delete tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	/* Cancel the watchdog and mark the interface as not running */
	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
536 
537 
538 void
539 mtd_watchdog(struct ifnet *ifp)
540 {
541 	struct mtd_softc *sc = ifp->if_softc;
542 	int s;
543 
544 	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->dev));
545 	++sc->ethercom.ec_if.if_oerrors;
546 
547 	mtd_stop(ifp, 0);
548 
549 	s = splnet();
550 	mtd_init(ifp);
551 	splx(s);
552 
553 	return;
554 }
555 
556 
557 int
558 mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
559 {
560 	struct mtd_softc *sc = ifp->if_softc;
561 	int s, error = 0;
562 
563 	s = splnet();
564 
565 	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
566 		/*
567 		 * Multicast list has changed; set the hardware
568 		 * filter accordingly.
569 		 */
570 		 if (ifp->if_flags & IFF_RUNNING)
571 			 mtd_setmulti(sc);
572 		 error = 0;
573 	}
574 
575 	splx(s);
576 	return error;
577 }
578 
579 
/*
 * mtd_get
 * Copy a received frame of "totlen" bytes out of RX buffer "index"
 * into a freshly allocated mbuf chain.  Returns NULL if an mbuf or
 * cluster could not be allocated; on success the caller owns the
 * returned chain.
 */
struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		/* Attach a cluster when a lot of data remains */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Shift the payload so the data after the Ethernet
			 * header lands on an ALIGN()ed boundary.
			 */
			char *newdata = (char *)
				ALIGN(m->m_data + sizeof(struct ether_header)) -
				sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			/* Chain another mbuf for the remainder */
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}
633 
634 
/*
 * mtd_rxirq
 * Receive interrupt: walk the RX ring from sc->cur_rx for as long as
 * we own the descriptor, copying each frame into an mbuf chain and
 * passing it up the stack, then returning the descriptor to the chip.
 * Frames with the error-summary bit set or an implausible length are
 * dropped and counted in if_ierrors.  Always returns 1.
 */
int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	/* Process descriptors until we hit one still owned by the chip */
	for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(&sc->dev, "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length; the hardware count includes the CRC */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
			>> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			aprint_error_dev(&sc->dev, "invalid packet size %d; dropping\n",
				len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		/* Copy the frame out of the DMA buffer into an mbuf chain */
		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			aprint_error_dev(&sc->dev, "error pulling packet off interface\n");
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		/* Hand a copy to any attached packet sniffers */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}
700 
701 
702 int
703 mtd_txirq(struct mtd_softc *sc)
704 {
705 	struct ifnet *ifp = &sc->ethercom.ec_if;
706 
707 	/* Clear timeout */
708 	ifp->if_timer = 0;
709 
710 	ifp->if_flags &= ~IFF_OACTIVE;
711 	++ifp->if_opackets;
712 
713 	/* XXX FIXME If there is some queued, do an mtd_start? */
714 
715 	return 1;
716 }
717 
718 
719 int
720 mtd_bufirq(struct mtd_softc *sc)
721 {
722 	struct ifnet *ifp = &sc->ethercom.ec_if;
723 
724 	/* Clear timeout */
725 	ifp->if_timer = 0;
726 
727 	/* XXX FIXME: Do something here to make sure we get some buffers! */
728 
729 	return 1;
730 }
731 
732 
/*
 * mtd_irq_h
 * Interrupt handler.  All interrupts are masked for the duration; the
 * ISR is read, fed to the entropy pool, acknowledged and dispatched in
 * a loop until no interesting status bits remain.  Returns nonzero if
 * the interrupt was ours.
 */
int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	/* Ignore interrupts while we are down or the device is inactive */
	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for(;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		/* Acknowledge the status bits we are about to service */
		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(&sc->dev, "receive buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(&sc->dev, "receive error\n");
			++ifp->if_ierrors;
		}

		/*
		 * NOTE(review): TXBUN is both counted as an error here and
		 * dispatched to mtd_bufirq() below; it is also counted into
		 * if_ierrors although it is a transmit-side condition —
		 * confirm this is intentional.
		 */
		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(&sc->dev, "transmit buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if ((status & MTD_ISR_PDF)) {
			aprint_error_dev(&sc->dev, "parallel detection fault\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(&sc->dev, "fatal bus error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(&sc->dev, "target error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(&sc->dev, "master error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(&sc->dev, "parity error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);

	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}
821 
822 
823 void
824 mtd_setmulti(struct mtd_softc *sc)
825 {
826 	struct ifnet *ifp = &sc->ethercom.ec_if;
827 	u_int32_t rxtx_stat;
828 	u_int32_t hash[2] = {0, 0};
829 	u_int32_t crc;
830 	struct ether_multi *enm;
831 	struct ether_multistep step;
832 	int mcnt = 0;
833 
834 	/* Get old status */
835 	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);
836 
837 	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
838 		rxtx_stat |= MTD_RX_AMULTI;
839 		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
840 		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
841 		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
842 		return;
843 	}
844 
845 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
846 	while (enm != NULL) {
847 		/* We need the 6 most significant bits of the CRC */
848 		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
849 
850 		hash[crc >> 5] |= 1 << (crc & 0xf);
851 
852 		++mcnt;
853 		ETHER_NEXT_MULTI(step, enm);
854 	}
855 
856 	/* Accept multicast bit needs to be on? */
857 	if (mcnt)
858 		rxtx_stat |= MTD_RX_AMULTI;
859 	else
860 		rxtx_stat &= ~MTD_RX_AMULTI;
861 
862 	/* Write out the hash */
863 	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
864 	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
865 	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
866 }
867 
868 
869 void
870 mtd_reset(struct mtd_softc *sc)
871 {
872 	int i;
873 
874 	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);
875 
876 	/* Reset descriptor status */
877 	sc->cur_tx = 0;
878 	sc->cur_rx = 0;
879 
880 	/* Wait until done with reset */
881 	for (i = 0; i < MTD_TIMEOUT; ++i) {
882 		DELAY(10);
883 		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
884 			break;
885 	}
886 
887 	if (i == MTD_TIMEOUT) {
888 		aprint_error_dev(&sc->dev, "reset timed out\n");
889 	}
890 
891 	/* Wait a little so chip can stabilize */
892 	DELAY(1000);
893 }
894 
895 
896 void
897 mtd_shutdown (arg)
898 	void *arg;
899 {
900 	struct mtd_softc *sc = arg;
901 	struct ifnet *ifp = &sc->ethercom.ec_if;
902 
903 #if NRND > 0
904 	rnd_detach_source(&sc->rnd_src);
905 #endif
906 	mtd_stop(ifp, 1);
907 }
908