xref: /netbsd-src/sys/dev/ic/mtd803.c (revision aaf4ece63a859a04e37cf3a7229b5fab0157cc06)
1 /* $NetBSD: mtd803.c,v 1.9 2005/12/11 12:21:28 christos Exp $ */
2 
3 /*-
4  *
5  * Copyright (c) 2002 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Peter Bex <Peter.Bex@student.kun.nl>.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *      This product includes software developed by the NetBSD
22  *      Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * TODO:
42  * - Most importantly, get some bus_dmamap_syncs in the correct places.
43  *    I don't have access to a computer with PCI other than i386, and i386
44  *    is just such a machine where dmamap_syncs don't do anything.
45  * - Powerhook for when resuming after standby.
46  * - Watchdog stuff doesn't work yet, the system crashes.(lockmgr: no context)
47  * - There seems to be a CardBus version of the card. (see datasheet)
48  *    Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc)
49  * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
50  *    raised every time a packet is sent. Strange, since everything works anyway
51  */
52 
53 #include <sys/cdefs.h>
54 __KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.9 2005/12/11 12:21:28 christos Exp $");
55 
56 #include "bpfilter.h"
57 
58 #include <sys/param.h>
59 #include <sys/mbuf.h>
60 #include <sys/systm.h>
61 #include <sys/device.h>
62 #include <sys/socket.h>
63 #include <sys/ioctl.h>
64 #include <sys/syslog.h>
65 
66 #include <net/if.h>
67 #include <net/if_ether.h>
68 #include <net/if_media.h>
69 
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/if_inarp.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/in_var.h>
75 #include <netinet/ip.h>
76 #endif
77 
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #include <net/bpfdesc.h>
81 #endif
82 
83 #include <machine/bus.h>
84 
85 #include <dev/ic/mtd803reg.h>
86 #include <dev/ic/mtd803var.h>
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89 
90 /*
91  * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
92  * Written by Peter Bex (peter.bex@student.kun.nl)
93  *
94  * Datasheet at:   http://www.myson.com.tw   or   http://www.century-semi.com
95  */
96 
97 #define MTD_READ_1(sc, reg) \
98 	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
99 #define MTD_WRITE_1(sc, reg, data) \
100 	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
101 
102 #define MTD_READ_2(sc, reg) \
103 	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
104 #define MTD_WRITE_2(sc, reg, data) \
105 	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
106 
107 #define MTD_READ_4(sc, reg) \
108 	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
109 #define MTD_WRITE_4(sc, reg, data) \
110 	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
111 
112 #define MTD_SETBIT(sc, reg, x) \
113 	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
114 #define MTD_CLRBIT(sc, reg, x) \
115 	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))
116 
117 #define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))
118 
119 int mtd_mii_readreg(struct device *, int, int);
120 void mtd_mii_writereg(struct device *, int, int, int);
121 void mtd_mii_statchg(struct device *);
122 
123 void mtd_start(struct ifnet *);
124 void mtd_stop(struct ifnet *, int);
125 int mtd_ioctl(struct ifnet *, u_long, caddr_t);
126 void mtd_setmulti(struct mtd_softc *);
127 void mtd_watchdog(struct ifnet *);
128 int mtd_mediachange(struct ifnet *);
129 void mtd_mediastatus(struct ifnet *, struct ifmediareq *);
130 
131 int mtd_init(struct ifnet *);
132 void mtd_reset(struct mtd_softc *);
133 void mtd_shutdown(void *);
134 int mtd_init_desc(struct mtd_softc *);
135 int mtd_put(struct mtd_softc *, int, struct mbuf *);
136 struct mbuf *mtd_get(struct mtd_softc *, int, int);
137 
138 int mtd_rxirq(struct mtd_softc *);
139 int mtd_txirq(struct mtd_softc *);
140 int mtd_bufirq(struct mtd_softc *);
141 
142 
/*
 * mtd_config
 * One-time configuration at attach: read the station address from the
 * chip, fill in the ifnet and MII structures, allocate the DMA
 * descriptors/buffers and attach the interface.
 * Returns 0 on success, 1 on failure.
 */
int
mtd_config(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address out of the chip's PAR0..PAR5 registers */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface: register access and status-change callbacks */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	ifmedia_init(&sc->mii.mii_media, 0, mtd_mediachange, mtd_mediastatus);

	/* Probe for PHYs at any address (0xffffffff = all-address mask) */
	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		/* No PHY found: the interface cannot be used */
		printf("%s: Unable to configure MII\n", sc->dev.dv_xname);
		return 1;
	} else {
		/* Default to autonegotiation */
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	/* Allocate and initialise DMA descriptor rings and packet buffers */
	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, sc->dev.dv_xname, RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
199 
200 
201 /*
202  * mtd_init
203  * Must be called at splnet()
204  */
int
mtd_init(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Soft-reset the chip and reset the software ring indices */
	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	/*
	 * Store-and-forward transmit, full duplex.
	 * NOTE(review): full duplex is forced here regardless of the
	 * negotiated media — confirm against mtd_mii_statchg (which is
	 * currently a no-op).
	 */
	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	/* Program the multicast hash filter */
	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/*
	 * Set descriptor base addresses: the tx list starts right after
	 * the MTD_NUM_RXD rx descriptors in the same DMA area.
	 */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
255 
256 
257 int
258 mtd_init_desc(sc)
259 	struct mtd_softc *sc;
260 {
261 	int rseg, err, i;
262 	bus_dma_segment_t seg;
263 	bus_size_t size;
264 
265 	/* Allocate memory for descriptors */
266 	size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);
267 
268 	/* Allocate DMA-safe memory */
269 	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
270 			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
271 		printf("%s: unable to allocate DMA buffer, error = %d\n",
272 			sc->dev.dv_xname, err);
273 		return 1;
274 	}
275 
276 	/* Map memory to kernel addressable space */
277 	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
278 		(caddr_t *)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
279 		printf("%s: unable to map DMA buffer, error = %d\n",
280 			sc->dev.dv_xname, err);
281 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
282 		return 1;
283 	}
284 
285 	/* Create a DMA map */
286 	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
287 		size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
288 		printf("%s: unable to create DMA map, error = %d\n",
289 			sc->dev.dv_xname, err);
290 		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
291 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
292 		return 1;
293 	}
294 
295 	/* Load the DMA map */
296 	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
297 		size, NULL, BUS_DMA_NOWAIT)) != 0) {
298 		printf("%s: unable to load DMA map, error = %d\n",
299 			sc->dev.dv_xname, err);
300 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
301 		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
302 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
303 		return 1;
304 	}
305 
306 	/* Allocate memory for the buffers */
307 	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;
308 
309 	/* Allocate DMA-safe memory */
310 	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
311 			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
312 		printf("%s: unable to allocate DMA buffer, error = %d\n",
313 			sc->dev.dv_xname, err);
314 
315 		/* Undo DMA map for descriptors */
316 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
317 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
318 		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
319 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
320 		return 1;
321 	}
322 
323 	/* Map memory to kernel addressable space */
324 	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
325 		&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
326 		printf("%s: unable to map DMA buffer, error = %d\n",
327 			sc->dev.dv_xname, err);
328 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
329 
330 		/* Undo DMA map for descriptors */
331 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
332 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
333 		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
334 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
335 		return 1;
336 	}
337 
338 	/* Create a DMA map */
339 	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
340 		size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
341 		printf("%s: unable to create DMA map, error = %d\n",
342 			sc->dev.dv_xname, err);
343 		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
344 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
345 
346 		/* Undo DMA map for descriptors */
347 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
348 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
349 		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
350 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
351 		return 1;
352 	}
353 
354 	/* Load the DMA map */
355 	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
356 		size, NULL, BUS_DMA_NOWAIT)) != 0) {
357 		printf("%s: unable to load DMA map, error = %d\n",
358 			sc->dev.dv_xname, err);
359 		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
360 		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
361 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
362 
363 		/* Undo DMA map for descriptors */
364 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
365 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
366 		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
367 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
368 		return 1;
369 	}
370 
371 	/* Descriptors are stored as a circular linked list */
372 	/* Fill in rx descriptors */
373 	for (i = 0; i < MTD_NUM_RXD; ++i) {
374 		sc->desc[i].stat = MTD_RXD_OWNER;
375 		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
376 			/* Link back to first rx descriptor */
377 			sc->desc[i].next =
378 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
379 		} else {
380 			/* Link forward to next rx descriptor */
381 			sc->desc[i].next =
382 			htole32(sc->desc_dma_map->dm_segs[0].ds_addr
383 					+ (i + 1) * sizeof(struct mtd_desc));
384 		}
385 		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
386 		/* Set buffer's address */
387 		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
388 					+ i * MTD_RXBUF_SIZE);
389 	}
390 
391 	/* Fill in tx descriptors */
392 	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
393 		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
394 		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
395 			/* Link back to first tx descriptor */
396 			sc->desc[i].next =
397 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
398 					+MTD_NUM_RXD * sizeof(struct mtd_desc));
399 		} else {
400 			/* Link forward to next tx descriptor */
401 			sc->desc[i].next =
402 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
403 					+ (i + 1) * sizeof(struct mtd_desc));
404 		}
405 		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
406 		/* Set buffer's address */
407 		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
408 					+ MTD_NUM_RXD * MTD_RXBUF_SIZE
409 					+ (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
410 	}
411 
412 	return 0;
413 }
414 
415 
/*
 * mtd_mii_statchg
 * MII status-change callback.  Currently a no-op: the chip's duplex
 * setting is not updated when the negotiated media changes.
 */
void
mtd_mii_statchg(struct device *self)
{
	/* Nothing to do here (yet). */
}
424 
425 
426 int
427 mtd_mii_readreg(self, phy, reg)
428 	struct device *self;
429 	int phy, reg;
430 {
431 	struct mtd_softc *sc = (void *)self;
432 
433 	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
434 }
435 
436 
437 void
438 mtd_mii_writereg(self, phy, reg, val)
439 	struct device *self;
440 	int phy, reg, val;
441 {
442 	struct mtd_softc *sc = (void *)self;
443 
444 	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
445 }
446 
447 
448 int
449 mtd_put(sc, index, m)
450 	struct mtd_softc *sc;
451 	int index;
452 	struct mbuf *m;
453 {
454 	int len, tlen;
455 	caddr_t buf = sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
456 			+ index * MTD_TXBUF_SIZE;
457 	struct mbuf *n;
458 
459 	for (tlen = 0; m != NULL; m = n) {
460 		len = m->m_len;
461 		if (len == 0) {
462 			MFREE(m, n);
463 			continue;
464 		} else if (tlen > MTD_TXBUF_SIZE) {
465 			/* XXX FIXME: No idea what to do here. */
466 			printf("%s: packet too large! Size = %i\n",
467 				sc->dev.dv_xname, tlen);
468 			MFREE(m, n);
469 			continue;
470 		}
471 		memcpy(buf, mtod(m, caddr_t), len);
472 		buf += len;
473 		tlen += len;
474 		MFREE(m, n);
475 	}
476 	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
477 		| MTD_TXD_CONF_IRQC
478 		| ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
479 		| (tlen & MTD_TXD_CONF_BUFS);
480 
481 	return tlen;
482 }
483 
484 
/*
 * mtd_start
 * ifnet if_start routine: drain the send queue, copying each packet
 * into a tx buffer and handing the descriptors to the chip.  The first
 * descriptor of the batch is given to the chip LAST so the chip does
 * not start on a partially set-up chain.
 */
void
mtd_start(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		/* Let packet sniffers see the outgoing frame */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer (consumes the chain) */
		len = mtd_put(sc, sc->cur_tx, m);

		/*
		 * Hand every descriptor except the first of this batch to
		 * the chip now; the first is given last (below).
		 */
		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		/* Advance tx index, wrapping around the ring */
		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	/* cur_tx == 0 means the last used descriptor is the ring's last */
	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}
536 
537 
538 void
539 mtd_stop (ifp, disable)
540 	struct ifnet *ifp;
541 	int disable;
542 {
543 	struct mtd_softc *sc = ifp->if_softc;
544 
545 	/* Disable transmitter and receiver */
546 	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
547 	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
548 
549 	/* Disable interrupts */
550 	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);
551 
552 	/* Must do more at disable??... */
553 	if (disable) {
554 		/* Delete tx and rx descriptor base addresses */
555 		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
556 		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
557 	}
558 
559 	ifp->if_timer = 0;
560 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
561 }
562 
563 
564 void
565 mtd_watchdog(ifp)
566 	struct ifnet *ifp;
567 {
568 	struct mtd_softc *sc = ifp->if_softc;
569 	int s;
570 
571 	log(LOG_ERR, "%s: device timeout\n", sc->dev.dv_xname);
572 	++sc->ethercom.ec_if.if_oerrors;
573 
574 	mtd_stop(ifp, 0);
575 
576 	s = splnet();
577 	mtd_init(ifp);
578 	splx(s);
579 
580 	return;
581 }
582 
583 
584 int
585 mtd_ioctl(ifp, cmd, data)
586 	struct ifnet * ifp;
587 	u_long cmd;
588 	caddr_t data;
589 {
590 	struct mtd_softc *sc = ifp->if_softc;
591 	struct ifreq *ifr = (struct ifreq *)data;
592 	int s, error = 0;
593 
594 	s = splnet();
595 
596 	/* Don't do anything special */
597 	switch(cmd) {
598 		case SIOCADDMULTI:
599 		case SIOCDELMULTI:
600 			error = (cmd == SIOCADDMULTI) ?
601 			    ether_addmulti(ifr, &sc->ethercom) :
602 			    ether_delmulti(ifr, &sc->ethercom);
603 
604 			if (error == ENETRESET) {
605 				/*
606 				 * Multicast list has changed; set the hardware
607 				 * filter accordingly.
608 				 */
609 				 if (ifp->if_flags & IFF_RUNNING)
610 					 mtd_setmulti(sc);
611 				 error = 0;
612 			}
613 			break;
614 
615 		default:
616 			error = ether_ioctl(ifp, cmd, data);
617 			break;
618 	}
619 
620 	splx(s);
621 	return error;
622 }
623 
624 
/*
 * mtd_get
 * Copy 'totlen' bytes out of rx buffer 'index' into a freshly
 * allocated mbuf chain.  Returns the chain, or NULL if mbuf
 * allocation failed (the caller counts the error).
 */
struct mbuf *
mtd_get(sc, index, totlen)
	struct mtd_softc *sc;
	int index;
	int totlen;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	caddr_t buf = sc->buf + index * MTD_RXBUF_SIZE;

	/* Packet header mbuf first */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		/* Use a cluster when there is enough data to justify one */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * In the first mbuf, offset the data pointer so the
			 * payload AFTER the Ethernet header ends up aligned.
			 */
			caddr_t newdata = (caddr_t)
				ALIGN(m->m_data + sizeof(struct ether_header)) -
				sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		/* Copy at most 'len' bytes into this mbuf */
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, caddr_t), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			/* More data left: append another mbuf to the chain */
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}
681 
682 
/*
 * mtd_rxirq
 * Receive interrupt handler: walk the rx ring from cur_rx, pulling in
 * every descriptor the chip has handed back (MTD_RXD_OWNER cleared),
 * and pass good packets up the stack.  Each descriptor is re-armed
 * (conf + stat reset, ownership returned to the chip) before moving on.
 * Always returns 1.
 */
int
mtd_rxirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	/* Loop while we own the current descriptor (chip is done with it) */
	for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			printf("%s: received packet with errors\n",
				sc->dev.dv_xname);
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length and strip the trailing FCS */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
			>> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			printf("%s: invalid packet size %d; dropping\n",
				sc->dev.dv_xname, len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		/* Copy the packet into an mbuf chain (may return NULL) */
		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			/* mbuf allocation failed; packet is lost */
			printf("%s: error pulling packet off interface\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		/* Hand a copy to packet sniffers */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}
751 
752 
753 int
754 mtd_txirq(sc)
755 	struct mtd_softc *sc;
756 {
757 	struct ifnet *ifp = &sc->ethercom.ec_if;
758 
759 	/* Clear timeout */
760 	ifp->if_timer = 0;
761 
762 	ifp->if_flags &= ~IFF_OACTIVE;
763 	++ifp->if_opackets;
764 
765 	/* XXX FIXME If there is some queued, do an mtd_start? */
766 
767 	return 1;
768 }
769 
770 
771 int
772 mtd_bufirq(sc)
773 	struct mtd_softc *sc;
774 {
775 	struct ifnet *ifp = &sc->ethercom.ec_if;
776 
777 	/* Clear timeout */
778 	ifp->if_timer = 0;
779 
780 	/* XXX FIXME: Do something here to make sure we get some buffers! */
781 
782 	return 1;
783 }
784 
785 
/*
 * mtd_irq_h
 * Interrupt handler.  Masks the chip's interrupts, then loops reading
 * and acknowledging MTD_ISR until no interesting bits remain, dispatch-
 * ing to the rx/tx/buffer sub-handlers, and finally re-enables the
 * interrupt mask.  Returns nonzero if the interrupt was ours.
 */
int
mtd_irq_h(args)
	void *args;
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	/* Ignore interrupts when we are down or detached */
	if (!(ifp->if_flags & IFF_RUNNING) ||
		!(sc->dev.dv_flags & DVF_ACTIVE))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for(;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		/* Acknowledge the bits we are about to handle */
		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			printf("%s: receive buffer unavailable\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			printf("%s: receive error\n", sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			printf("%s: transmit buffer unavailable\n",
				sc->dev.dv_xname);
			/* NOTE(review): a tx condition counted as an input
			 * error — if_oerrors looks more appropriate; confirm. */
			++ifp->if_ierrors;
		}

		if ((status & MTD_ISR_PDF)) {
			printf("%s: parallel detection fault\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			printf("%s: fatal bus error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			printf("%s: target error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			printf("%s: master error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			printf("%s: parity error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);

	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}
883 
884 
885 void
886 mtd_setmulti(sc)
887 	struct mtd_softc *sc;
888 {
889 	struct ifnet *ifp = &sc->ethercom.ec_if;
890 	u_int32_t rxtx_stat;
891 	u_int32_t hash[2] = {0, 0};
892 	u_int32_t crc;
893 	struct ether_multi *enm;
894 	struct ether_multistep step;
895 	int mcnt = 0;
896 
897 	/* Get old status */
898 	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);
899 
900 	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
901 		rxtx_stat |= MTD_RX_AMULTI;
902 		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
903 		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
904 		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
905 		return;
906 	}
907 
908 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
909 	while (enm != NULL) {
910 		/* We need the 6 most significant bits of the CRC */
911 		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
912 
913 		hash[crc >> 5] |= 1 << (crc & 0xf);
914 
915 		++mcnt;
916 		ETHER_NEXT_MULTI(step, enm);
917 	}
918 
919 	/* Accept multicast bit needs to be on? */
920 	if (mcnt)
921 		rxtx_stat |= MTD_RX_AMULTI;
922 	else
923 		rxtx_stat &= ~MTD_RX_AMULTI;
924 
925 	/* Write out the hash */
926 	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
927 	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
928 	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
929 }
930 
931 
932 void
933 mtd_reset(sc)
934 	struct mtd_softc *sc;
935 {
936 	int i;
937 
938 	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);
939 
940 	/* Reset descriptor status */
941 	sc->cur_tx = 0;
942 	sc->cur_rx = 0;
943 
944 	/* Wait until done with reset */
945 	for (i = 0; i < MTD_TIMEOUT; ++i) {
946 		DELAY(10);
947 		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
948 			break;
949 	}
950 
951 	if (i == MTD_TIMEOUT) {
952 		printf("%s: reset timed out\n", sc->dev.dv_xname);
953 	}
954 
955 	/* Wait a little so chip can stabilize */
956 	DELAY(1000);
957 }
958 
959 
960 int
961 mtd_mediachange(ifp)
962 	struct ifnet *ifp;
963 {
964 	struct mtd_softc *sc = ifp->if_softc;
965 
966 	if (IFM_TYPE(sc->mii.mii_media.ifm_media) != IFM_ETHER)
967 		return EINVAL;
968 
969 	return mii_mediachg(&sc->mii);
970 }
971 
972 
973 void
974 mtd_mediastatus(ifp, ifmr)
975 	struct ifnet *ifp;
976 	struct ifmediareq *ifmr;
977 {
978 	struct mtd_softc *sc = ifp->if_softc;
979 
980 	if ((ifp->if_flags & IFF_UP) == 0)
981 		return;
982 
983 	mii_pollstat(&sc->mii);
984 	ifmr->ifm_active = sc->mii.mii_media_active;
985 	ifmr->ifm_status = sc->mii.mii_media_status;
986 }
987 
988 
989 void
990 mtd_shutdown (arg)
991 	void *arg;
992 {
993 	struct mtd_softc *sc = arg;
994 	struct ifnet *ifp = &sc->ethercom.ec_if;
995 
996 #if NRND > 0
997 	rnd_detach_source(&sc->rnd_src);
998 #endif
999 	mtd_stop(ifp, 1);
1000 }
1001