xref: /netbsd-src/sys/dev/pci/if_ste.c (revision 267197ec1eebfcb9810ea27a89625b6ddf68e3e7)
1 /*	$NetBSD: if_ste.c,v 1.32 2008/01/19 22:10:19 dyoung Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Device driver for the Sundance Tech. ST-201 10/100
41  * Ethernet controller.
42  */
43 
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: if_ste.c,v 1.32 2008/01/19 22:10:19 dyoung Exp $");
46 
47 #include "bpfilter.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59 #include <sys/queue.h>
60 
61 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
62 
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_ether.h>
67 
68 #if NBPFILTER > 0
69 #include <net/bpf.h>
70 #endif
71 
72 #include <sys/bus.h>
73 #include <sys/intr.h>
74 
75 #include <dev/mii/mii.h>
76 #include <dev/mii/miivar.h>
77 #include <dev/mii/mii_bitbang.h>
78 
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcidevs.h>
82 
83 #include <dev/pci/if_stereg.h>
84 
85 /*
86  * Transmit descriptor list size.
87  */
88 #define	STE_NTXDESC		256
89 #define	STE_NTXDESC_MASK	(STE_NTXDESC - 1)
90 #define	STE_NEXTTX(x)		(((x) + 1) & STE_NTXDESC_MASK)
91 
92 /*
93  * Receive descriptor list size.
94  */
95 #define	STE_NRXDESC		128
96 #define	STE_NRXDESC_MASK	(STE_NRXDESC - 1)
97 #define	STE_NEXTRX(x)		(((x) + 1) & STE_NRXDESC_MASK)
98 
99 /*
100  * Control structures are DMA'd to the ST-201 chip.  We allocate them in
101  * a single clump that maps to a single DMA segment to make several things
102  * easier.
103  */
struct ste_control_data {
	/*
	 * The transmit descriptors.  Read and written by the chip via
	 * DMA; multi-byte fields are stored little-endian (see the
	 * htole32()/le32toh() conversions throughout this file).
	 */
	struct ste_tfd scd_txdescs[STE_NTXDESC];

	/*
	 * The receive descriptors.  Same DMA/endianness rules as the
	 * transmit descriptors above.
	 */
	struct ste_rfd scd_rxdescs[STE_NRXDESC];
};
115 
116 #define	STE_CDOFF(x)	offsetof(struct ste_control_data, x)
117 #define	STE_CDTXOFF(x)	STE_CDOFF(scd_txdescs[(x)])
118 #define	STE_CDRXOFF(x)	STE_CDOFF(scd_rxdescs[(x)])
119 
120 /*
121  * Software state for transmit and receive jobs.
122  */
struct ste_descsoft {
	struct mbuf *ds_mbuf;		/* head of our mbuf chain; NULL when
					   the slot holds no packet */
	bus_dmamap_t ds_dmamap;		/* our DMA map for ds_mbuf's data */
};
127 
128 /*
129  * Software state per device.
130  */
struct ste_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	/* Shorthand for the bus address of the control data clump. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct ste_descsoft sc_txsoft[STE_NTXDESC];
	struct ste_descsoft sc_rxsoft[STE_NRXDESC];

	/*
	 * Control data structures (the DMA'd descriptor rings).
	 */
	struct ste_control_data *sc_control_data;
	/* Shorthand aliases for the descriptor arrays inside the clump. */
#define	sc_txdescs	sc_control_data->scd_txdescs
#define	sc_rxdescs	sc_control_data->scd_rxdescs

	int	sc_txpending;		/* number of Tx requests pending */
	int	sc_txdirty;		/* first dirty Tx descriptor */
	int	sc_txlast;		/* last used Tx descriptor */

	int	sc_rxptr;		/* next ready Rx descriptor/descsoft */

	int	sc_txthresh;		/* Tx threshold */
	uint32_t sc_DMACtrl;		/* prototype DMACtrl register */
	uint16_t sc_IntEnable;		/* prototype IntEnable register */
	uint16_t sc_MacCtrl0;		/* prototype MacCtrl0 register */
	uint8_t	sc_ReceiveMode;		/* prototype ReceiveMode register */
};
173 
174 #define	STE_CDTXADDR(sc, x)	((sc)->sc_cddma + STE_CDTXOFF((x)))
175 #define	STE_CDRXADDR(sc, x)	((sc)->sc_cddma + STE_CDRXOFF((x)))
176 
177 #define	STE_CDTXSYNC(sc, x, ops)					\
178 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
179 	    STE_CDTXOFF((x)), sizeof(struct ste_tfd), (ops))
180 
181 #define	STE_CDRXSYNC(sc, x, ops)					\
182 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
183 	    STE_CDRXOFF((x)), sizeof(struct ste_rfd), (ops))
184 
185 #define	STE_INIT_RXDESC(sc, x)						\
186 do {									\
187 	struct ste_descsoft *__ds = &(sc)->sc_rxsoft[(x)];		\
188 	struct ste_rfd *__rfd = &(sc)->sc_rxdescs[(x)];			\
189 	struct mbuf *__m = __ds->ds_mbuf;				\
190 									\
191 	/*								\
192 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
193 	 * so that the payload after the Ethernet header is aligned	\
194 	 * to a 4-byte boundary.					\
195 	 */								\
196 	__m->m_data = __m->m_ext.ext_buf + 2;				\
197 	__rfd->rfd_frag.frag_addr =					\
198 	    htole32(__ds->ds_dmamap->dm_segs[0].ds_addr + 2);		\
199 	__rfd->rfd_frag.frag_len = htole32((MCLBYTES - 2) | FRAG_LAST);	\
200 	__rfd->rfd_next = htole32(STE_CDRXADDR((sc), STE_NEXTRX((x))));	\
201 	__rfd->rfd_status = 0;						\
202 	STE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
203 } while (/*CONSTCOND*/0)
204 
205 #define STE_TIMEOUT 1000
206 
207 static void	ste_start(struct ifnet *);
208 static void	ste_watchdog(struct ifnet *);
209 static int	ste_ioctl(struct ifnet *, u_long, void *);
210 static int	ste_init(struct ifnet *);
211 static void	ste_stop(struct ifnet *, int);
212 
213 static void	ste_shutdown(void *);
214 
215 static void	ste_reset(struct ste_softc *, u_int32_t);
216 static void	ste_setthresh(struct ste_softc *);
217 static void	ste_txrestart(struct ste_softc *, u_int8_t);
218 static void	ste_rxdrain(struct ste_softc *);
219 static int	ste_add_rxbuf(struct ste_softc *, int);
220 static void	ste_read_eeprom(struct ste_softc *, int, uint16_t *);
221 static void	ste_tick(void *);
222 
223 static void	ste_stats_update(struct ste_softc *);
224 
225 static void	ste_set_filter(struct ste_softc *);
226 
227 static int	ste_intr(void *);
228 static void	ste_txintr(struct ste_softc *);
229 static void	ste_rxintr(struct ste_softc *);
230 
231 static int	ste_mii_readreg(struct device *, int, int);
232 static void	ste_mii_writereg(struct device *, int, int, int);
233 static void	ste_mii_statchg(struct device *);
234 
235 static int	ste_match(struct device *, struct cfdata *, void *);
236 static void	ste_attach(struct device *, struct device *, void *);
237 
238 int	ste_copy_small = 0;
239 
240 CFATTACH_DECL(ste, sizeof(struct ste_softc),
241     ste_match, ste_attach, NULL, NULL);
242 
243 static uint32_t ste_mii_bitbang_read(struct device *);
244 static void	ste_mii_bitbang_write(struct device *, uint32_t);
245 
246 static const struct mii_bitbang_ops ste_mii_bitbang_ops = {
247 	ste_mii_bitbang_read,
248 	ste_mii_bitbang_write,
249 	{
250 		PC_MgmtData,		/* MII_BIT_MDO */
251 		PC_MgmtData,		/* MII_BIT_MDI */
252 		PC_MgmtClk,		/* MII_BIT_MDC */
253 		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
254 		0,			/* MII_BIT_DIR_PHY_HOST */
255 	}
256 };
257 
258 /*
259  * Devices supported by this driver.
260  */
261 static const struct ste_product {
262 	pci_vendor_id_t		ste_vendor;
263 	pci_product_id_t	ste_product;
264 	const char		*ste_name;
265 } ste_products[] = {
266 	{ PCI_VENDOR_SUNDANCETI, 	PCI_PRODUCT_SUNDANCETI_IP100A,
267 	  "IC Plus Corp. IP00A 10/100 Fast Ethernet Adapter" },
268 
269 	{ PCI_VENDOR_SUNDANCETI,	PCI_PRODUCT_SUNDANCETI_ST201,
270 	  "Sundance ST-201 10/100 Ethernet" },
271 
272 	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DL1002,
273 	  "D-Link DL-1002 10/100 Ethernet" },
274 
275 	{ 0,				0,
276 	  NULL },
277 };
278 
279 static const struct ste_product *
280 ste_lookup(const struct pci_attach_args *pa)
281 {
282 	const struct ste_product *sp;
283 
284 	for (sp = ste_products; sp->ste_name != NULL; sp++) {
285 		if (PCI_VENDOR(pa->pa_id) == sp->ste_vendor &&
286 		    PCI_PRODUCT(pa->pa_id) == sp->ste_product)
287 			return (sp);
288 	}
289 	return (NULL);
290 }
291 
292 static int
293 ste_match(struct device *parent, struct cfdata *cf, void *aux)
294 {
295 	struct pci_attach_args *pa = aux;
296 
297 	if (ste_lookup(pa) != NULL)
298 		return (1);
299 
300 	return (0);
301 }
302 
/*
 * ste_attach:
 *
 *	Autoconfiguration attach routine: map the device registers,
 *	establish the interrupt, allocate/load the DMA control data,
 *	read the station address from the EEPROM, probe the MII, and
 *	attach the network interface.  On a mid-attach failure the
 *	goto ladder at the bottom releases resources in reverse order.
 */
static void
ste_attach(struct device *parent, struct device *self, void *aux)
{
	struct ste_softc *sc = (struct ste_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct ste_product *sp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2];

	callout_init(&sc->sc_tick_ch, 0);

	/* Can't fail: ste_match() already found the device. */
	sp = ste_lookup(pa);
	if (sp == NULL) {
		printf("\n");
		panic("ste_attach: impossible");
	}

	printf(": %s\n", sp->ste_name);

	/*
	 * Map the device.  Try both I/O and memory space; prefer
	 * memory space if it mapped successfully.
	 */
	ioh_valid = (pci_mapreg_map(pa, STE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, STE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Enable bus mastering. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Power up the chip (EOPNOTSUPP just means no power management). */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The whole clump maps to one DMA segment.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ste_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ste_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ste_control_data), 1,
	    sizeof(struct ste_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ste_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < STE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    STE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);

	/*
	 * Read the Ethernet address from the EEPROM (three 16-bit words).
	 * NOTE(review): the words are converted to host order and then
	 * memcpy'd into the byte array; assumes this yields the correct
	 * byte order on big-endian hosts — confirm against the datasheet.
	 */
	for (i = 0; i < 3; i++) {
		ste_read_eeprom(sc, STE_EEPROM_StationAddress0 + i, &myea[i]);
		myea[i] = le16toh(myea[i]);
	}
	memcpy(enaddr, myea, sizeof(enaddr));

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ste_mii_readreg;
	sc->sc_mii.mii_writereg = ste_mii_writereg;
	sc->sc_mii.mii_statchg = ste_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; pin the media to "none". */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* (Redundant: ifp was already initialized to this above.) */
	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_stop = ste_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Default the transmit threshold to 128 bytes.
	 */
	sc->sc_txthresh = 128;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DC_MWIDisable;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ste_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ste_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
555 
556 /*
557  * ste_shutdown:
558  *
559  *	Make sure the interface is stopped at reboot time.
560  */
561 static void
562 ste_shutdown(void *arg)
563 {
564 	struct ste_softc *sc = arg;
565 
566 	ste_stop(&sc->sc_ethercom.ec_if, 1);
567 }
568 
569 static void
570 ste_dmahalt_wait(struct ste_softc *sc)
571 {
572 	int i;
573 
574 	for (i = 0; i < STE_TIMEOUT; i++) {
575 		delay(2);
576 		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STE_DMACtrl) &
577 		     DC_DMAHaltBusy) == 0)
578 			break;
579 	}
580 
581 	if (i == STE_TIMEOUT)
582 		printf("%s: DMA halt timed out\n", sc->sc_dev.dv_xname);
583 }
584 
/*
 * ste_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the send queue, builds Tx descriptors for them, links the
 *	new descriptors onto the hardware ring, and kicks the Tx DMA
 *	engine if it was idle.
 */
static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ste_descsoft *ds;
	struct ste_tfd *tfd;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, seg, totlen;

	/* Nothing to do unless we're running and not already full. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of pending transmissions
	 * and the current last descriptor in the list.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < STE_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		/* m is non-NULL only if we had to copy into a fresh mbuf. */
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = STE_NEXTTX(sc->sc_txlast);
		tfd = &sc->sc_txdescs[nexttx];
		ds = &sc->sc_txsoft[nexttx];

		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/* Now actually remove the packet from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; transmit the copy, free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the fragment list. */
		for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
			tfd->tfd_frags[seg].frag_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			tfd->tfd_frags[seg].frag_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
			totlen += dmamap->dm_segs[seg].ds_len;
		}
		tfd->tfd_frags[seg - 1].frag_len |= htole32(FRAG_LAST);

		/*
		 * Initialize the descriptor.
		 * NOTE(review): only the low 2 bits of totlen are folded
		 * into the control word; presumably the chip takes the
		 * frame length from the fragment list — confirm against
		 * the ST-201 datasheet.
		 */
		tfd->tfd_next = htole32(STE_CDTXADDR(sc, nexttx));
		tfd->tfd_control = htole32(TFD_FrameId(nexttx) | (totlen & 3));

		/* Sync the descriptor. */
		STE_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		ds->ds_mbuf = m0;

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txpending == STE_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = STE_NEXTTX(olasttx);

		/*
		 * Cause a descriptor interrupt to happen on the
		 * last packet we enqueued, and also cause the
		 * DMA engine to wait after it has finished processing
		 * it.  A zero tfd_next terminates the chain there.
		 */
		sc->sc_txdescs[sc->sc_txlast].tfd_next = 0;
		sc->sc_txdescs[sc->sc_txlast].tfd_control |=
		    htole32(TFD_TxDMAIndicate);
		STE_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Link up the new chain of descriptors to the
		 * last.
		 */
		sc->sc_txdescs[olasttx].tfd_next =
		    htole32(STE_CDTXADDR(sc, STE_NEXTTX(olasttx)));
		STE_CDTXSYNC(sc, olasttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Kick the transmit DMA logic.  Note that since we're
		 * using auto-polling, reading the Tx desc pointer will
		 * give it the nudge it needs to get going.
		 */
		if (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    STE_TxDMAListPtr) == 0) {
			/* Engine is idle; halt it, point it at the new
			   chain, and resume. */
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    STE_DMACtrl, DC_TxDMAHalt);
			ste_dmahalt_wait(sc);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    STE_TxDMAListPtr,
			    STE_CDTXADDR(sc, STE_NEXTTX(olasttx)));
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    STE_DMACtrl, DC_TxDMAResume);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
774 
775 /*
776  * ste_watchdog:	[ifnet interface function]
777  *
778  *	Watchdog timer handler.
779  */
780 static void
781 ste_watchdog(struct ifnet *ifp)
782 {
783 	struct ste_softc *sc = ifp->if_softc;
784 
785 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
786 	ifp->if_oerrors++;
787 
788 	ste_txintr(sc);
789 	ste_rxintr(sc);
790 	(void) ste_init(ifp);
791 
792 	/* Try to get more packets going. */
793 	ste_start(ifp);
794 }
795 
796 /*
797  * ste_ioctl:		[ifnet interface function]
798  *
799  *	Handle control requests from the operator.
800  */
801 static int
802 ste_ioctl(struct ifnet *ifp, u_long cmd, void *data)
803 {
804 	struct ste_softc *sc = ifp->if_softc;
805 	int s, error;
806 
807 	s = splnet();
808 
809 	error = ether_ioctl(ifp, cmd, data);
810 	if (error == ENETRESET) {
811 		/*
812 		 * Multicast list has changed; set the hardware filter
813 		 * accordingly.
814 		 */
815 		if (ifp->if_flags & IFF_RUNNING)
816 			ste_set_filter(sc);
817 		error = 0;
818 	}
819 
820 	/* Try to get more packets going. */
821 	ste_start(ifp);
822 
823 	splx(s);
824 	return (error);
825 }
826 
/*
 * ste_intr:
 *
 *	Interrupt service routine.  Loops dispatching Rx/Tx/stats work
 *	until no enabled interrupt bits remain, handling Tx error
 *	recovery along the way; reinitializes the chip if a fatal
 *	condition was seen.  Returns nonzero iff the interrupt was ours.
 */
static int
ste_intr(void *arg)
{
	struct ste_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	uint8_t txstat;
	int wantinit;

	/* Not our interrupt?  Bail without touching anything. */
	if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STE_IntStatus) &
	     IS_InterruptStatus) == 0)
		return (0);

	for (wantinit = 0; wantinit == 0;) {
		/*
		 * NOTE(review): reading IntStatusAck presumably also
		 * acknowledges (clears) the pending bits, per the
		 * register name — confirm against the datasheet.
		 */
		isr = bus_space_read_2(sc->sc_st, sc->sc_sh, STE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Receive interrupts. */
		if (isr & IE_RxDMAComplete)
			ste_rxintr(sc);

		/* Transmit interrupts. */
		if (isr & (IE_TxDMAComplete|IE_TxComplete))
			ste_txintr(sc);

		/* Statistics overflow. */
		if (isr & IE_UpdateStats)
			ste_stats_update(sc);

		/* Transmission errors.  Drain TxStatus until empty. */
		if (isr & IE_TxComplete) {
			for (;;) {
				txstat = bus_space_read_1(sc->sc_st, sc->sc_sh,
				    STE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					/*
					 * Bump the Tx start threshold
					 * (capped at 0x1ffc) and restart
					 * the transmitter in place.
					 */
					sc->sc_txthresh += 32;
					if (sc->sc_txthresh > 0x1ffc)
						sc->sc_txthresh = 0x1ffc;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    sc->sc_dev.dv_xname,
					    sc->sc_txthresh);
					ste_reset(sc, AC_TxReset | AC_DMA |
					    AC_FIFO | AC_Network);
					ste_setthresh(sc);
					bus_space_write_1(sc->sc_st, sc->sc_sh,
					    STE_TxDMAPollPeriod, 127);
					ste_txrestart(sc,
					    bus_space_read_1(sc->sc_st,
						sc->sc_sh, STE_TxFrameId));
				}
				if (txstat & TS_TxReleaseError) {
					printf("%s: Tx FIFO release error\n",
					    sc->sc_dev.dv_xname);
					wantinit = 1;
				}
				if (txstat & TS_MaxCollisions) {
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
					wantinit = 1;
				}
				if (txstat & TS_TxStatusOverflow) {
					printf("%s: status overflow\n",
					    sc->sc_dev.dv_xname);
					wantinit = 1;
				}
				/*
				 * NOTE(review): a 16-bit write although the
				 * status was read as 8 bits — presumably
				 * clears both TxStatus bytes; confirm.
				 */
				bus_space_write_2(sc->sc_st, sc->sc_sh,
				    STE_TxStatus, 0);
			}
		}

		/* Host interface errors are fatal; force a reinit. */
		if (isr & IE_HostError) {
			printf("%s: Host interface error\n",
			    sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (wantinit)
		ste_init(ifp);

	/* Re-assert our interrupt enable mask. */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_IntEnable,
	    sc->sc_IntEnable);

	/* Try to get more packets going. */
	ste_start(ifp);

	return (1);
}
925 
/*
 * ste_txintr:
 *
 *	Helper; handle transmit interrupts.  Reaps completed Tx
 *	descriptors, frees their mbufs, and advances sc_txdirty;
 *	clears IFF_OACTIVE since descriptors are (or may become) free.
 */
static void
ste_txintr(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ste_descsoft *ds;
	uint32_t control;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = STE_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/* Pull the descriptor in from DMA memory before reading. */
		STE_CDTXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		control = le32toh(sc->sc_txdescs[i].tfd_control);
		/* Stop at the first descriptor the chip hasn't finished. */
		if ((control & TFD_TxDMAComplete) == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
}
973 
/*
 * ste_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the Rx ring from
 *	sc_rxptr, passing completed frames up the stack and recycling
 *	(or replacing) their buffers.
 */
static void
ste_rxintr(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ste_descsoft *ds;
	struct mbuf *m;
	uint32_t status;
	int i, len;

	for (i = sc->sc_rxptr;; i = STE_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		/* Pull the descriptor in from DMA memory before reading. */
		STE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_rxdescs[i].rfd_status);

		/* Stop at the first descriptor the chip hasn't filled. */
		if ((status & RFD_RxDMAComplete) == 0)
			break;

		/*
		 * If the packet had an error, simply recycle the
		 * buffer.  Note, we count the error later in the
		 * periodic stats update.
		 */
		if (status & RFD_RxFrameError) {
			STE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, we have
		 * configured the chip to not include the CRC at
		 * the end of the packet.
		 */
		len = RFD_RxDMAFrameLen(status);

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (ste_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* +2 keeps the payload 4-byte aligned, matching
			   the offset used in STE_INIT_RXDESC. */
			m->m_data += 2;
			memcpy(mtod(m, void *),
			    mtod(ds->ds_mbuf, void *), len);
			STE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (ste_add_rxbuf(sc, i) != 0) {
 dropit:
				/* Drop: recycle the old buffer in place. */
				ifp->if_ierrors++;
				STE_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass if up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
1073 
1074 /*
1075  * ste_tick:
1076  *
1077  *	One second timer, used to tick the MII.
1078  */
1079 static void
1080 ste_tick(void *arg)
1081 {
1082 	struct ste_softc *sc = arg;
1083 	int s;
1084 
1085 	s = splnet();
1086 	mii_tick(&sc->sc_mii);
1087 	ste_stats_update(sc);
1088 	splx(s);
1089 
1090 	callout_reset(&sc->sc_tick_ch, hz, ste_tick, sc);
1091 }
1092 
1093 /*
1094  * ste_stats_update:
1095  *
1096  *	Read the ST-201 statistics counters.
1097  */
1098 static void
1099 ste_stats_update(struct ste_softc *sc)
1100 {
1101 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1102 	bus_space_tag_t st = sc->sc_st;
1103 	bus_space_handle_t sh = sc->sc_sh;
1104 
1105 	(void) bus_space_read_2(st, sh, STE_OctetsReceivedOk0);
1106 	(void) bus_space_read_2(st, sh, STE_OctetsReceivedOk1);
1107 
1108 	(void) bus_space_read_2(st, sh, STE_OctetsTransmittedOk0);
1109 	(void) bus_space_read_2(st, sh, STE_OctetsTransmittedOk1);
1110 
1111 	ifp->if_opackets +=
1112 	    (u_int) bus_space_read_2(st, sh, STE_FramesTransmittedOK);
1113 	ifp->if_ipackets +=
1114 	    (u_int) bus_space_read_2(st, sh, STE_FramesReceivedOK);
1115 
1116 	ifp->if_collisions +=
1117 	    (u_int) bus_space_read_1(st, sh, STE_LateCollisions) +
1118 	    (u_int) bus_space_read_1(st, sh, STE_MultipleColFrames) +
1119 	    (u_int) bus_space_read_1(st, sh, STE_SingleColFrames);
1120 
1121 	(void) bus_space_read_1(st, sh, STE_FramesWDeferredXmt);
1122 
1123 	ifp->if_ierrors +=
1124 	    (u_int) bus_space_read_1(st, sh, STE_FramesLostRxErrors);
1125 
1126 	ifp->if_oerrors +=
1127 	    (u_int) bus_space_read_1(st, sh, STE_FramesWExDeferral) +
1128 	    (u_int) bus_space_read_1(st, sh, STE_FramesXbortXSColls) +
1129 	    bus_space_read_1(st, sh, STE_CarrierSenseErrors);
1130 
1131 	(void) bus_space_read_1(st, sh, STE_BcstFramesXmtdOk);
1132 	(void) bus_space_read_1(st, sh, STE_BcstFramesRcvdOk);
1133 	(void) bus_space_read_1(st, sh, STE_McstFramesXmtdOk);
1134 	(void) bus_space_read_1(st, sh, STE_McstFramesRcvdOk);
1135 }
1136 
1137 /*
1138  * ste_reset:
1139  *
1140  *	Perform a soft reset on the ST-201.
1141  */
1142 static void
1143 ste_reset(struct ste_softc *sc, u_int32_t rstbits)
1144 {
1145 	uint32_t ac;
1146 	int i;
1147 
1148 	ac = bus_space_read_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl);
1149 
1150 	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl, ac | rstbits);
1151 
1152 	delay(50000);
1153 
1154 	for (i = 0; i < STE_TIMEOUT; i++) {
1155 		delay(1000);
1156 		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl) &
1157 		     AC_ResetBusy) == 0)
1158 			break;
1159 	}
1160 
1161 	if (i == STE_TIMEOUT)
1162 		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
1163 
1164 	delay(1000);
1165 }
1166 
1167 /*
1168  * ste_setthresh:
1169  *
1170  * 	set the various transmit threshold registers
1171  */
1172 static void
1173 ste_setthresh(struct ste_softc *sc)
1174 {
1175 	/* set the TX threhold */
1176 	bus_space_write_2(sc->sc_st, sc->sc_sh,
1177 	    STE_TxStartThresh, sc->sc_txthresh);
1178 	/* Urgent threshold: set to sc_txthresh / 2 */
1179 	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_TxDMAUrgentThresh,
1180 	    sc->sc_txthresh >> 6);
1181 	/* Burst threshold: use default value (256 bytes) */
1182 }
1183 
1184 /*
1185  * restart TX at the given frame ID in the transmitter ring
1186  */
1187 static void
1188 ste_txrestart(struct ste_softc *sc, u_int8_t id)
1189 {
1190 	u_int32_t control;
1191 
1192 	STE_CDTXSYNC(sc, id, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1193 	control = le32toh(sc->sc_txdescs[id].tfd_control);
1194 	control &= ~TFD_TxDMAComplete;
1195 	sc->sc_txdescs[id].tfd_control = htole32(control);
1196 	STE_CDTXSYNC(sc, id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1197 
1198 	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_TxDMAListPtr, 0);
1199 	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl1, MC1_TxEnable);
1200 	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl, DC_TxDMAHalt);
1201 	ste_dmahalt_wait(sc);
1202 	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_TxDMAListPtr,
1203 	    STE_CDTXADDR(sc, id));
1204 	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl, DC_TxDMAResume);
1205 }
1206 
1207 /*
1208  * ste_init:		[ ifnet interface function ]
1209  *
1210  *	Initialize the interface.  Must be called at splnet().
1211  */
1212 static int
1213 ste_init(struct ifnet *ifp)
1214 {
1215 	struct ste_softc *sc = ifp->if_softc;
1216 	bus_space_tag_t st = sc->sc_st;
1217 	bus_space_handle_t sh = sc->sc_sh;
1218 	struct ste_descsoft *ds;
1219 	int i, error = 0;
1220 
1221 	/*
1222 	 * Cancel any pending I/O.
1223 	 */
1224 	ste_stop(ifp, 0);
1225 
1226 	/*
1227 	 * Reset the chip to a known state.
1228 	 */
1229 	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
1230 	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);
1231 
1232 	/*
1233 	 * Initialize the transmit descriptor ring.
1234 	 */
1235 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1236 	sc->sc_txpending = 0;
1237 	sc->sc_txdirty = 0;
1238 	sc->sc_txlast = STE_NTXDESC - 1;
1239 
1240 	/*
1241 	 * Initialize the receive descriptor and receive job
1242 	 * descriptor rings.
1243 	 */
1244 	for (i = 0; i < STE_NRXDESC; i++) {
1245 		ds = &sc->sc_rxsoft[i];
1246 		if (ds->ds_mbuf == NULL) {
1247 			if ((error = ste_add_rxbuf(sc, i)) != 0) {
1248 				printf("%s: unable to allocate or map rx "
1249 				    "buffer %d, error = %d\n",
1250 				    sc->sc_dev.dv_xname, i, error);
1251 				/*
1252 				 * XXX Should attempt to run with fewer receive
1253 				 * XXX buffers instead of just failing.
1254 				 */
1255 				ste_rxdrain(sc);
1256 				goto out;
1257 			}
1258 		} else
1259 			STE_INIT_RXDESC(sc, i);
1260 	}
1261 	sc->sc_rxptr = 0;
1262 
1263 	/* Set the station address. */
1264 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1265 		bus_space_write_1(st, sh, STE_StationAddress0 + 1,
1266 		    CLLADDR(ifp->if_sadl)[i]);
1267 
1268 	/* Set up the receive filter. */
1269 	ste_set_filter(sc);
1270 
1271 	/*
1272 	 * Give the receive ring to the chip.
1273 	 */
1274 	bus_space_write_4(st, sh, STE_RxDMAListPtr,
1275 	    STE_CDRXADDR(sc, sc->sc_rxptr));
1276 
1277 	/*
1278 	 * We defer giving the transmit ring to the chip until we
1279 	 * transmit the first packet.
1280 	 */
1281 
1282 	/*
1283 	 * Initialize the Tx auto-poll period.  It's OK to make this number
1284 	 * large (127 is the max) -- we explicitly kick the transmit engine
1285 	 * when there's actually a packet.  We are using auto-polling only
1286 	 * to make the interface to the transmit engine not suck.
1287 	 */
1288 	bus_space_write_1(sc->sc_st, sc->sc_sh, STE_TxDMAPollPeriod, 127);
1289 
1290 	/* ..and the Rx auto-poll period. */
1291 	bus_space_write_1(st, sh, STE_RxDMAPollPeriod, 64);
1292 
1293 	/* Initialize the Tx start threshold. */
1294 	ste_setthresh(sc);
1295 
1296 	/* Set the FIFO release threshold to 512 bytes. */
1297 	bus_space_write_1(st, sh, STE_TxReleaseThresh, 512 >> 4);
1298 
1299 	/* Set maximum packet size for VLAN. */
1300 	if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
1301 		bus_space_write_2(st, sh, STE_MaxFrameSize, ETHER_MAX_LEN + 4);
1302 	else
1303 		bus_space_write_2(st, sh, STE_MaxFrameSize, ETHER_MAX_LEN);
1304 
1305 	/*
1306 	 * Initialize the interrupt mask.
1307 	 */
1308 	sc->sc_IntEnable = IE_HostError | IE_TxComplete | IE_UpdateStats |
1309 	    IE_TxDMAComplete | IE_RxDMAComplete;
1310 
1311 	bus_space_write_2(st, sh, STE_IntStatus, 0xffff);
1312 	bus_space_write_2(st, sh, STE_IntEnable, sc->sc_IntEnable);
1313 
1314 	/*
1315 	 * Start the receive DMA engine.
1316 	 */
1317 	bus_space_write_4(st, sh, STE_DMACtrl, sc->sc_DMACtrl | DC_RxDMAResume);
1318 
1319 	/*
1320 	 * Initialize MacCtrl0 -- do it before setting the media,
1321 	 * as setting the media will actually program the register.
1322 	 */
1323 	sc->sc_MacCtrl0 = MC0_IFSSelect(0);
1324 	if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
1325 		sc->sc_MacCtrl0 |= MC0_RcvLargeFrames;
1326 
1327 	/*
1328 	 * Set the current media.
1329 	 */
1330 	if ((error = ether_mediachange(ifp)) != 0)
1331 		goto out;
1332 
1333 	/*
1334 	 * Start the MAC.
1335 	 */
1336 	bus_space_write_2(st, sh, STE_MacCtrl1,
1337 	    MC1_StatisticsEnable | MC1_TxEnable | MC1_RxEnable);
1338 
1339 	/*
1340 	 * Start the one second MII clock.
1341 	 */
1342 	callout_reset(&sc->sc_tick_ch, hz, ste_tick, sc);
1343 
1344 	/*
1345 	 * ...all done!
1346 	 */
1347 	ifp->if_flags |= IFF_RUNNING;
1348 	ifp->if_flags &= ~IFF_OACTIVE;
1349 
1350  out:
1351 	if (error)
1352 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1353 	return (error);
1354 }
1355 
1356 /*
1357  * ste_drain:
1358  *
1359  *	Drain the receive queue.
1360  */
1361 static void
1362 ste_rxdrain(struct ste_softc *sc)
1363 {
1364 	struct ste_descsoft *ds;
1365 	int i;
1366 
1367 	for (i = 0; i < STE_NRXDESC; i++) {
1368 		ds = &sc->sc_rxsoft[i];
1369 		if (ds->ds_mbuf != NULL) {
1370 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1371 			m_freem(ds->ds_mbuf);
1372 			ds->ds_mbuf = NULL;
1373 		}
1374 	}
1375 }
1376 
1377 /*
1378  * ste_stop:		[ ifnet interface function ]
1379  *
1380  *	Stop transmission on the interface.
1381  */
1382 static void
1383 ste_stop(struct ifnet *ifp, int disable)
1384 {
1385 	struct ste_softc *sc = ifp->if_softc;
1386 	struct ste_descsoft *ds;
1387 	int i;
1388 
1389 	/*
1390 	 * Stop the one second clock.
1391 	 */
1392 	callout_stop(&sc->sc_tick_ch);
1393 
1394 	/* Down the MII. */
1395 	mii_down(&sc->sc_mii);
1396 
1397 	/*
1398 	 * Disable interrupts.
1399 	 */
1400 	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_IntEnable, 0);
1401 
1402 	/*
1403 	 * Stop receiver, transmitter, and stats update.
1404 	 */
1405 	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl1,
1406 	    MC1_StatisticsDisable | MC1_TxDisable | MC1_RxDisable);
1407 
1408 	/*
1409 	 * Stop the transmit and receive DMA.
1410 	 */
1411 	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl,
1412 	    DC_RxDMAHalt | DC_TxDMAHalt);
1413 	ste_dmahalt_wait(sc);
1414 
1415 	/*
1416 	 * Release any queued transmit buffers.
1417 	 */
1418 	for (i = 0; i < STE_NTXDESC; i++) {
1419 		ds = &sc->sc_txsoft[i];
1420 		if (ds->ds_mbuf != NULL) {
1421 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1422 			m_freem(ds->ds_mbuf);
1423 			ds->ds_mbuf = NULL;
1424 		}
1425 	}
1426 
1427 	if (disable)
1428 		ste_rxdrain(sc);
1429 
1430 	/*
1431 	 * Mark the interface down and cancel the watchdog timer.
1432 	 */
1433 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1434 	ifp->if_timer = 0;
1435 }
1436 
1437 static int
1438 ste_eeprom_wait(struct ste_softc *sc)
1439 {
1440 	int i;
1441 
1442 	for (i = 0; i < STE_TIMEOUT; i++) {
1443 		delay(1000);
1444 		if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STE_EepromCtrl) &
1445 		     EC_EepromBusy) == 0)
1446 			return (0);
1447 	}
1448 	return (1);
1449 }
1450 
1451 /*
1452  * ste_read_eeprom:
1453  *
1454  *	Read data from the serial EEPROM.
1455  */
1456 static void
1457 ste_read_eeprom(struct ste_softc *sc, int offset, uint16_t *data)
1458 {
1459 
1460 	if (ste_eeprom_wait(sc))
1461 		printf("%s: EEPROM failed to come ready\n",
1462 		    sc->sc_dev.dv_xname);
1463 
1464 	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_EepromCtrl,
1465 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_R));
1466 	if (ste_eeprom_wait(sc))
1467 		printf("%s: EEPROM read timed out\n",
1468 		    sc->sc_dev.dv_xname);
1469 	*data = bus_space_read_2(sc->sc_st, sc->sc_sh, STE_EepromData);
1470 }
1471 
1472 /*
1473  * ste_add_rxbuf:
1474  *
1475  *	Add a receive buffer to the indicated descriptor.
1476  */
1477 static int
1478 ste_add_rxbuf(struct ste_softc *sc, int idx)
1479 {
1480 	struct ste_descsoft *ds = &sc->sc_rxsoft[idx];
1481 	struct mbuf *m;
1482 	int error;
1483 
1484 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1485 	if (m == NULL)
1486 		return (ENOBUFS);
1487 
1488 	MCLGET(m, M_DONTWAIT);
1489 	if ((m->m_flags & M_EXT) == 0) {
1490 		m_freem(m);
1491 		return (ENOBUFS);
1492 	}
1493 
1494 	if (ds->ds_mbuf != NULL)
1495 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1496 
1497 	ds->ds_mbuf = m;
1498 
1499 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1500 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1501 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1502 	if (error) {
1503 		printf("%s: can't load rx DMA map %d, error = %d\n",
1504 		    sc->sc_dev.dv_xname, idx, error);
1505 		panic("ste_add_rxbuf");		/* XXX */
1506 	}
1507 
1508 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1509 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1510 
1511 	STE_INIT_RXDESC(sc, idx);
1512 
1513 	return (0);
1514 }
1515 
1516 /*
1517  * ste_set_filter:
1518  *
1519  *	Set up the receive filter.
1520  */
1521 static void
1522 ste_set_filter(struct ste_softc *sc)
1523 {
1524 	struct ethercom *ec = &sc->sc_ethercom;
1525 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1526 	struct ether_multi *enm;
1527 	struct ether_multistep step;
1528 	uint32_t crc;
1529 	uint16_t mchash[4];
1530 
1531 	sc->sc_ReceiveMode = RM_ReceiveUnicast;
1532 	if (ifp->if_flags & IFF_BROADCAST)
1533 		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;
1534 
1535 	if (ifp->if_flags & IFF_PROMISC) {
1536 		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
1537 		goto allmulti;
1538 	}
1539 
1540 	/*
1541 	 * Set up the multicast address filter by passing all multicast
1542 	 * addresses through a CRC generator, and then using the low-order
1543 	 * 6 bits as an index into the 64 bit multicast hash table.  The
1544 	 * high order bits select the register, while the rest of the bits
1545 	 * select the bit within the register.
1546 	 */
1547 
1548 	memset(mchash, 0, sizeof(mchash));
1549 
1550 	ETHER_FIRST_MULTI(step, ec, enm);
1551 	if (enm == NULL)
1552 		goto done;
1553 
1554 	while (enm != NULL) {
1555 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1556 			/*
1557 			 * We must listen to a range of multicast addresses.
1558 			 * For now, just accept all multicasts, rather than
1559 			 * trying to set only those filter bits needed to match
1560 			 * the range.  (At this time, the only use of address
1561 			 * ranges is for IP multicast routing, for which the
1562 			 * range is big enough to require all bits set.)
1563 			 */
1564 			goto allmulti;
1565 		}
1566 
1567 		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1568 
1569 		/* Just want the 6 least significant bits. */
1570 		crc &= 0x3f;
1571 
1572 		/* Set the corresponding bit in the hash table. */
1573 		mchash[crc >> 4] |= 1 << (crc & 0xf);
1574 
1575 		ETHER_NEXT_MULTI(step, enm);
1576 	}
1577 
1578 	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;
1579 
1580 	ifp->if_flags &= ~IFF_ALLMULTI;
1581 	goto done;
1582 
1583  allmulti:
1584 	ifp->if_flags |= IFF_ALLMULTI;
1585 	sc->sc_ReceiveMode |= RM_ReceiveMulticast;
1586 
1587  done:
1588 	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
1589 		/*
1590 		 * Program the multicast hash table.
1591 		 */
1592 		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable0,
1593 		    mchash[0]);
1594 		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable1,
1595 		    mchash[1]);
1596 		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable2,
1597 		    mchash[2]);
1598 		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable3,
1599 		    mchash[3]);
1600 	}
1601 
1602 	bus_space_write_1(sc->sc_st, sc->sc_sh, STE_ReceiveMode,
1603 	    sc->sc_ReceiveMode);
1604 }
1605 
1606 /*
1607  * ste_mii_readreg:	[mii interface function]
1608  *
1609  *	Read a PHY register on the MII of the ST-201.
1610  */
1611 static int
1612 ste_mii_readreg(struct device *self, int phy, int reg)
1613 {
1614 
1615 	return (mii_bitbang_readreg(self, &ste_mii_bitbang_ops, phy, reg));
1616 }
1617 
1618 /*
1619  * ste_mii_writereg:	[mii interface function]
1620  *
1621  *	Write a PHY register on the MII of the ST-201.
1622  */
1623 static void
1624 ste_mii_writereg(struct device *self, int phy, int reg, int val)
1625 {
1626 
1627 	mii_bitbang_writereg(self, &ste_mii_bitbang_ops, phy, reg, val);
1628 }
1629 
1630 /*
1631  * ste_mii_statchg:	[mii interface function]
1632  *
1633  *	Callback from MII layer when media changes.
1634  */
1635 static void
1636 ste_mii_statchg(struct device *self)
1637 {
1638 	struct ste_softc *sc = (struct ste_softc *) self;
1639 
1640 	if (sc->sc_mii.mii_media_active & IFM_FDX)
1641 		sc->sc_MacCtrl0 |= MC0_FullDuplexEnable;
1642 	else
1643 		sc->sc_MacCtrl0 &= ~MC0_FullDuplexEnable;
1644 
1645 	/* XXX 802.1x flow-control? */
1646 
1647 	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl0, sc->sc_MacCtrl0);
1648 }
1649 
1650 /*
1651  * ste_mii_bitbang_read: [mii bit-bang interface function]
1652  *
1653  *	Read the MII serial port for the MII bit-bang module.
1654  */
1655 static uint32_t
1656 ste_mii_bitbang_read(struct device *self)
1657 {
1658 	struct ste_softc *sc = (void *) self;
1659 
1660 	return (bus_space_read_1(sc->sc_st, sc->sc_sh, STE_PhyCtrl));
1661 }
1662 
1663 /*
1664  * ste_mii_bitbang_write: [mii big-bang interface function]
1665  *
1666  *	Write the MII serial port for the MII bit-bang module.
1667  */
1668 static void
1669 ste_mii_bitbang_write(struct device *self, uint32_t val)
1670 {
1671 	struct ste_softc *sc = (void *) self;
1672 
1673 	bus_space_write_1(sc->sc_st, sc->sc_sh, STE_PhyCtrl, val);
1674 }
1675