xref: /openbsd-src/sys/dev/pci/if_bce.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /* $OpenBSD: if_bce.c,v 1.37 2013/08/21 05:21:43 dlg Exp $ */
2 /* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $	 */
3 
4 /*
5  * Copyright (c) 2003 Clifford Wright. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Broadcom BCM440x 10/100 ethernet (broadcom.com)
33  * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com)
34  *
35  * Cliff Wright cliff@snipe444.org
36  */
37 
38 #include "bpfilter.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/timeout.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/socket.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 
54 #ifdef INET
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/ip.h>
58 #include <netinet/if_ether.h>
59 #endif
60 #if NBPFILTER > 0
61 #include <net/bpf.h>
62 #endif
63 
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 #include <dev/pci/pcidevs.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 #include <dev/mii/miidevs.h>
71 #include <dev/mii/brgphyreg.h>
72 
73 #include <dev/pci/if_bcereg.h>
74 
75 #include <uvm/uvm.h>
76 
/*
 * Ring descriptor, shared with the chip.  Fields are stored
 * little-endian (all writers use htole32()).
 */
struct bce_dma_slot {
	u_int32_t ctrl;		/* CTRL_* flags | byte count */
	u_int32_t addr;		/* bus address of the data buffer */
};
#define CTRL_BC_MASK	0x1fff	/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */

/* usable rx buffer length per MCLBYTES slot (4 bytes reserved; NOTE(review):
 * purpose of the reserved 4 bytes is not visible here -- confirm) */
#define BCE_RXBUF_LEN	(MCLBYTES - 4)
89 
/* Packet status is returned in a pre-packet header */
struct rx_pph {
	u_int16_t len;		/* frame length; includes the CRC (trimmed in bce_rxintr) */
	u_int16_t flags;	/* RXF_* error bits, below */
	u_int16_t pad[12];
};

/* size of the chip-written header that precedes each received frame */
#define	BCE_PREPKT_HEADER_SIZE		30

/* packet status flags bits */
#define RXF_NO				0x8	/* odd number of nibbles */
#define RXF_RXER			0x4	/* receive symbol error */
#define RXF_CRC				0x2	/* crc error */
#define RXF_OV				0x1	/* fifo overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC		64
#define BCE_NTXDESC		64

#define BCE_TIMEOUT		100	/* # 10us for mii read/write */
110 
/* per-instance driver state */
struct bce_softc {
	struct device		bce_dev;
	bus_space_tag_t		bce_btag;
	bus_space_handle_t	bce_bhandle;
	bus_dma_tag_t		bce_dmatag;
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;
	struct pci_attach_args	bce_pa;
	struct mii_data		bce_mii;
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	caddr_t			bce_data;	/* rx then tx data buffers; one MCLBYTES slot per descriptor */
	bus_dmamap_t		bce_ring_map;	/* map covering both descriptor rings */
	bus_dmamap_t		bce_rxdata_map;
	bus_dmamap_t		bce_txdata_map;
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;	/* periodic (1s) tick, runs bce_tick */
};
134 
135 int	bce_probe(struct device *, void *, void *);
136 void	bce_attach(struct device *, struct device *, void *);
137 int	bce_activate(struct device *, int);
138 int	bce_ioctl(struct ifnet *, u_long, caddr_t);
139 void	bce_start(struct ifnet *);
140 void	bce_watchdog(struct ifnet *);
141 int	bce_intr(void *);
142 void	bce_rxintr(struct bce_softc *);
143 void	bce_txintr(struct bce_softc *);
144 int	bce_init(struct ifnet *);
145 void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
146 void	bce_add_rxbuf(struct bce_softc *, int);
147 void	bce_stop(struct ifnet *);
148 void	bce_reset(struct bce_softc *);
149 void	bce_set_filter(struct ifnet *);
150 int	bce_mii_read(struct device *, int, int);
151 void	bce_mii_write(struct device *, int, int, int);
152 void	bce_statchg(struct device *);
153 int	bce_mediachange(struct ifnet *);
154 void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
155 void	bce_tick(void *);
156 
157 #ifdef BCE_DEBUG
158 #define DPRINTF(x)	do {		\
159 	if (bcedebug)			\
160 		printf x;		\
161 } while (/* CONSTCOND */ 0)
162 #define DPRINTFN(n,x)	do {		\
163 	if (bcedebug >= (n))		\
164 		printf x;		\
165 } while (/* CONSTCOND */ 0)
166 int	bcedebug = 0;
167 #else
168 #define DPRINTF(x)
169 #define DPRINTFN(n,x)
170 #endif
171 
/* autoconf(9) attachment and driver descriptors */
struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
};
struct cfdriver bce_cd = {
	NULL, "bce", DV_IFNET
};

/* PCI vendor/product pairs this driver claims */
const struct pci_matchid bce_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};
184 
185 int
186 bce_probe(struct device *parent, void *match, void *aux)
187 {
188 	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
189 	    nitems(bce_devices)));
190 }
191 
/*
 * Attach: map the chip's registers, wake it from power-save if
 * necessary, establish the interrupt, allocate the DMA data buffers
 * and descriptor rings, read the station address, probe the MII, and
 * attach the network interface.
 *
 * On any failure the routine prints a diagnostic, unwinds whatever
 * DMA state was already set up, and returns without attaching.
 */
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;
	int pmreg;
	pcireg_t pmode;
	int error;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			    pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Create the data DMA region and maps.  The region is
	 * constrained below 0x40000000; note the matching +0x40000000
	 * offsets applied to all DMA addresses handed to the chip
	 * elsewhere in this driver.
	 */
	if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
	    uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
	    UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
		printf(": unable to alloc space for ring");
		return;
	}

	/* create a dma map for the RX ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
	    1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_rxdata_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
	    BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
		printf(": unable to load rx ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* create a dma map for the TX ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
	    1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_txdata_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* connect the ring space to the dma map; tx buffers follow the
	 * BCE_NRXDESC rx buffers in bce_data */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
	    sc->bce_data + BCE_NRXDESC * MCLBYTES,
	    BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
		printf(": unable to load tx ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}


	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive, and transmit rings can not share the same
	 * 4k space, however both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * due to the limition above. ??
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag, 2 * PAGE_SIZE,
	    PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		    "error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
	    2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc: rx ring in the first page,
	 * tx ring in the second */
	sc->bce_rx_ring = (struct bce_dma_slot *)kva;
	sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address: read byte-by-byte from the chip's enet registers */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		/* no PHY found: only offer "none" */
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
439 
440 int
441 bce_activate(struct device *self, int act)
442 {
443 	struct bce_softc *sc = (struct bce_softc *)self;
444 	struct ifnet *ifp = &sc->bce_ac.ac_if;
445 
446 	switch (act) {
447 	case DVACT_SUSPEND:
448 		if (ifp->if_flags & IFF_RUNNING)
449 			bce_stop(ifp);
450 		break;
451 	case DVACT_RESUME:
452 		if (ifp->if_flags & IFF_UP) {
453 			bce_init(ifp);
454 			bce_start(ifp);
455 		}
456 		break;
457 	}
458 
459 	return (0);
460 }
461 
462 /* handle media, and ethernet requests */
463 int
464 bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
465 {
466 	struct bce_softc *sc = ifp->if_softc;
467 	struct ifaddr *ifa = (struct ifaddr *) data;
468 	struct ifreq *ifr = (struct ifreq *) data;
469 	int s, error = 0;
470 
471 	s = splnet();
472 
473 	switch (cmd) {
474 	case SIOCSIFADDR:
475 		ifp->if_flags |= IFF_UP;
476 
477 		switch (ifa->ifa_addr->sa_family) {
478 #ifdef INET
479 		case AF_INET:
480 			bce_init(ifp);
481 			arp_ifinit(&sc->bce_ac, ifa);
482 			break;
483 #endif /* INET */
484 		default:
485 			bce_init(ifp);
486 			break;
487 		}
488 		break;
489 
490 	case SIOCSIFFLAGS:
491 		if (ifp->if_flags & IFF_UP)
492 			if (ifp->if_flags & IFF_RUNNING)
493 				bce_set_filter(ifp);
494 			else
495 				bce_init(ifp);
496 		else if (ifp->if_flags & IFF_RUNNING)
497 			bce_stop(ifp);
498 		break;
499 
500 	case SIOCSIFMEDIA:
501 	case SIOCGIFMEDIA:
502 		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
503 		break;
504 
505 	default:
506 		error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
507 	}
508 
509 	if (error == ENETRESET) {
510 		if (ifp->if_flags & IFF_RUNNING)
511 			bce_set_filter(ifp);
512 		error = 0;
513 	}
514 
515 	if (error == 0) {
516 		/* Try to get more packets going. */
517 		bce_start(ifp);
518 	}
519 
520 	splx(s);
521 	return error;
522 }
523 
/*
 * Start packet transmission on the interface.
 *
 * Each outgoing packet is copied whole into its own MCLBYTES-sized
 * slot of the pre-allocated DMA buffer (tx slots follow the rx slots
 * in bce_data), so one descriptor per packet always suffices.
 */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	u_int32_t ctrl;
	int txstart;
	int txsfree;
	int newpkts = 0;

	/*
	 * do not start another if currently transmitting, and more
	 * descriptors(tx slots) are needed for next packet.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available; one slot is always
	 * kept unused so a full ring is distinguishable from an empty one */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * copy mbuf chain into DMA memory buffer.
		 */
		m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
		    (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
		ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
		ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;

		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
		/* mbuf no longer needed: the data now lives in bce_data */
		m_freem(m0);

		/* Sync the data DMA map (offset is relative to the tx
		 * data map, which starts after the rx slots). */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;

		if (sc->bce_txsnext == BCE_NTXDESC - 1)
			ctrl |= CTRL_EOT;
		sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
		sc->bce_tx_ring[sc->bce_txsnext].addr =
		    htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
		    sc->bce_txsnext * MCLBYTES + 0x40000000);	/* MAGIC */
		if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
			sc->bce_txsnext = 0;
		else
			sc->bce_txsnext++;
		txsfree--;

		/* sync descriptors being used (tx ring is in the second
		 * page of the ring map) */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip: the descriptor pointer is
		 * advanced past the newly filled slot. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
619 
620 /* Watchdog timer handler. */
621 void
622 bce_watchdog(struct ifnet *ifp)
623 {
624 	struct bce_softc *sc = ifp->if_softc;
625 
626 	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
627 	ifp->if_oerrors++;
628 
629 	(void) bce_init(ifp);
630 
631 	/* Try to get more packets going. */
632 	bce_start(ifp);
633 }
634 
/*
 * Interrupt handler.  Loops reading and acknowledging the interrupt
 * status until no enabled cause remains, dispatching rx/tx work.
 * Any error cause forces a full bce_init() once the loop exits.
 * Returns non-zero if the interrupt was ours.
 */
int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;


	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt (write-1-to-clear the bits we saw) */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts: report each, then reinit the chip */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
706 
707 /* Receive interrupt handler */
708 void
709 bce_rxintr(struct bce_softc *sc)
710 {
711 	struct ifnet *ifp = &sc->bce_ac.ac_if;
712 	struct rx_pph *pph;
713 	struct mbuf *m;
714 	int curr;
715 	int len;
716 	int i;
717 
718 	/* get pointer to active receive slot */
719 	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
720 	    & RS_CD_MASK;
721 	curr = curr / sizeof(struct bce_dma_slot);
722 	if (curr >= BCE_NRXDESC)
723 		curr = BCE_NRXDESC - 1;
724 
725 	/* process packets up to but not current packet being worked on */
726 	for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
727 		/* complete any post dma memory ops on packet */
728 		bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
729 		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);
730 
731 		/*
732 		 * If the packet had an error, simply recycle the buffer,
733 		 * resetting the len, and flags.
734 		 */
735 		pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
736 		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
737 			ifp->if_ierrors++;
738 			pph->len = 0;
739 			pph->flags = 0;
740 			continue;
741 		}
742 		/* receive the packet */
743 		len = pph->len;
744 		if (len == 0)
745 			continue;	/* no packet if empty */
746 		pph->len = 0;
747 		pph->flags = 0;
748 
749  		/*
750 		 * The chip includes the CRC with every packet.  Trim
751 		 * it off here.
752 		 */
753 		len -= ETHER_CRC_LEN;
754 
755 		m = m_devget(sc->bce_data + i * MCLBYTES +
756 		    BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN, ifp);
757 		ifp->if_ipackets++;
758 
759 #if NBPFILTER > 0
760 		/*
761 		 * Pass this up to any BPF listeners, but only
762 		 * pass it up the stack if it's for us.
763 		 */
764 		if (ifp->if_bpf)
765 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
766 #endif
767 
768 		/* Pass it on. */
769 		ether_input_mbuf(ifp, m);
770 
771 		/* re-check current in case it changed */
772 		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
773 		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
774 		    sizeof(struct bce_dma_slot);
775 		if (curr >= BCE_NRXDESC)
776 			curr = BCE_NRXDESC - 1;
777 	}
778 	sc->bce_rxin = curr;
779 }
780 
781 /* Transmit interrupt handler */
782 void
783 bce_txintr(struct bce_softc *sc)
784 {
785 	struct ifnet   *ifp = &sc->bce_ac.ac_if;
786 	int curr;
787 	int i;
788 
789 	ifp->if_flags &= ~IFF_OACTIVE;
790 
791 	/*
792 	 * Go through the Tx list and free mbufs for those
793 	 * frames which have been transmitted.
794 	 */
795 	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
796 	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
797 	curr = curr / sizeof(struct bce_dma_slot);
798 	if (curr >= BCE_NTXDESC)
799 		curr = BCE_NTXDESC - 1;
800 	for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
801 		/* do any post dma memory ops on transmit data */
802 		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
803 		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
804 		ifp->if_opackets++;
805 	}
806 	sc->bce_txin = curr;
807 
808 	/*
809 	 * If there are no more pending transmissions, cancel the watchdog
810 	 * timer
811 	 */
812 	if (sc->bce_txsnext == sc->bce_txin)
813 		ifp->if_timer = 0;
814 }
815 
/*
 * Initialize (or reinitialize) the interface: stop any activity,
 * program the Sonics backplane window, reset the chip, build fresh
 * rx/tx rings, program the filter and DMA engines, and start the MAC.
 * Always returns 0.
 */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp);

	/* enable pci inerrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* reset or clear powerdown control bit  */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);	/* MAGIC */

	/* setup packet filter */
	bce_set_filter(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit; tx ring lives in the second page of the ring
	 * map (note the +0x40000000 DMA address offset used throughout) */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive: the pre-packet header size is programmed into
	 * the rx control register alongside the enable bit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);		/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++)
		bce_add_rxbuf(sc, i);

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma: hand the whole ring to the chip */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add_sec(&sc->bce_timeout, 1);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
936 
937 /* add a mac address to packet filter */
938 void
939 bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
940 {
941 	int i;
942 	u_int32_t rval;
943 
944 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
945 	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
946 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
947 	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
948 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
949 	    idx << 16 | 8);	/* MAGIC */
950 	/* wait for write to complete */
951 	for (i = 0; i < 100; i++) {
952 		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
953 		    BCE_FILT_CTL);
954 		if (!(rval & 0x80000000))	/* MAGIC */
955 			break;
956 		delay(10);
957 	}
958 	if (i == 100) {
959 		printf("%s: timed out writing pkt filter ctl\n",
960 		   sc->bce_dev.dv_xname);
961 	}
962 }
963 
/*
 * (Re)arm the indicated receive descriptor.  The data buffer is the
 * fixed MCLBYTES slot at index `idx' of bce_data; only the descriptor
 * and the slot's pre-packet header word are rewritten.
 */
void
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];

	/* prepare the buffer for a fresh DMA read */
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
	    MCLBYTES, BUS_DMASYNC_PREREAD);

	/* clear the pre-packet header status word (len + flags) so a
	 * stale value is never mistaken for a received frame */
	*(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
	bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
	    idx * MCLBYTES + 0x40000000);
	/* last descriptor marks the end of the table so the chip wraps */
	if (idx != (BCE_NRXDESC - 1))
		bced->ctrl = htole32(BCE_RXBUF_LEN);
	else
		bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);

	/* flush the descriptor itself (rx ring is the first page) */
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
	    sizeof(struct bce_dma_slot) * idx,
	    sizeof(struct bce_dma_slot),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

}
987 
988 /* Stop transmission on the interface */
989 void
990 bce_stop(struct ifnet *ifp)
991 {
992 	struct bce_softc *sc = ifp->if_softc;
993 	int i;
994 	u_int32_t val;
995 
996 	/* Stop the 1 second timer */
997 	timeout_del(&sc->bce_timeout);
998 
999 	/* Mark the interface down and cancel the watchdog timer. */
1000 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1001 	ifp->if_timer = 0;
1002 
1003 	/* Down the MII. */
1004 	mii_down(&sc->bce_mii);
1005 
1006 	/* Disable interrupts. */
1007 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
1008 	sc->bce_intmask = 0;
1009 	delay(10);
1010 
1011 	/* Disable emac */
1012 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
1013 	for (i = 0; i < 200; i++) {
1014 		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1015 		    BCE_ENET_CTL);
1016 		if (!(val & EC_ED))
1017 			break;
1018 		delay(10);
1019 	}
1020 
1021 	/* Stop the DMA */
1022 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
1023 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
1024 	delay(10);
1025 }
1026 
/*
 * Reset the chip: quiesce the ethernet core if it is live, reset the
 * SiliconBackplane core, clear error state, and leave the core clocked
 * and out of reset with the PHY enabled.
 */
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if SB core is up (clocked, not in reset, not rejecting) */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		/* core is live: quiesce DMA interrupts before resetting */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac and wait for the disable to take effect */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait for the engine to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		/* clear the receive status */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		   BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac and wait for the reset to complete */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		u_int32_t reg_win;

		/*
		 * Core is down: the Sonics config registers must be set up
		 * first.  Remap the pci registers to the Sonics config
		 * registers through the BAR0 window.
		 */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit so the core stops accepting transfers */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/*
		 * Wait until busy is clear.  0x4 is presumably the
		 * SBTMSTATEHI busy bit -- TODO confirm against the Sonics
		 * SiliconBackplane documentation.
		 */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		/* read back to flush the write before delaying */
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock while the core is held in reset */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* drop force-gated-clock; leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/*
	 * Initialize MDC preamble, frequency.  0x8d presumably encodes
	 * preamble-enable plus the MDC clock divider -- TODO confirm.
	 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);	/* MAGIC */

	/* enable phy, differs for internal, and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}
1198 
1199 /* Set up the receive filter. */
1200 void
1201 bce_set_filter(struct ifnet *ifp)
1202 {
1203 	struct bce_softc *sc = ifp->if_softc;
1204 
1205 	if (ifp->if_flags & IFF_PROMISC) {
1206 		ifp->if_flags |= IFF_ALLMULTI;
1207 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
1208 		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL)
1209 		    | ERC_PE);
1210 	} else {
1211 		ifp->if_flags &= ~IFF_ALLMULTI;
1212 
1213 		/* turn off promiscuous */
1214 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
1215 		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1216 		    BCE_RX_CTL) & ~ERC_PE);
1217 
1218 		/* enable/disable broadcast */
1219 		if (ifp->if_flags & IFF_BROADCAST)
1220 			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1221 			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
1222 			    sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB);
1223 		else
1224 			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1225 			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
1226 			    sc->bce_bhandle, BCE_RX_CTL) | ERC_DB);
1227 
1228 		/* disable the filter */
1229 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1230 		    0);
1231 
1232 		/* add our own address */
1233 		bce_add_mac(sc, sc->bce_ac.ac_enaddr, 0);
1234 
1235 		/* for now accept all multicast */
1236 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
1237 		bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL) |
1238 		    ERC_AM);
1239 		ifp->if_flags |= IFF_ALLMULTI;
1240 
1241 		/* enable the filter */
1242 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1243 		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1244 		    BCE_FILT_CTL) | 1);
1245 	}
1246 }
1247 
1248 /* Read a PHY register on the MII. */
1249 int
1250 bce_mii_read(struct device *self, int phy, int reg)
1251 {
1252 	struct bce_softc *sc = (struct bce_softc *) self;
1253 	int i;
1254 	u_int32_t val;
1255 
1256 	/* clear mii_int */
1257 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1258 	    BCE_MIINTR);
1259 
1260 	/* Read the PHY register */
1261 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1262 	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1263 	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */
1264 
1265 	for (i = 0; i < BCE_TIMEOUT; i++) {
1266 		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1267 		    BCE_MI_STS);
1268 		if (val & BCE_MIINTR)
1269 			break;
1270 		delay(10);
1271 	}
1272 	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1273 	if (i == BCE_TIMEOUT) {
1274 		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
1275 		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1276 		return (0);
1277 	}
1278 	return (val & BCE_MICOMM_DATA);
1279 }
1280 
1281 /* Write a PHY register on the MII */
1282 void
1283 bce_mii_write(struct device *self, int phy, int reg, int val)
1284 {
1285 	struct bce_softc *sc = (struct bce_softc *) self;
1286 	int i;
1287 	u_int32_t rval;
1288 
1289 	/* clear mii_int */
1290 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1291 	    BCE_MIINTR);
1292 
1293 	/* Write the PHY register */
1294 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1295 	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1296 	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
1297 	    BCE_MIPHY(phy) | BCE_MIREG(reg));
1298 
1299 	/* wait for write to complete */
1300 	for (i = 0; i < BCE_TIMEOUT; i++) {
1301 		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1302 		    BCE_MI_STS);
1303 		if (rval & BCE_MIINTR)
1304 			break;
1305 		delay(10);
1306 	}
1307 	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1308 	if (i == BCE_TIMEOUT) {
1309 		printf("%s: PHY timed out writing phy %d, reg %d, val "
1310 		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1311 	}
1312 }
1313 
1314 /* sync hardware duplex mode to software state */
1315 void
1316 bce_statchg(struct device *self)
1317 {
1318 	struct bce_softc *sc = (struct bce_softc *) self;
1319 	u_int32_t reg;
1320 
1321 	/* if needed, change register to match duplex mode */
1322 	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
1323 	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
1324 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1325 		    reg | EXC_FD);
1326 	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
1327 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1328 		    reg & ~EXC_FD);
1329 
1330 	/*
1331 	 * Enable activity led.
1332 	 * XXX This should be in a phy driver, but not currently.
1333 	 */
1334 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1335 	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	/* MAGIC */
1336 	/* enable traffic meter led mode */
1337 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1338 	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	/* MAGIC */
1339 }
1340 
1341 /* Set hardware to newly-selected media */
1342 int
1343 bce_mediachange(struct ifnet *ifp)
1344 {
1345 	struct bce_softc *sc = ifp->if_softc;
1346 
1347 	if (ifp->if_flags & IFF_UP)
1348 		mii_mediachg(&sc->bce_mii);
1349 	return (0);
1350 }
1351 
1352 /* Get the current interface media status */
1353 void
1354 bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1355 {
1356 	struct bce_softc *sc = ifp->if_softc;
1357 
1358 	mii_pollstat(&sc->bce_mii);
1359 	ifmr->ifm_active = sc->bce_mii.mii_media_active;
1360 	ifmr->ifm_status = sc->bce_mii.mii_media_status;
1361 }
1362 
1363 /* One second timer, checks link status */
1364 void
1365 bce_tick(void *v)
1366 {
1367 	struct bce_softc *sc = v;
1368 	int s;
1369 
1370 	s = splnet();
1371 	mii_tick(&sc->bce_mii);
1372 	splx(s);
1373 
1374 	timeout_add_sec(&sc->bce_timeout, 1);
1375 }
1376