xref: /openbsd-src/sys/dev/pci/if_bce.c (revision d13be5d47e4149db2549a9828e244d59dbc43f15)
1 /* $OpenBSD: if_bce.c,v 1.35 2011/04/03 15:36:02 jasper Exp $ */
2 /* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $	 */
3 
4 /*
5  * Copyright (c) 2003 Clifford Wright. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Broadcom BCM440x 10/100 ethernet (broadcom.com)
33  * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com)
34  *
35  * Cliff Wright cliff@snipe444.org
36  */
37 
38 #include "bpfilter.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/timeout.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/socket.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 
54 #ifdef INET
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/in_var.h>
58 #include <netinet/ip.h>
59 #include <netinet/if_ether.h>
60 #endif
61 #if NBPFILTER > 0
62 #include <net/bpf.h>
63 #endif
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 #include <dev/pci/pcidevs.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 #include <dev/mii/miidevs.h>
72 #include <dev/mii/brgphyreg.h>
73 
74 #include <dev/pci/if_bcereg.h>
75 
76 #include <uvm/uvm.h>
77 
78 /* ring descriptor */
79 struct bce_dma_slot {
80 	u_int32_t ctrl;
81 	u_int32_t addr;
82 };
83 #define CTRL_BC_MASK	0x1fff	/* buffer byte count */
84 #define CTRL_EOT	0x10000000	/* end of descriptor table */
85 #define CTRL_IOC	0x20000000	/* interrupt on completion */
86 #define CTRL_EOF	0x40000000	/* end of frame */
87 #define CTRL_SOF	0x80000000	/* start of frame */
88 
89 #define BCE_RXBUF_LEN	(MCLBYTES - 4)
90 
91 /* Packet status is returned in a pre-packet header */
92 struct rx_pph {
93 	u_int16_t len;
94 	u_int16_t flags;
95 	u_int16_t pad[12];
96 };
97 
98 #define	BCE_PREPKT_HEADER_SIZE		30
99 
100 /* packet status flags bits */
101 #define RXF_NO				0x8	/* odd number of nibbles */
102 #define RXF_RXER			0x4	/* receive symbol error */
103 #define RXF_CRC				0x2	/* crc error */
104 #define RXF_OV				0x1	/* fifo overflow */
105 
106 /* number of descriptors used in a ring */
107 #define BCE_NRXDESC		64
108 #define BCE_NTXDESC		64
109 
110 #define BCE_TIMEOUT		100	/* # 10us for mii read/write */
111 
/* Per-instance driver state. */
struct bce_softc {
	struct device		bce_dev;	/* base device; must be first
						 * (softc is cast to/from it) */
	bus_space_tag_t		bce_btag;	/* register space tag */
	bus_space_handle_t	bce_bhandle;	/* register space handle */
	bus_dma_tag_t		bce_dmatag;	/* DMA tag from PCI attach args */
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;	/* interrupt handler cookie */
	struct pci_attach_args	bce_pa;		/* saved PCI attach args */
	struct mii_data		bce_mii;	/* MII/PHY state */
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	caddr_t			bce_data;	/* rx then tx packet buffers,
						 * one MCLBYTES slot per desc */
	bus_dmamap_t		bce_ring_map;	/* map covering both rings */
	bus_dmamap_t		bce_rxdata_map;	/* map for rx data buffers */
	bus_dmamap_t		bce_txdata_map;	/* map for tx data buffers */
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;	/* one-second tick timer */
};
135 
136 int	bce_probe(struct device *, void *, void *);
137 void	bce_attach(struct device *, struct device *, void *);
138 int	bce_activate(struct device *, int);
139 int	bce_ioctl(struct ifnet *, u_long, caddr_t);
140 void	bce_start(struct ifnet *);
141 void	bce_watchdog(struct ifnet *);
142 int	bce_intr(void *);
143 void	bce_rxintr(struct bce_softc *);
144 void	bce_txintr(struct bce_softc *);
145 int	bce_init(struct ifnet *);
146 void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
147 void	bce_add_rxbuf(struct bce_softc *, int);
148 void	bce_stop(struct ifnet *);
149 void	bce_reset(struct bce_softc *);
150 void	bce_set_filter(struct ifnet *);
151 int	bce_mii_read(struct device *, int, int);
152 void	bce_mii_write(struct device *, int, int, int);
153 void	bce_statchg(struct device *);
154 int	bce_mediachange(struct ifnet *);
155 void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
156 void	bce_tick(void *);
157 
158 #ifdef BCE_DEBUG
159 #define DPRINTF(x)	do {		\
160 	if (bcedebug)			\
161 		printf x;		\
162 } while (/* CONSTCOND */ 0)
163 #define DPRINTFN(n,x)	do {		\
164 	if (bcedebug >= (n))		\
165 		printf x;		\
166 } while (/* CONSTCOND */ 0)
167 int	bcedebug = 0;
168 #else
169 #define DPRINTF(x)
170 #define DPRINTFN(n,x)
171 #endif
172 
173 struct cfattach bce_ca = {
174 	sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
175 };
176 struct cfdriver bce_cd = {
177 	NULL, "bce", DV_IFNET
178 };
179 
180 const struct pci_matchid bce_devices[] = {
181 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
182 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
183 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
184 };
185 
/* Match any of the supported BCM440x PCI IDs. */
int
bce_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
	    nitems(bce_devices)));
}
192 
/*
 * Attach: map registers, wake the chip from power save, hook the
 * interrupt, allocate DMA-able packet buffers and descriptor rings,
 * read the MAC address, and attach the MII and network interface.
 * On any failure the routine prints a diagnostic, unwinds what was
 * allocated so far, and returns without attaching.
 */
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;
	int pmreg;
	pcireg_t pmode;
	int error;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		/* low two bits of PMCSR hold the current power state */
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			    pmode);
			/* force the device back to state D0 */
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Create the data DMA region and maps.  One MCLBYTES buffer per
	 * descriptor, rx buffers first then tx.  The physical-address
	 * ceiling (0x40000000 - 1) keeps the buffers below 1GB, since
	 * descriptor addresses are later programmed with a 0x40000000
	 * offset added.
	 */
	if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
	    uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
	    UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
		printf(": unable to alloc space for ring");
		return;
	}

	/* create a dma map for the RX ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
	    1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_rxdata_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
	    BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
		printf(": unable to load rx ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* create a dma map for the TX ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
	    1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_txdata_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
	    sc->bce_data + BCE_NRXDESC * MCLBYTES,
	    BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
		printf(": unable to load tx ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}


	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive, and transmit rings can not share the same
	 * 4k space, however both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * due to the limitation above. ??
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag, 2 * PAGE_SIZE,
	    PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		    "error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
	    2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc: rx ring first page, tx ring second */
	sc->bce_rx_ring = (struct bce_dma_slot *)kva;
	sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address, one byte per chip register */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	/* if no PHY was found, advertise "none"; otherwise autoselect */
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* one-second tick for MII status polling; armed in bce_init() */
	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
440 
441 int
442 bce_activate(struct device *self, int act)
443 {
444 	struct bce_softc *sc = (struct bce_softc *)self;
445 	struct ifnet *ifp = &sc->bce_ac.ac_if;
446 
447 	switch (act) {
448 	case DVACT_SUSPEND:
449 		if (ifp->if_flags & IFF_RUNNING)
450 			bce_stop(ifp);
451 		break;
452 	case DVACT_RESUME:
453 		if (ifp->if_flags & IFF_UP) {
454 			bce_init(ifp);
455 			bce_start(ifp);
456 		}
457 		break;
458 	}
459 
460 	return (0);
461 }
462 
463 /* handle media, and ethernet requests */
464 int
465 bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
466 {
467 	struct bce_softc *sc = ifp->if_softc;
468 	struct ifaddr *ifa = (struct ifaddr *) data;
469 	struct ifreq *ifr = (struct ifreq *) data;
470 	int s, error = 0;
471 
472 	s = splnet();
473 
474 	switch (cmd) {
475 	case SIOCSIFADDR:
476 		ifp->if_flags |= IFF_UP;
477 
478 		switch (ifa->ifa_addr->sa_family) {
479 #ifdef INET
480 		case AF_INET:
481 			bce_init(ifp);
482 			arp_ifinit(&sc->bce_ac, ifa);
483 			break;
484 #endif /* INET */
485 		default:
486 			bce_init(ifp);
487 			break;
488 		}
489 		break;
490 
491 	case SIOCSIFFLAGS:
492 		if (ifp->if_flags & IFF_UP)
493 			if (ifp->if_flags & IFF_RUNNING)
494 				bce_set_filter(ifp);
495 			else
496 				bce_init(ifp);
497 		else if (ifp->if_flags & IFF_RUNNING)
498 			bce_stop(ifp);
499 		break;
500 
501 	case SIOCSIFMEDIA:
502 	case SIOCGIFMEDIA:
503 		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
504 		break;
505 
506 	default:
507 		error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
508 	}
509 
510 	if (error == ENETRESET) {
511 		if (ifp->if_flags & IFF_RUNNING)
512 			bce_set_filter(ifp);
513 		error = 0;
514 	}
515 
516 	if (error == 0) {
517 		/* Try to get more packets going. */
518 		bce_start(ifp);
519 	}
520 
521 	splx(s);
522 	return error;
523 }
524 
/*
 * Start packet transmission on the interface.  Dequeues packets from
 * the send queue, copies each into its per-slot DMA buffer, fills in
 * the descriptor, and hands it to the chip until the queue drains or
 * the ring fills.
 */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	u_int32_t ctrl;
	int txstart;
	int txsfree;
	int newpkts = 0;

	/*
	 * do not start another if currently transmitting, and more
	 * descriptors(tx slots) are needed for next packet.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available (one slot is always
	 * kept empty to distinguish a full ring from an empty one) */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * copy mbuf chain into DMA memory buffer; each tx slot
		 * owns one MCLBYTES region located after the rx buffers.
		 */
		m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
		    (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
		ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
		/* whole packet fits in one descriptor */
		ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;

		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
		/* mbuf no longer needed; the data was copied above */
		m_freem(m0);

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;

		/* last descriptor in the ring marks the wrap point */
		if (sc->bce_txsnext == BCE_NTXDESC - 1)
			ctrl |= CTRL_EOT;
		sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
		sc->bce_tx_ring[sc->bce_txsnext].addr =
		    htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
		    sc->bce_txsnext * MCLBYTES + 0x40000000);	/* MAGIC */
		if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
			sc->bce_txsnext = 0;
		else
			sc->bce_txsnext++;
		txsfree--;

		/* sync descriptors being used; the tx ring lives in the
		 * second page of the ring map (after the rx ring) */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
620 
/*
 * Watchdog timer handler.  Invoked when a transmit armed in
 * bce_start() fails to complete in time; resets the chip and
 * restarts output.
 */
void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
	ifp->if_oerrors++;

	/* re-initialize the hardware to recover */
	(void) bce_init(ifp);

	/* Try to get more packets going. */
	bce_start(ifp);
}
635 
/*
 * Interrupt handler.  Loops acknowledging and servicing interrupt
 * causes until none remain; any error cause forces a re-init of the
 * chip.  Returns nonzero if the interrupt was ours.
 */
int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;


	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt (write-1-to-clear the status bits) */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts: report each cause, then re-init below */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
707 
708 /* Receive interrupt handler */
709 void
710 bce_rxintr(struct bce_softc *sc)
711 {
712 	struct ifnet *ifp = &sc->bce_ac.ac_if;
713 	struct rx_pph *pph;
714 	struct mbuf *m;
715 	int curr;
716 	int len;
717 	int i;
718 
719 	/* get pointer to active receive slot */
720 	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
721 	    & RS_CD_MASK;
722 	curr = curr / sizeof(struct bce_dma_slot);
723 	if (curr >= BCE_NRXDESC)
724 		curr = BCE_NRXDESC - 1;
725 
726 	/* process packets up to but not current packet being worked on */
727 	for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
728 		/* complete any post dma memory ops on packet */
729 		bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
730 		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);
731 
732 		/*
733 		 * If the packet had an error, simply recycle the buffer,
734 		 * resetting the len, and flags.
735 		 */
736 		pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
737 		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
738 			ifp->if_ierrors++;
739 			pph->len = 0;
740 			pph->flags = 0;
741 			continue;
742 		}
743 		/* receive the packet */
744 		len = pph->len;
745 		if (len == 0)
746 			continue;	/* no packet if empty */
747 		pph->len = 0;
748 		pph->flags = 0;
749 
750  		/*
751 		 * The chip includes the CRC with every packet.  Trim
752 		 * it off here.
753 		 */
754 		len -= ETHER_CRC_LEN;
755 
756 		m = m_devget(sc->bce_data + i * MCLBYTES +
757 		    BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN, ifp, NULL);
758 		ifp->if_ipackets++;
759 
760 #if NBPFILTER > 0
761 		/*
762 		 * Pass this up to any BPF listeners, but only
763 		 * pass it up the stack if it's for us.
764 		 */
765 		if (ifp->if_bpf)
766 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
767 #endif
768 
769 		/* Pass it on. */
770 		ether_input_mbuf(ifp, m);
771 
772 		/* re-check current in case it changed */
773 		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
774 		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
775 		    sizeof(struct bce_dma_slot);
776 		if (curr >= BCE_NRXDESC)
777 			curr = BCE_NRXDESC - 1;
778 	}
779 	sc->bce_rxin = curr;
780 }
781 
782 /* Transmit interrupt handler */
783 void
784 bce_txintr(struct bce_softc *sc)
785 {
786 	struct ifnet   *ifp = &sc->bce_ac.ac_if;
787 	int curr;
788 	int i;
789 
790 	ifp->if_flags &= ~IFF_OACTIVE;
791 
792 	/*
793 	 * Go through the Tx list and free mbufs for those
794 	 * frames which have been transmitted.
795 	 */
796 	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
797 	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
798 	curr = curr / sizeof(struct bce_dma_slot);
799 	if (curr >= BCE_NTXDESC)
800 		curr = BCE_NTXDESC - 1;
801 	for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
802 		/* do any post dma memory ops on transmit data */
803 		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
804 		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
805 		ifp->if_opackets++;
806 	}
807 	sc->bce_txin = curr;
808 
809 	/*
810 	 * If there are no more pending transmissions, cancel the watchdog
811 	 * timer
812 	 */
813 	if (sc->bce_txsnext == sc->bce_txin)
814 		ifp->if_timer = 0;
815 }
816 
/*
 * Initialize the interface: stop any current activity, enable the
 * SonicsBackplane-to-PCI bridge, reset the chip, program both DMA
 * rings, set the packet filter and frame limits, unmask interrupts,
 * and start the MAC.  Always returns 0.
 */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* reset or clear powerdown control bit  */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);	/* MAGIC */

	/* setup packet filter */
	bce_set_filter(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit; tx ring is in the second page of the ring map */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);		/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++)
		bce_add_rxbuf(sc, i);

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add_sec(&sc->bce_timeout, 1);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
937 
/*
 * Program one MAC address into the chip's packet filter at the given
 * CAM index, then poll until the hardware write completes (up to
 * 100 * 10us = 1ms).
 */
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
	int i;
	u_int32_t rval;

	/* low 4 bytes of the address */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	/* high 2 bytes, plus the entry-valid bit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	/* select the CAM slot and trigger the write */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC */
			break;
		delay(10);
	}
	if (i == 100) {
		printf("%s: timed out writing pkt filter ctl\n",
		   sc->bce_dev.dv_xname);
	}
}
964 
/*
 * Add a receive buffer to the indicated descriptor: clear the
 * pre-packet status header, point the descriptor at the slot's DMA
 * buffer, and sync both buffer and descriptor for device access.
 */
void
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
	    MCLBYTES, BUS_DMASYNC_PREREAD);

	/* zero the status header's len/flags so stale data isn't re-read */
	*(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
	/* chip DMA addresses carry a 0x40000000 offset (see bce_attach) */
	bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
	    idx * MCLBYTES + 0x40000000);
	/* the last descriptor marks the end of the table */
	if (idx != (BCE_NRXDESC - 1))
		bced->ctrl = htole32(BCE_RXBUF_LEN);
	else
		bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
	    sizeof(struct bce_dma_slot) * idx,
	    sizeof(struct bce_dma_slot),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

}
988 
989 /* Stop transmission on the interface */
990 void
991 bce_stop(struct ifnet *ifp)
992 {
993 	struct bce_softc *sc = ifp->if_softc;
994 	int i;
995 	u_int32_t val;
996 
997 	/* Stop the 1 second timer */
998 	timeout_del(&sc->bce_timeout);
999 
1000 	/* Mark the interface down and cancel the watchdog timer. */
1001 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1002 	ifp->if_timer = 0;
1003 
1004 	/* Down the MII. */
1005 	mii_down(&sc->bce_mii);
1006 
1007 	/* Disable interrupts. */
1008 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
1009 	sc->bce_intmask = 0;
1010 	delay(10);
1011 
1012 	/* Disable emac */
1013 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
1014 	for (i = 0; i < 200; i++) {
1015 		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1016 		    BCE_ENET_CTL);
1017 		if (!(val & EC_ED))
1018 			break;
1019 		delay(10);
1020 	}
1021 
1022 	/* Stop the DMA */
1023 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
1024 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
1025 	delay(10);
1026 }
1027 
/*
 * Reset the chip's ethernet core through the Sonics SiliconBackplane
 * (SB) control registers: quiesce the mac and DMA engines if the core
 * is running, pulse the core reset with clocks forced on, clear any
 * latched error state, and finally bring the PHY out of reset.
 */
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if SB core is up (clocked, not held in reset or reject) */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		/* core is live: quiesce it before resetting */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac; poll up to 2ms for the bit to clear */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		   BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac; poll until the self-clearing bit drops */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		/* core is down: do first-time backplane setup instead */
		u_int32_t reg_win;

		/* remap the pci registers to the Sonics config registers */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit so the backplane stops issuing requests */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		/*
		 * read back before delaying; result unused — presumably
		 * flushes the posted write (NOTE(review): confirm)
		 */
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock (force-gated) while still holding the core in reset */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled, drop force-gating */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);	/* MAGIC */

	/* enable phy, differs for internal, and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}
1199 
1200 /* Set up the receive filter. */
1201 void
1202 bce_set_filter(struct ifnet *ifp)
1203 {
1204 	struct bce_softc *sc = ifp->if_softc;
1205 
1206 	if (ifp->if_flags & IFF_PROMISC) {
1207 		ifp->if_flags |= IFF_ALLMULTI;
1208 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
1209 		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL)
1210 		    | ERC_PE);
1211 	} else {
1212 		ifp->if_flags &= ~IFF_ALLMULTI;
1213 
1214 		/* turn off promiscuous */
1215 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
1216 		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1217 		    BCE_RX_CTL) & ~ERC_PE);
1218 
1219 		/* enable/disable broadcast */
1220 		if (ifp->if_flags & IFF_BROADCAST)
1221 			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1222 			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
1223 			    sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB);
1224 		else
1225 			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1226 			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
1227 			    sc->bce_bhandle, BCE_RX_CTL) | ERC_DB);
1228 
1229 		/* disable the filter */
1230 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1231 		    0);
1232 
1233 		/* add our own address */
1234 		bce_add_mac(sc, sc->bce_ac.ac_enaddr, 0);
1235 
1236 		/* for now accept all multicast */
1237 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
1238 		bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL) |
1239 		    ERC_AM);
1240 		ifp->if_flags |= IFF_ALLMULTI;
1241 
1242 		/* enable the filter */
1243 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1244 		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1245 		    BCE_FILT_CTL) | 1);
1246 	}
1247 }
1248 
1249 /* Read a PHY register on the MII. */
1250 int
1251 bce_mii_read(struct device *self, int phy, int reg)
1252 {
1253 	struct bce_softc *sc = (struct bce_softc *) self;
1254 	int i;
1255 	u_int32_t val;
1256 
1257 	/* clear mii_int */
1258 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1259 	    BCE_MIINTR);
1260 
1261 	/* Read the PHY register */
1262 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1263 	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1264 	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */
1265 
1266 	for (i = 0; i < BCE_TIMEOUT; i++) {
1267 		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1268 		    BCE_MI_STS);
1269 		if (val & BCE_MIINTR)
1270 			break;
1271 		delay(10);
1272 	}
1273 	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1274 	if (i == BCE_TIMEOUT) {
1275 		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
1276 		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1277 		return (0);
1278 	}
1279 	return (val & BCE_MICOMM_DATA);
1280 }
1281 
1282 /* Write a PHY register on the MII */
1283 void
1284 bce_mii_write(struct device *self, int phy, int reg, int val)
1285 {
1286 	struct bce_softc *sc = (struct bce_softc *) self;
1287 	int i;
1288 	u_int32_t rval;
1289 
1290 	/* clear mii_int */
1291 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1292 	    BCE_MIINTR);
1293 
1294 	/* Write the PHY register */
1295 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1296 	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1297 	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
1298 	    BCE_MIPHY(phy) | BCE_MIREG(reg));
1299 
1300 	/* wait for write to complete */
1301 	for (i = 0; i < BCE_TIMEOUT; i++) {
1302 		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1303 		    BCE_MI_STS);
1304 		if (rval & BCE_MIINTR)
1305 			break;
1306 		delay(10);
1307 	}
1308 	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1309 	if (i == BCE_TIMEOUT) {
1310 		printf("%s: PHY timed out writing phy %d, reg %d, val "
1311 		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1312 	}
1313 }
1314 
1315 /* sync hardware duplex mode to software state */
1316 void
1317 bce_statchg(struct device *self)
1318 {
1319 	struct bce_softc *sc = (struct bce_softc *) self;
1320 	u_int32_t reg;
1321 
1322 	/* if needed, change register to match duplex mode */
1323 	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
1324 	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
1325 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1326 		    reg | EXC_FD);
1327 	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
1328 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1329 		    reg & ~EXC_FD);
1330 
1331 	/*
1332 	 * Enable activity led.
1333 	 * XXX This should be in a phy driver, but not currently.
1334 	 */
1335 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1336 	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	/* MAGIC */
1337 	/* enable traffic meter led mode */
1338 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1339 	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	/* MAGIC */
1340 }
1341 
1342 /* Set hardware to newly-selected media */
1343 int
1344 bce_mediachange(struct ifnet *ifp)
1345 {
1346 	struct bce_softc *sc = ifp->if_softc;
1347 
1348 	if (ifp->if_flags & IFF_UP)
1349 		mii_mediachg(&sc->bce_mii);
1350 	return (0);
1351 }
1352 
1353 /* Get the current interface media status */
1354 void
1355 bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1356 {
1357 	struct bce_softc *sc = ifp->if_softc;
1358 
1359 	mii_pollstat(&sc->bce_mii);
1360 	ifmr->ifm_active = sc->bce_mii.mii_media_active;
1361 	ifmr->ifm_status = sc->bce_mii.mii_media_status;
1362 }
1363 
1364 /* One second timer, checks link status */
1365 void
1366 bce_tick(void *v)
1367 {
1368 	struct bce_softc *sc = v;
1369 	int s;
1370 
1371 	s = splnet();
1372 	mii_tick(&sc->bce_mii);
1373 	splx(s);
1374 
1375 	timeout_add_sec(&sc->bce_timeout, 1);
1376 }
1377