/*	$OpenBSD: hme.c,v 1.80 2016/04/13 10:49:26 mpi Exp $	*/
/*	$NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include "bpfilter.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

#define	HME_RX_OFFSET	2

void		hme_start(struct ifnet *);
void		hme_stop(struct hme_softc *, int);
int		hme_ioctl(struct ifnet *, u_long, caddr_t);
void		hme_tick(void *);
void		hme_watchdog(struct ifnet *);
void		hme_init(struct hme_softc *);
void		hme_meminit(struct hme_softc *);
void		hme_mifinit(struct hme_softc *);
void		hme_reset(struct hme_softc *);
void		hme_iff(struct hme_softc *);
void		hme_fill_rx_ring(struct hme_softc *);
int		hme_newbuf(struct hme_softc *, struct hme_sxd *);

/* MII methods & callbacks */
static int	hme_mii_readreg(struct device *, int, int);
static void	hme_mii_writereg(struct device *, int, int, int);
static void	hme_mii_statchg(struct device *);

int		hme_mediachange(struct ifnet *);
void		hme_mediastatus(struct ifnet *, struct ifmediareq *);

int		hme_eint(struct hme_softc *, u_int);
int		hme_rint(struct hme_softc *);
int		hme_tint(struct hme_softc *);

void
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * the local Ethernet address:
	 *	sc_arpcom.ac_enaddr
	 *
	 */
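
	/*
	 * A minimal front-end attach sketch (illustration only; `sa' and
	 * the subregion parameters are hypothetical and depend on the
	 * bus glue, e.g. the SBus or PCI front-end):
	 *
	 *	sc->sc_bustag = sa->sa_bustag;
	 *	sc->sc_dmatag = sa->sa_dmatag;
	 *	bus_space_subregion(sc->sc_bustag, ioh, seb_off, seb_size,
	 *	    &sc->sc_seb);
	 *	... likewise for sc_erx, sc_etx, sc_mac and sc_mif ...
	 *	sc->sc_burst = 64;
	 *	bcopy(myetheraddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	 *	hme_config(sc);
	 */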

	/* Make sure the chip is stopped. */
	hme_stop(sc, 0);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of the number of descriptors.
	 */
	size = (HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
	    (HME_XD_SIZE * HME_TX_RING_MAX);		/* TX descriptors */
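
	/*
	 * (From the comment above, HME_XD_SIZE is 8 bytes: 256
	 * descriptors fill exactly 2048 bytes, so `size' comes to
	 * 4096 bytes for the two maximal rings.)
	 */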

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %lld\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

void
hme_unconfig(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	hme_stop(sc, 1);

	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}

void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		hme_fill_rx_ring(sc);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(struct hme_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		/* Mask all interrupts */
		bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

		/* Reset transmitter and receiver */
		bus_space_write_4(t, seb, HME_SEBI_RESET,
		    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

		for (n = 0; n < 20; n++) {
			u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
			if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
				break;
			DELAY(20);
		}
		if (n >= 20)
			printf("%s: hme_stop: reset failed\n",
			    sc->sc_dev.dv_xname);
	}

	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;

	for (n = 0; n < HME_RX_RING_SIZE; n++) {
		if (sc->sc_rxd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,
			    0, sc->sc_rxd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map);
			m_freem(sc->sc_rxd[n].sd_mbuf);
			sc->sc_rxd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
}

void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);
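
	/*
	 * The resulting layout of the descriptor area (each ring starts
	 * on a 2048-byte boundary, as the chip requires):
	 *
	 *	rb_txd / rb_txddma:	HME_TX_RING_SIZE * HME_XD_SIZE bytes
	 *	(pad up to 2048)
	 *	rb_rxd / rb_rxddma:	HME_RX_RING_SIZE * HME_XD_SIZE bytes
	 *	(pad up to 2048)
	 */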

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0);
		sc->sc_rxd[i].sd_mbuf = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, HME_RX_RING_SIZE);
	hme_fill_rx_ring(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);
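	/*
	 * (The encoding is (ring size / 16) - 1: e.g. a 64-entry ring
	 * is programmed as 64 / 16 - 1 = 3.)
	 */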

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error	"RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
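
	/*
	 * (HME_RX_OFFSET lands in the ERX "first byte offset" field;
	 * the 2-byte pad means the 14-byte Ethernet header leaves the
	 * IP header 32-bit aligned in the receive buffer. The field
	 * placement is inferred from the shift above, not taken from
	 * the manual.)
	 */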

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	hme_start(ifp);
}

void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* We are now committed to transmitting the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);
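
		/*
		 * Ordering note: every descriptor after the first was
		 * written with HME_XD_OWN already set, but the SOP
		 * descriptor only receives its OWN bit here, once the
		 * whole chain is in memory, so the chip can never start
		 * on a half-built packet.
		 */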

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	ifq_deq_commit(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifq_clr_oactive(&ifp->if_snd);
		if (txflags & HME_XD_EOP)
			ifp->if_opackets++;

		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rx_cons;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;

		if_rxr_put(&sc->sc_rx_ring, 1);

		if (flags & HME_XD_OFL) {
			ifp->if_ierrors++;
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			m_freem(m);
			continue;
		}

		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->sc_rx_cons = ri;
	hme_fill_rx_ring(sc);
	return (1);
}

int
hme_eint(struct hme_softc *sc, u_int status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HMEDEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status,
	    HME_SEB_STAT_BITS);
#endif
	return (1);
}

int
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
	if (status == 0xffffffff)
		return (0);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}

void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(struct device *self, int phy, int reg)
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
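
	/*
	 * This assembles a standard IEEE 802.3 clause 22 MII management
	 * frame in the MIF Frame/Output register: start bits, a read
	 * opcode, the 5-bit PHY address and 5-bit register address, and
	 * the turnaround MSB. Writing the register starts the bus cycle;
	 * completion is signalled by the turnaround LSB (TALSB) below.
	 */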

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}

static void
hme_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
	    HME_MIF_FO_TAMSB				|
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(struct device *dev)
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/*
	 * Set the MAC Full Duplex bit appropriately.
	 * Apparently the hme chip is SIMPLEX if working in full duplex
	 * mode, but not otherwise.
	 */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
	}
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			hme_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				hme_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				hme_stop(sc, 0);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
hme_iff(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t rxcfg, crc;

	rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	rxcfg &= ~(HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_PMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;
	/* Clear hash table */
	hash[0] = hash[1] = hash[2] = hash[3] = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_PMISC;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_HENABLE;
		hash[0] = hash[1] = hash[2] = hash[3] = 0xffff;
	} else {
		rxcfg |= HME_MAC_RXCFG_HENABLE;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (crc & 0xf);

			ETHER_NEXT_MULTI(step, enm);
		}
	}
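
	/*
	 * Worked example: the top 6 bits of the little-endian CRC-32
	 * of the address (crc >> 26) give a bit index 0-63 into a
	 * 64-bit filter. hash[crc >> 4] picks one of the four 16-bit
	 * hash registers and 1 << (crc & 0xf) the bit within it, so
	 * e.g. crc = 0x2a sets bit 10 of HME_MACI_HASHTAB2.
	 */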

	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg);
}

void
hme_fill_rx_ring(struct hme_softc *sc)
{
	struct hme_sxd *sd;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, HME_RX_RING_SIZE);
	    slots > 0; slots--) {
		if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
			break;

		sd = &sc->sc_rxd[sc->sc_rx_prod];
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		if (++sc->sc_rx_prod == HME_RX_RING_SIZE)
			sc->sc_rx_prod = 0;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
}

int
hme_newbuf(struct hme_softc *sc, struct hme_sxd *d)
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}